Skip to content
Prev Previous commit
Next Next commit
add ConvertBlobToBlobBytes utility functions
  • Loading branch information
colinlyguo committed Aug 19, 2024
commit 030349d59730d9624a5f7d7be0165740ed6ee3d9
28 changes: 28 additions & 0 deletions encoding/codecv3/codecv3.go
Original file line number Diff line number Diff line change
Expand Up @@ -231,6 +231,34 @@ func (b *DABatch) Blob() *kzg4844.Blob {
return b.blob
}

// ConvertBlobToBlobBytes converts the canonical blob representation into DA blob bytes.
//
// A canonical blob is a sequence of 32-byte BLS12-381 field elements whose
// first byte is always zero padding (keeping each element below the field
// modulus), so every element carries 31 payload bytes. This strips that
// padding byte from each element and then truncates the result to the payload
// length described by the blob's metadata section: a 2-byte big-endian chunk
// count followed by MaxNumChunks 4-byte big-endian chunk sizes.
//
// It returns an error if the decoded chunk count exceeds MaxNumChunks, or if
// the accumulated chunk sizes would run past the end of the de-padded blob.
func (b *DABatch) ConvertBlobToBlobBytes() ([]byte, error) {
	const elemSize = 32 // bytes per BLS12-381 field element; the first byte of each is padding

	// De-pad: keep the 31 payload bytes of every 32-byte field element.
	// Sizing from len(b.blob) avoids the hard-coded 126976 (= 4096*31),
	// which could silently drift from the actual blob length.
	blobBytes := make([]byte, len(b.blob)/elemSize*(elemSize-1))
	for from := 0; from < len(b.blob); from += elemSize {
		copy(blobBytes[from/elemSize*(elemSize-1):], b.blob[from+1:from+elemSize])
	}

	metadataLength := 2 + MaxNumChunks*4
	numChunks := binary.BigEndian.Uint16(blobBytes[:2])

	if numChunks > MaxNumChunks {
		return nil, fmt.Errorf("number of chunks (%d) exceeds maximum allowed chunks (%d)", numChunks, MaxNumChunks)
	}

	// Sum the declared chunk sizes, bounds-checking cumulatively so a
	// corrupted size field cannot produce an out-of-range slice below.
	totalSize := metadataLength
	for i := 0; i < int(numChunks); i++ {
		chunkSize := binary.BigEndian.Uint32(blobBytes[2+4*i:])
		totalSize += int(chunkSize)

		if totalSize > len(blobBytes) {
			return nil, fmt.Errorf("calculated total size (%d) exceeds the length of blobBytes (%d)", totalSize, len(blobBytes))
		}
	}

	return blobBytes[:totalSize], nil
}

// EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a single chunk.
func EstimateChunkL1CommitBatchSizeAndBlobSize(c *encoding.Chunk) (uint64, uint64, error) {
return codecv2.EstimateChunkL1CommitBatchSizeAndBlobSize(c)
Expand Down
29 changes: 29 additions & 0 deletions encoding/codecv4/codecv4.go
Original file line number Diff line number Diff line change
Expand Up @@ -353,6 +353,35 @@ func (b *DABatch) Blob() *kzg4844.Blob {
return b.blob
}

// ConvertBlobToBlobBytes converts the canonical blob representation into DA blob bytes.
//
// A canonical blob is a sequence of 32-byte BLS12-381 field elements whose
// first byte is always zero padding (keeping each element below the field
// modulus), so every element carries 31 payload bytes. This strips that
// padding byte from each element and then truncates the result to the payload
// length described by the blob's metadata. In codecv4 the de-padded bytes
// begin with a 1-byte flag, followed by a 2-byte big-endian chunk count and
// MaxNumChunks 4-byte big-endian chunk sizes.
//
// It returns an error if the decoded chunk count exceeds MaxNumChunks, or if
// the accumulated chunk sizes would run past the end of the de-padded blob.
func (b *DABatch) ConvertBlobToBlobBytes() ([]byte, error) {
	const elemSize = 32 // bytes per BLS12-381 field element; the first byte of each is padding

	// De-pad: keep the 31 payload bytes of every 32-byte field element.
	// Sizing from len(b.blob) avoids the hard-coded 126976 (= 4096*31),
	// which could silently drift from the actual blob length.
	blobBytes := make([]byte, len(b.blob)/elemSize*(elemSize-1))
	for from := 0; from < len(b.blob); from += elemSize {
		copy(blobBytes[from/elemSize*(elemSize-1):], b.blob[from+1:from+elemSize])
	}

	startIndex := 1 // Skip the flag byte in codecv4
	metadataLength := startIndex + 2 + MaxNumChunks*4
	numChunks := binary.BigEndian.Uint16(blobBytes[startIndex : startIndex+2])

	if numChunks > MaxNumChunks {
		return nil, fmt.Errorf("number of chunks (%d) exceeds maximum allowed chunks (%d)", numChunks, MaxNumChunks)
	}

	// Sum the declared chunk sizes, bounds-checking cumulatively so a
	// corrupted size field cannot produce an out-of-range slice below.
	totalSize := metadataLength
	for i := 0; i < int(numChunks); i++ {
		chunkSize := binary.BigEndian.Uint32(blobBytes[startIndex+2+4*i:])
		totalSize += int(chunkSize)

		if totalSize > len(blobBytes) {
			return nil, fmt.Errorf("calculated total size (%d) exceeds the length of blobBytes (%d)", totalSize, len(blobBytes))
		}
	}

	return blobBytes[:totalSize], nil
}

// EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit uncompressed batch size and compressed blob size for a single chunk.
func EstimateChunkL1CommitBatchSizeAndBlobSize(c *encoding.Chunk, enableEncode bool) (uint64, uint64, error) {
batchBytes, err := constructBatchPayload([]*encoding.Chunk{c})
Expand Down