Merge branch 'develop' into env1-sampling
0xmountaintop authored Apr 13, 2024
2 parents 6cc1f42 + 5b827c3 commit 2cccdf7
Showing 13 changed files with 95 additions and 15 deletions.
2 changes: 1 addition & 1 deletion common/version/version.go
@@ -5,7 +5,7 @@ import (
"runtime/debug"
)

var tag = "v4.3.91"
var tag = "v4.3.92"

var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
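
The commit initializer is truncated by the diff above; for context, here is a sketch of the standard pattern it appears to follow, using runtime/debug's build info (an assumption, not the repository's verbatim code):

package version

import "runtime/debug"

// Sketch: read the VCS revision that the Go toolchain embeds at build
// time and shorten it; fall back to a placeholder when unavailable.
var commit = func() string {
	if info, ok := debug.ReadBuildInfo(); ok {
		for _, setting := range info.Settings {
			if setting.Key == "vcs.revision" && len(setting.Value) >= 8 {
				return setting.Value[:8]
			}
		}
	}
	return "unknown"
}()
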
8 changes: 8 additions & 0 deletions coordinator/internal/orm/batch.go
@@ -24,6 +24,7 @@ type Batch struct {
// batch
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
+DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -54,6 +55,10 @@ type Batch struct {
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`

+// blob
+BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
+BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
+
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -248,6 +253,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
newBatch := Batch{
Index: batch.Index,
Hash: daBatch.Hash().Hex(),
+DataHash: daBatch.DataHash.Hex(),
StartChunkHash: startDAChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: endDAChunkHash.Hex(),
@@ -262,6 +268,8 @@
ActiveAttempts: 0,
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
+BlobDataProof: nil, // mock value; this code path is only used in unit tests
+BlobSize: 0, // mock value; this code path is only used in unit tests
}

db := o.db
6 changes: 6 additions & 0 deletions coordinator/internal/orm/chunk.go
@@ -48,6 +48,10 @@ type Chunk struct {
// batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`

+// blob
+CrcMax uint64 `json:"crc_max" gorm:"column:crc_max"`
+BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
+
// metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -300,6 +304,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
ProvingStatus: int16(types.ProvingTaskUnassigned),
TotalAttempts: 0,
ActiveAttempts: 0,
+CrcMax: 0, // mock value; this code path is only used in unit tests
+BlobSize: 0, // mock value; this code path is only used in unit tests
}

db := o.db
6 changes: 3 additions & 3 deletions database/migrate/migrate_test.go
@@ -59,20 +59,20 @@ func testResetDB(t *testing.T) {
cur, err := Current(pgDB)
assert.NoError(t, err)
// total number of tables.
-assert.Equal(t, int64(16), cur)
+assert.Equal(t, int64(17), cur)
}

func testMigrate(t *testing.T) {
assert.NoError(t, Migrate(pgDB))
cur, err := Current(pgDB)
assert.NoError(t, err)
-assert.Equal(t, int64(16), cur)
+assert.Equal(t, int64(17), cur)
}

func testRollback(t *testing.T) {
version, err := Current(pgDB)
assert.NoError(t, err)
-assert.Equal(t, int64(16), version)
+assert.Equal(t, int64(17), version)

assert.NoError(t, Rollback(pgDB, nil))

27 changes: 27 additions & 0 deletions database/migrate/migrations/00017_add_blob_meta_data.sql
@@ -0,0 +1,27 @@
-- +goose Up
-- +goose StatementBegin

ALTER TABLE chunk
ADD COLUMN crc_max INTEGER DEFAULT 0,
ADD COLUMN blob_size INTEGER DEFAULT 0;

ALTER TABLE batch
ADD COLUMN data_hash VARCHAR DEFAULT '',
ADD COLUMN blob_data_proof BYTEA DEFAULT NULL,
ADD COLUMN blob_size INTEGER DEFAULT 0;

-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin

ALTER TABLE IF EXISTS batch
DROP COLUMN data_hash,
DROP COLUMN blob_data_proof,
DROP COLUMN blob_size;

ALTER TABLE IF EXISTS chunk
DROP COLUMN crc_max,
DROP COLUMN blob_size;

-- +goose StatementEnd
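
For context, an illustrative query over the columns added above (the table and column names come from this migration; the query itself is not part of the commit and assumes PostgreSQL, consistent with the BYTEA type):

-- Illustrative only: inspect blob metadata for the most recent batches.
-- "index" is quoted since it collides with an SQL keyword.
SELECT "index", data_hash, blob_size, octet_length(blob_data_proof) AS proof_bytes
FROM batch
ORDER BY "index" DESC
LIMIT 10;
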
2 changes: 1 addition & 1 deletion rollup/cmd/event_watcher/app/app.go
@@ -76,7 +76,7 @@ func action(ctx *cli.Context) error {
}
})

log.Info("Start event-watcher successfully")
log.Info("Start event-watcher successfully", "version", version.Version)

// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
2 changes: 1 addition & 1 deletion rollup/cmd/gas_oracle/app/app.go
@@ -109,7 +109,7 @@ func action(ctx *cli.Context) error {
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)

// Finished starting all message relayer functions.
log.Info("Start gas-oracle successfully")
log.Info("Start gas-oracle successfully", "version", version.Version)

// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
2 changes: 1 addition & 1 deletion rollup/cmd/rollup_relayer/app/app.go
@@ -115,7 +115,7 @@ func action(ctx *cli.Context) error {
go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches)

// Finished starting all rollup relayer functions.
log.Info("Start rollup-relayer successfully")
log.Info("Start rollup-relayer successfully", "version", version.Version)

// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
8 changes: 8 additions & 0 deletions rollup/internal/orm/batch.go
@@ -25,6 +25,7 @@ type Batch struct {
// batch
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
+DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -53,6 +54,10 @@ type Batch struct {
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`

+// blob
+BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
+BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
+
// metadata
TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas;default:0"`
TotalL1CommitCalldataSize uint64 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size;default:0"`
@@ -257,6 +262,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
newBatch := Batch{
Index: batch.Index,
Hash: batchMeta.BatchHash.Hex(),
+DataHash: batchMeta.BatchDataHash.Hex(),
StartChunkHash: batchMeta.StartChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: batchMeta.EndChunkHash.Hex(),
@@ -271,6 +277,8 @@
OracleStatus: int16(types.GasOraclePending),
TotalL1CommitGas: metrics.L1CommitGas,
TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
+BlobDataProof: batchMeta.BatchBlobDataProof,
+BlobSize: metrics.L1CommitBlobSize,
}

db := o.db
6 changes: 6 additions & 0 deletions rollup/internal/orm/chunk.go
@@ -44,6 +44,10 @@ type Chunk struct {
// batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`

+// blob
+CrcMax uint64 `json:"crc_max" gorm:"column:crc_max"`
+BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
+
// metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -212,6 +216,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
ParentChunkStateRoot: parentChunkStateRoot,
WithdrawRoot: chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
ProvingStatus: int16(types.ProvingTaskUnassigned),
+CrcMax: metrics.CrcMax,
+BlobSize: metrics.L1CommitBlobSize,
}

db := o.db
27 changes: 19 additions & 8 deletions rollup/internal/utils/utils.go
@@ -191,10 +191,12 @@ func GetChunkHash(chunk *encoding.Chunk, totalL1MessagePoppedBefore uint64, code

// BatchMetadata represents the metadata of a batch.
type BatchMetadata struct {
-BatchHash common.Hash
-BatchBytes []byte
-StartChunkHash common.Hash
-EndChunkHash common.Hash
+BatchHash common.Hash
+BatchDataHash common.Hash
+BatchBlobDataProof []byte
+BatchBytes []byte
+StartChunkHash common.Hash
+EndChunkHash common.Hash
}

// GetBatchMetadata retrieves the metadata of a batch.
@@ -212,9 +214,11 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
return nil, fmt.Errorf("failed to create codecv0 DA batch: %w", err)
}

+// BatchBlobDataProof is left empty for codecv0.
batchMeta := &BatchMetadata{
-BatchHash: daBatch.Hash(),
-BatchBytes: daBatch.Encode(),
+BatchHash: daBatch.Hash(),
+BatchDataHash: daBatch.DataHash,
+BatchBytes: daBatch.Encode(),
}

startDAChunk, err := codecv0.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
@@ -243,9 +247,16 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion)
return nil, fmt.Errorf("failed to create codecv1 DA batch: %w", err)
}

+blobDataProof, err := daBatch.BlobDataProof()
+if err != nil {
+return nil, fmt.Errorf("failed to get codecv1 blob data proof: %w", err)
+}
+
batchMeta := &BatchMetadata{
-BatchHash: daBatch.Hash(),
-BatchBytes: daBatch.Encode(),
+BatchHash: daBatch.Hash(),
+BatchDataHash: daBatch.DataHash,
+BatchBlobDataProof: blobDataProof,
+BatchBytes: daBatch.Encode(),
}

startDAChunk, err := codecv1.NewDAChunk(batch.Chunks[0], batch.TotalL1MessagePoppedBefore)
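
For reference, a minimal consumer sketch of the expanded BatchMetadata (illustrative only; the import paths and the encoding.CodecV1 constant are assumptions not shown in this diff):

package example

import (
	"fmt"

	"github.com/scroll-tech/da-codec/encoding" // assumed import path

	"scroll-tech/rollup/internal/utils" // assumed import path
)

// PrintBatchMeta sketches how a caller might read the new fields.
// BatchDataHash is populated for both codec versions, while
// BatchBlobDataProof stays nil for codecv0 and is set only for codecv1.
func PrintBatchMeta(batch *encoding.Batch) error {
	meta, err := utils.GetBatchMetadata(batch, encoding.CodecV1) // CodecV1 constant assumed
	if err != nil {
		return fmt.Errorf("get batch metadata: %w", err)
	}
	fmt.Println("batch hash:", meta.BatchHash.Hex())
	fmt.Println("data hash:", meta.BatchDataHash.Hex())
	fmt.Println("blob data proof bytes:", len(meta.BatchBlobDataProof))
	return nil
}
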
8 changes: 8 additions & 0 deletions tests/integration-test/orm/batch.go
@@ -21,6 +21,7 @@ type Batch struct {
// batch
Index uint64 `json:"index" gorm:"column:index"`
Hash string `json:"hash" gorm:"column:hash"`
+DataHash string `json:"data_hash" gorm:"column:data_hash"`
StartChunkIndex uint64 `json:"start_chunk_index" gorm:"column:start_chunk_index"`
StartChunkHash string `json:"start_chunk_hash" gorm:"column:start_chunk_hash"`
EndChunkIndex uint64 `json:"end_chunk_index" gorm:"column:end_chunk_index"`
@@ -49,6 +50,10 @@ type Batch struct {
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`

+// blob
+BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
+BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
+
// metadata
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
@@ -151,6 +156,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
newBatch := Batch{
Index: batch.Index,
Hash: daBatch.Hash().Hex(),
+DataHash: daBatch.DataHash.Hex(),
StartChunkHash: startDAChunkHash.Hex(),
StartChunkIndex: startChunkIndex,
EndChunkHash: endDAChunkHash.Hex(),
@@ -163,6 +169,8 @@
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
+BlobDataProof: nil, // mock value; this code path is only used in unit tests
+BlobSize: 0, // mock value; this code path is only used in unit tests
}

db := o.db
6 changes: 6 additions & 0 deletions tests/integration-test/orm/chunk.go
@@ -43,6 +43,10 @@ type Chunk struct {
// batch
BatchHash string `json:"batch_hash" gorm:"column:batch_hash;default:NULL"`

+// blob
+CrcMax uint64 `json:"crc_max" gorm:"column:crc_max"`
+BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
+
// metadata
TotalL2TxGas uint64 `json:"total_l2_tx_gas" gorm:"column:total_l2_tx_gas"`
TotalL2TxNum uint64 `json:"total_l2_tx_num" gorm:"column:total_l2_tx_num"`
@@ -150,6 +154,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, dbTX ...
ParentChunkStateRoot: parentChunkStateRoot,
WithdrawRoot: chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(),
ProvingStatus: int16(types.ProvingTaskUnassigned),
+CrcMax: 0, // mock value; this code path is only used in unit tests
+BlobSize: 0, // mock value; this code path is only used in unit tests
}

db := o.db
