diff --git a/rollup/internal/controller/watcher/batch_proposer.go b/rollup/internal/controller/watcher/batch_proposer.go
index 7b99c2c4c8..b98b815ff3 100644
--- a/rollup/internal/controller/watcher/batch_proposer.go
+++ b/rollup/internal/controller/watcher/batch_proposer.go
@@ -48,7 +48,8 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
         "maxChunkNumPerBatch", cfg.MaxChunkNumPerBatch,
         "maxL1CommitGasPerBatch", cfg.MaxL1CommitGasPerBatch,
         "maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch,
-        "batchTimeoutSec", cfg.BatchTimeoutSec)
+        "batchTimeoutSec", cfg.BatchTimeoutSec,
+        "gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier)
 
     return &BatchProposer{
         ctx: ctx,
@@ -178,6 +179,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, *types.BatchMeta, er
     // Add extra gas costs
     totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
     totalL1CommitGas += 20000 // 1 time sstore
+    totalL1CommitGas += 21000 // base fee for tx
     totalL1CommitGas += types.CalldataNonZeroByteGas // version in calldata
 
     // adjusting gas:
@@ -208,6 +210,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, *types.BatchMeta, er
         totalL1CommitGas += types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
         totalL1CommitGas += types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
         totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
+
         if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch || totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
             // Check if the first chunk breaks hard limits.
diff --git a/rollup/internal/controller/watcher/batch_proposer_test.go b/rollup/internal/controller/watcher/batch_proposer_test.go
index a8dd81aede..f5c8e18e82 100644
--- a/rollup/internal/controller/watcher/batch_proposer_test.go
+++ b/rollup/internal/controller/watcher/batch_proposer_test.go
@@ -13,54 +13,118 @@ import (
     "scroll-tech/rollup/internal/orm"
 )
 
-// TODO: Add unit tests that the limits are enforced correctly.
-func testBatchProposer(t *testing.T) {
-    db := setupDB(t)
-    defer database.CloseDB(db)
+func testBatchProposerLimits(t *testing.T) {
+    tests := []struct {
+        name string
+        maxChunkNum uint64
+        maxL1CommitGas uint64
+        maxL1CommitCalldataSize uint32
+        expectedBatchesLen int
+        expectedChunksInFirstBatch uint64
+    }{
+        {
+            name: "Timeout",
+            maxChunkNum: 10,
+            maxL1CommitGas: 50000000000,
+            maxL1CommitCalldataSize: 1000000,
+            expectedBatchesLen: 1,
+            expectedChunksInFirstBatch: 2,
+        },
+        {
+            name: "MaxL1CommitGasPerBatchIs0",
+            maxChunkNum: 10,
+            maxL1CommitGas: 0,
+            maxL1CommitCalldataSize: 1000000,
+            expectedBatchesLen: 0,
+        },
+        {
+            name: "MaxL1CommitCalldataSizePerBatchIs0",
+            maxChunkNum: 10,
+            maxL1CommitGas: 50000000000,
+            maxL1CommitCalldataSize: 0,
+            expectedBatchesLen: 0,
+        },
+        {
+            name: "MaxChunkNumPerBatchIs1",
+            maxChunkNum: 1,
+            maxL1CommitGas: 50000000000,
+            maxL1CommitCalldataSize: 1000000,
+            expectedBatchesLen: 1,
+            expectedChunksInFirstBatch: 1,
+        },
+        {
+            name: "MaxL1CommitGasPerBatchIsFirstChunk",
+            maxChunkNum: 1,
+            maxL1CommitGas: 100000,
+            maxL1CommitCalldataSize: 1000000,
+            expectedBatchesLen: 1,
+            expectedChunksInFirstBatch: 1,
+        },
+        {
+            name: "MaxL1CommitCalldataSizePerBatchIsFirstChunk",
+            maxChunkNum: 1,
+            maxL1CommitGas: 50000000000,
+            maxL1CommitCalldataSize: 298,
+            expectedBatchesLen: 1,
+            expectedChunksInFirstBatch: 1,
+        },
+    }
 
-    l2BlockOrm := orm.NewL2Block(db)
-    err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
-    assert.NoError(t, err)
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            db := setupDB(t)
+            defer database.CloseDB(db)
 
-    cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-        MaxBlockNumPerChunk: 100,
-        MaxTxNumPerChunk: 10000,
-        MaxL1CommitGasPerChunk: 50000000000,
-        MaxL1CommitCalldataSizePerChunk: 1000000,
-        MaxRowConsumptionPerChunk: 1048319,
-        ChunkTimeoutSec: 300,
-    }, db, nil)
-    cp.TryProposeChunk()
+            l2BlockOrm := orm.NewL2Block(db)
+            err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
+            assert.NoError(t, err)
 
-    bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
-        MaxChunkNumPerBatch: 10,
-        MaxL1CommitGasPerBatch: 50000000000,
-        MaxL1CommitCalldataSizePerBatch: 1000000,
-        BatchTimeoutSec: 300,
-    }, db, nil)
-    bp.TryProposeBatch()
+            cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
+                MaxBlockNumPerChunk: 1,
+                MaxTxNumPerChunk: 10000,
+                MaxL1CommitGasPerChunk: 50000000000,
+                MaxL1CommitCalldataSizePerChunk: 1000000,
+                MaxRowConsumptionPerChunk: 1000000,
+                ChunkTimeoutSec: 300,
+                GasCostIncreaseMultiplier: 1.2,
+            }, db, nil)
+            cp.TryProposeChunk() // chunk1 contains block1
+            cp.TryProposeChunk() // chunk2 contains block2
 
-    batchOrm := orm.NewBatch(db)
-    // get all batches.
-    batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
-    assert.NoError(t, err)
-    assert.Len(t, batches, 1)
-    assert.Equal(t, uint64(0), batches[0].StartChunkIndex)
-    assert.Equal(t, uint64(0), batches[0].EndChunkIndex)
-    assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
-    assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
+            chunkOrm := orm.NewChunk(db)
+            chunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 1) // Assuming the chunks have indices 0 and 1
+            assert.NoError(t, err)
+            assert.Equal(t, uint64(6006), chunks[0].TotalL1CommitGas)
+            assert.Equal(t, uint32(298), chunks[0].TotalL1CommitCalldataSize)
+            assert.Equal(t, uint64(93982), chunks[1].TotalL1CommitGas)
+            assert.Equal(t, uint32(5735), chunks[1].TotalL1CommitCalldataSize)
 
-    chunkOrm := orm.NewChunk(db)
-    dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 0)
-    assert.NoError(t, err)
-    assert.Len(t, batches, 1)
-    assert.Equal(t, batches[0].Hash, dbChunks[0].BatchHash)
-    assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(dbChunks[0].ProvingStatus))
+            bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
+                MaxChunkNumPerBatch: tt.maxChunkNum,
+                MaxL1CommitGasPerBatch: tt.maxL1CommitGas,
+                MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize,
+                BatchTimeoutSec: 300,
+                GasCostIncreaseMultiplier: 1.2,
+            }, db, nil)
+            bp.TryProposeBatch()
 
-    blockOrm := orm.NewL2Block(db)
-    blocks, err := blockOrm.GetL2Blocks(context.Background(), map[string]interface{}{}, []string{}, 0)
-    assert.NoError(t, err)
-    assert.Len(t, blocks, 2)
-    assert.Equal(t, dbChunks[0].Hash, blocks[0].ChunkHash)
-    assert.Equal(t, dbChunks[0].Hash, blocks[1].ChunkHash)
+            batchOrm := orm.NewBatch(db)
+            batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
+            assert.NoError(t, err)
+            assert.Len(t, batches, tt.expectedBatchesLen)
+            if tt.expectedBatchesLen > 0 {
+                assert.Equal(t, uint64(0), batches[0].StartChunkIndex)
+                assert.Equal(t, tt.expectedChunksInFirstBatch-1, batches[0].EndChunkIndex)
+                assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
+                assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
+
+                dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, tt.expectedChunksInFirstBatch-1)
+                assert.NoError(t, err)
+                for _, chunk := range dbChunks {
+                    assert.Equal(t, batches[0].Hash, chunk.BatchHash)
+                    assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
+                }
+            }
+        })
+    }
 }
diff --git a/rollup/internal/controller/watcher/chunk_proposer.go b/rollup/internal/controller/watcher/chunk_proposer.go
index 6c9be6fd36..92d707c8bc 100644
--- a/rollup/internal/controller/watcher/chunk_proposer.go
+++ b/rollup/internal/controller/watcher/chunk_proposer.go
@@ -80,7 +80,8 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
         "maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk,
         "maxL1CommitCalldataSizePerChunk", cfg.MaxL1CommitCalldataSizePerChunk,
         "maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
-        "chunkTimeoutSec", cfg.ChunkTimeoutSec)
+        "chunkTimeoutSec", cfg.ChunkTimeoutSec,
+        "gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier)
 
     return &ChunkProposer{
         ctx: ctx,
diff --git a/rollup/internal/controller/watcher/chunk_proposer_test.go b/rollup/internal/controller/watcher/chunk_proposer_test.go
index 1fff539e72..8cb2d7717b 100644
--- a/rollup/internal/controller/watcher/chunk_proposer_test.go
+++ b/rollup/internal/controller/watcher/chunk_proposer_test.go
@@ -13,58 +13,154 @@ import (
     "scroll-tech/rollup/internal/orm"
 )
 
-// TODO: Add unit tests that the limits are enforced correctly.
-func testChunkProposer(t *testing.T) {
-    db := setupDB(t)
-    defer database.CloseDB(db)
+func testChunkProposerLimits(t *testing.T) {
+    tests := []struct {
+        name string
+        maxBlockNum uint64
+        maxTxNum uint64
+        maxL1CommitGas uint64
+        maxL1CommitCalldataSize uint64
+        maxRowConsumption uint64
+        expectedChunksLen int
+        expectedBlocksNum uint64
+    }{
+        {
+            name: "Timeout",
+            maxBlockNum: 100,
+            maxTxNum: 10000,
+            maxL1CommitGas: 50000000000,
+            maxL1CommitCalldataSize: 1000000,
+            maxRowConsumption: 1000000,
+            expectedChunksLen: 1,
+            expectedBlocksNum: 2,
+        },
+        {
+            name: "MaxTxNumPerChunkIs0",
+            maxBlockNum: 10,
+            maxTxNum: 0,
+            maxL1CommitGas: 50000000000,
+            maxL1CommitCalldataSize: 1000000,
+            maxRowConsumption: 1000000,
+            expectedChunksLen: 0,
+        },
+        {
+            name: "MaxL1CommitGasPerChunkIs0",
+            maxBlockNum: 10,
+            maxTxNum: 10000,
+            maxL1CommitGas: 0,
+            maxL1CommitCalldataSize: 1000000,
+            maxRowConsumption: 1000000,
+            expectedChunksLen: 0,
+        },
+        {
+            name: "MaxL1CommitCalldataSizePerChunkIs0",
+            maxBlockNum: 10,
+            maxTxNum: 10000,
+            maxL1CommitGas: 50000000000,
+            maxL1CommitCalldataSize: 0,
+            maxRowConsumption: 1000000,
+            expectedChunksLen: 0,
+        },
+        {
+            name: "MaxRowConsumptionPerChunkIs0",
+            maxBlockNum: 100,
+            maxTxNum: 10000,
+            maxL1CommitGas: 50000000000,
+            maxL1CommitCalldataSize: 1000000,
+            maxRowConsumption: 0,
+            expectedChunksLen: 0,
+        },
+        {
+            name: "MaxBlockNumPerChunkIs1",
+            maxBlockNum: 1,
+            maxTxNum: 10000,
+            maxL1CommitGas: 50000000000,
+            maxL1CommitCalldataSize: 1000000,
+            maxRowConsumption: 1000000,
+            expectedChunksLen: 1,
+            expectedBlocksNum: 1,
+        },
+        {
+            name: "MaxTxNumPerChunkIsFirstBlock",
+            maxBlockNum: 10,
+            maxTxNum: 2,
+            maxL1CommitGas: 50000000000,
+            maxL1CommitCalldataSize: 1000000,
+            maxRowConsumption: 1000000,
+            expectedChunksLen: 1,
+            expectedBlocksNum: 1,
+        },
+        {
+            name: "MaxL1CommitGasPerChunkIsFirstBlock",
+            maxBlockNum: 10,
+            maxTxNum: 10000,
+            maxL1CommitGas: 60,
+            maxL1CommitCalldataSize: 1000000,
+            maxRowConsumption: 1000000,
+            expectedChunksLen: 1,
+            expectedBlocksNum: 1,
+        },
+        {
+            name: "MaxL1CommitCalldataSizePerChunkIsFirstBlock",
+            maxBlockNum: 10,
+            maxTxNum: 10000,
+            maxL1CommitGas: 50000000000,
+            maxL1CommitCalldataSize: 298,
+            maxRowConsumption: 1000000,
+            expectedChunksLen: 1,
+            expectedBlocksNum: 1,
+        },
+        {
+            name: "MaxRowConsumptionPerChunkIs1",
+            maxBlockNum: 10,
+            maxTxNum: 10000,
+            maxL1CommitGas: 50000000000,
+            maxL1CommitCalldataSize: 1000000,
+            maxRowConsumption: 1,
+            expectedChunksLen: 1,
+            expectedBlocksNum: 1,
+        },
+    }
 
-    l2BlockOrm := orm.NewL2Block(db)
-    err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
-    assert.NoError(t, err)
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            db := setupDB(t)
+            defer database.CloseDB(db)
 
-    cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-        MaxBlockNumPerChunk: 100,
-        MaxTxNumPerChunk: 10000,
-        MaxL1CommitGasPerChunk: 50000000000,
-        MaxL1CommitCalldataSizePerChunk: 1000000,
-        MaxRowConsumptionPerChunk: 1048319,
-        ChunkTimeoutSec: 300,
-    }, db, nil)
-    cp.TryProposeChunk()
+            l2BlockOrm := orm.NewL2Block(db)
+            err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
+            assert.NoError(t, err)
 
-    expectedChunk := &types.Chunk{
-        Blocks: []*types.WrappedBlock{wrappedBlock1, wrappedBlock2},
-    }
-    expectedHash, err := expectedChunk.Hash(0)
-    assert.NoError(t, err)
+            cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
+                MaxBlockNumPerChunk: tt.maxBlockNum,
+                MaxTxNumPerChunk: tt.maxTxNum,
+                MaxL1CommitGasPerChunk: tt.maxL1CommitGas,
+                MaxL1CommitCalldataSizePerChunk: tt.maxL1CommitCalldataSize,
+                MaxRowConsumptionPerChunk: tt.maxRowConsumption,
+                ChunkTimeoutSec: 300,
+                GasCostIncreaseMultiplier: 1.2,
+            }, db, nil)
+            cp.TryProposeChunk()
 
-    chunkOrm := orm.NewChunk(db)
-    chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
-    assert.NoError(t, err)
-    assert.Len(t, chunks, 1)
-    assert.Equal(t, expectedHash.Hex(), chunks[0].Hash)
-}
+            chunkOrm := orm.NewChunk(db)
+            chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
+            assert.NoError(t, err)
+            assert.Len(t, chunks, tt.expectedChunksLen)
 
-func testChunkProposerRowConsumption(t *testing.T) {
-    db := setupDB(t)
-    defer database.CloseDB(db)
+            if len(chunks) > 0 {
+                var expectedChunk types.Chunk
 
-    l2BlockOrm := orm.NewL2Block(db)
-    err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
-    assert.NoError(t, err)
+                switch tt.expectedBlocksNum {
+                case 1:
+                    expectedChunk.Blocks = []*types.WrappedBlock{wrappedBlock1}
+                case 2:
+                    expectedChunk.Blocks = []*types.WrappedBlock{wrappedBlock1, wrappedBlock2}
+                }
 
-    cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
-        MaxBlockNumPerChunk: 100,
-        MaxTxNumPerChunk: 10000,
-        MaxL1CommitGasPerChunk: 50000000000,
-        MaxL1CommitCalldataSizePerChunk: 1000000,
-        MaxRowConsumptionPerChunk: 0, // !
-        ChunkTimeoutSec: 300,
-    }, db, nil)
-    cp.TryProposeChunk()
-
-    chunkOrm := orm.NewChunk(db)
-    chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
-    assert.NoError(t, err)
-    assert.Len(t, chunks, 0)
+                expectedHash, err := expectedChunk.Hash(0)
+                assert.NoError(t, err)
+                assert.Equal(t, expectedHash.Hex(), chunks[0].Hash)
+            }
+        })
+    }
 }
diff --git a/rollup/internal/controller/watcher/watcher_test.go b/rollup/internal/controller/watcher/watcher_test.go
index 85d89c6986..5de910cf26 100644
--- a/rollup/internal/controller/watcher/watcher_test.go
+++ b/rollup/internal/controller/watcher/watcher_test.go
@@ -110,10 +110,9 @@ func TestFunction(t *testing.T) {
     t.Run("TestParseBridgeEventLogsL2RelayedMessageEventSignature", testParseBridgeEventLogsL2RelayedMessageEventSignature)
     t.Run("TestParseBridgeEventLogsL2FailedRelayedMessageEventSignature", testParseBridgeEventLogsL2FailedRelayedMessageEventSignature)
 
-    // Run chunk-proposer test cases.
-    t.Run("TestChunkProposer", testChunkProposer)
-    t.Run("TestChunkProposerRowConsumption", testChunkProposerRowConsumption)
+    // Run chunk proposer test cases.
+    t.Run("TestChunkProposerLimits", testChunkProposerLimits)
 
-    // Run batch-proposer test cases.
-    t.Run("TestBatchProposer", testBatchProposer)
+    // Run batch proposer test cases.
+    t.Run("TestBatchProposerLimits", testBatchProposerLimits)
 }