test(rollup-relayer): add chunk and batch proposer limit tests
colinlyguo committed Sep 6, 2023
1 parent 33089b8 commit 2020227
Showing 5 changed files with 261 additions and 98 deletions.
5 changes: 4 additions & 1 deletion rollup/internal/controller/watcher/batch_proposer.go
@@ -48,7 +48,8 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
"maxChunkNumPerBatch", cfg.MaxChunkNumPerBatch,
"maxL1CommitGasPerBatch", cfg.MaxL1CommitGasPerBatch,
"maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch,
"batchTimeoutSec", cfg.BatchTimeoutSec)
"batchTimeoutSec", cfg.BatchTimeoutSec,
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier)

return &BatchProposer{
ctx: ctx,
@@ -178,6 +179,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, *types.BatchMeta, er
// Add extra gas costs
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
totalL1CommitGas += 20000 // 1 time sstore
totalL1CommitGas += 21000 // base fee for tx
totalL1CommitGas += types.CalldataNonZeroByteGas // version in calldata

// adjusting gas:
@@ -208,6 +210,7 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, *types.BatchMeta, er
totalL1CommitGas += types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))

if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
// Check if the first chunk breaks hard limits.
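
The hunks above add a flat 21,000 base transaction fee to the estimate and scale the total by the new gasCostIncreaseMultiplier before comparing it with the per-batch gas cap. A minimal sketch of that comparison, using only names visible in the diff (the standalone function and literal values are illustrative, not the actual implementation):

```go
package main

import "fmt"

// overBatchGasLimit mirrors the check in proposeBatchChunks: scale the raw
// L1 commit gas estimate by the multiplier, then compare the over-estimate
// with the configured per-batch cap.
func overBatchGasLimit(totalL1CommitGas uint64, multiplier float64, maxL1CommitGasPerBatch uint64) bool {
	totalOverEstimateL1CommitGas := uint64(multiplier * float64(totalL1CommitGas))
	return totalOverEstimateL1CommitGas > maxL1CommitGasPerBatch
}

func main() {
	// With the 1.2 multiplier used in the tests below, a raw estimate of
	// 100,000 gas is treated as 120,000 when deciding whether a batch is full.
	fmt.Println(overBatchGasLimit(100_000, 1.2, 50_000_000_000)) // false: far below the cap
	fmt.Println(overBatchGasLimit(100_000, 1.2, 100_000))        // true: 120,000 exceeds a 100,000 cap
}
```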
152 changes: 108 additions & 44 deletions rollup/internal/controller/watcher/batch_proposer_test.go
@@ -13,54 +13,118 @@ import (
"scroll-tech/rollup/internal/orm"
)

// TODO: Add unit tests that the limits are enforced correctly.
func testBatchProposer(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
func testBatchProposerLimits(t *testing.T) {
tests := []struct {
name string
maxChunkNum uint64
maxL1CommitGas uint64
maxL1CommitCalldataSize uint32
expectedBatchesLen int
expectedChunksInFirstBatch uint64
}{
{
name: "Timeout",
maxChunkNum: 10,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 2,
},
{
name: "MaxL1CommitGasPerBatchIs0",
maxChunkNum: 10,
maxL1CommitGas: 0,
maxL1CommitCalldataSize: 1000000,
expectedBatchesLen: 0,
},
{
name: "MaxL1CommitCalldataSizePerBatchIs0",
maxChunkNum: 10,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 0,
expectedBatchesLen: 0,
},
{
name: "MaxChunkNumPerBatchIs1",
maxChunkNum: 1,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 1000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
{
name: "MaxL1CommitGasPerBatchIsFirstChunk",
maxChunkNum: 1,
maxL1CommitGas: 100000,
maxL1CommitCalldataSize: 1000000,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
{
name: "MaxL1CommitCalldataSizePerBatchIsFirstChunk",
maxChunkNum: 1,
maxL1CommitGas: 50000000000,
maxL1CommitCalldataSize: 298,
expectedBatchesLen: 1,
expectedChunksInFirstBatch: 1,
},
}

l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 100,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
cp.TryProposeChunk()
l2BlockOrm := orm.NewL2Block(db)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
assert.NoError(t, err)

bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 300,
}, db, nil)
bp.TryProposeBatch()
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: 1,
MaxTxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2

batchOrm := orm.NewBatch(db)
// get all batches.
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, 1)
assert.Equal(t, uint64(0), batches[0].StartChunkIndex)
assert.Equal(t, uint64(0), batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 1) // Assuming the chunks have indices 0 and 1
assert.NoError(t, err)
assert.Equal(t, uint64(6006), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint32(298), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(93982), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint32(5735), chunks[1].TotalL1CommitCalldataSize)

chunkOrm := orm.NewChunk(db)
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 0)
assert.NoError(t, err)
assert.Len(t, batches, 1)
assert.Equal(t, batches[0].Hash, dbChunks[0].BatchHash)
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(dbChunks[0].ProvingStatus))
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: tt.maxChunkNum,
MaxL1CommitGasPerBatch: tt.maxL1CommitGas,
MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize,
BatchTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
}, db, nil)
bp.TryProposeBatch()

blockOrm := orm.NewL2Block(db)
blocks, err := blockOrm.GetL2Blocks(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, blocks, 2)
assert.Equal(t, dbChunks[0].Hash, blocks[0].ChunkHash)
assert.Equal(t, dbChunks[0].Hash, blocks[1].ChunkHash)
batchOrm := orm.NewBatch(db)
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
assert.NoError(t, err)
assert.Len(t, batches, tt.expectedBatchesLen)
if tt.expectedBatchesLen > 0 {
assert.Equal(t, uint64(0), batches[0].StartChunkIndex)
assert.Equal(t, tt.expectedChunksInFirstBatch-1, batches[0].EndChunkIndex)
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))

dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, tt.expectedChunksInFirstBatch-1)
assert.NoError(t, err)
for _, chunk := range dbChunks {
assert.Equal(t, batches[0].Hash, chunk.BatchHash)
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
}
}
})
}
}
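
The 298-byte cap in the MaxL1CommitCalldataSizePerBatchIsFirstChunk case matches the first chunk's asserted TotalL1CommitCalldataSize exactly, so only that chunk fits in the batch. A small worked example of the arithmetic, using the values asserted above (a simplified sketch; the real check lives in proposeBatchChunks):

```go
package main

import "fmt"

func main() {
	// Values taken from the chunk assertions in the test above.
	chunk1Calldata := uint32(298)  // chunks[0].TotalL1CommitCalldataSize
	chunk2Calldata := uint32(5735) // chunks[1].TotalL1CommitCalldataSize
	maxPerBatch := uint32(298)     // cap in the "...IsFirstChunk" case

	// The first chunk fits the cap exactly, so it stays in the batch.
	fmt.Println(chunk1Calldata <= maxPerBatch) // true

	// Adding the second chunk would exceed the cap, so the batch is sealed
	// after one chunk: expectedChunksInFirstBatch == 1, EndChunkIndex == 0.
	fmt.Println(chunk1Calldata+chunk2Calldata <= maxPerBatch) // false
}
```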
3 changes: 2 additions & 1 deletion rollup/internal/controller/watcher/chunk_proposer.go
@@ -80,7 +80,8 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *
"maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk,
"maxL1CommitCalldataSizePerChunk", cfg.MaxL1CommitCalldataSizePerChunk,
"maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
"chunkTimeoutSec", cfg.ChunkTimeoutSec)
"chunkTimeoutSec", cfg.ChunkTimeoutSec,
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier)

return &ChunkProposer{
ctx: ctx,
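
Logging gasCostIncreaseMultiplier here suggests the chunk proposer applies the same over-estimation factor when sizing chunks. A hedged sketch of that per-chunk check, assuming it mirrors the batch-side logic shown earlier (the helper function is a hypothetical illustration, not the repository's code):

```go
package main

import "fmt"

// overChunkGasLimit is an assumed per-chunk analogue of the batch-side check:
// scale the raw L1 commit gas estimate and compare it with MaxL1CommitGasPerChunk.
func overChunkGasLimit(totalL1CommitGas uint64, multiplier float64, maxL1CommitGasPerChunk uint64) bool {
	return uint64(multiplier*float64(totalL1CommitGas)) > maxL1CommitGasPerChunk
}

func main() {
	// With the test configuration (multiplier 1.2, cap 50,000,000,000), the two
	// chunks' estimates of 6,006 and 93,982 gas stay comfortably under the limit.
	fmt.Println(overChunkGasLimit(6_006, 1.2, 50_000_000_000))  // false
	fmt.Println(overChunkGasLimit(93_982, 1.2, 50_000_000_000)) // false
}
```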