From 8fead4470fe44ca447b35967a3e9f14de9b916b2 Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Wed, 21 Aug 2024 17:39:36 +0800 Subject: [PATCH 1/5] use DarwinV2 --- common/forks/forks.go | 10 ++++------ rollup/conf/config.json | 1 - rollup/go.mod | 4 ++-- rollup/go.sum | 8 ++++---- .../controller/watcher/batch_proposer.go | 20 +++++-------------- .../controller/watcher/bundle_proposer.go | 7 +++---- .../controller/watcher/chunk_proposer.go | 5 +---- 7 files changed, 19 insertions(+), 36 deletions(-) diff --git a/common/forks/forks.go b/common/forks/forks.go index 9be2afdf4..514982b1d 100644 --- a/common/forks/forks.go +++ b/common/forks/forks.go @@ -23,19 +23,17 @@ func GetHardforkName(config *params.ChainConfig, blockHeight, blockTimestamp uin // GetCodecVersion returns the encoding codec version for the given block height and timestamp. // It determines the appropriate codec version based on the active hardfork. -func GetCodecVersion(config *params.ChainConfig, blockHeight, blockTimestamp uint64, enableConditionalCompress bool) encoding.CodecVersion { +func GetCodecVersion(config *params.ChainConfig, blockHeight, blockTimestamp uint64) encoding.CodecVersion { if !config.IsBernoulli(new(big.Int).SetUint64(blockHeight)) { return encoding.CodecV0 } else if !config.IsCurie(new(big.Int).SetUint64(blockHeight)) { return encoding.CodecV1 } else if !config.IsDarwin(blockTimestamp) { return encoding.CodecV2 + } else if !config.IsDarwinV2(blockTimestamp) { + return encoding.CodecV3 } else { - if !enableConditionalCompress { - return encoding.CodecV3 - } else { - return encoding.CodecV4 - } + return encoding.CodecV4 } } diff --git a/rollup/conf/config.json b/rollup/conf/config.json index 834372131..457e42fe5 100644 --- a/rollup/conf/config.json +++ b/rollup/conf/config.json @@ -66,7 +66,6 @@ "l1_commit_gas_limit_multiplier": 1.2 }, "chunk_proposer_config": { - "enable_conditional_compress": false, "propose_interval_milliseconds": 100, "max_block_num_per_chunk": 100, "max_tx_num_per_chunk": 100, diff --git a/rollup/go.mod b/rollup/go.mod index fd13e07fb..34fd1af16 100644 --- a/rollup/go.mod +++ b/rollup/go.mod @@ -10,8 +10,8 @@ require ( github.com/go-resty/resty/v2 v2.7.0 github.com/holiman/uint256 v1.2.4 github.com/prometheus/client_golang v1.16.0 - github.com/scroll-tech/da-codec v0.0.0-20240819100936-c6af3bbe7068 - github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 + github.com/scroll-tech/da-codec v0.1.1-0.20240819100936-c6af3bbe7068 + github.com/scroll-tech/go-ethereum v1.10.14-0.20240821075135-bdd1b005d40f github.com/smartystreets/goconvey v1.8.0 github.com/stretchr/testify v1.9.0 github.com/urfave/cli/v2 v2.25.7 diff --git a/rollup/go.sum b/rollup/go.sum index 58c0b5be9..826819749 100644 --- a/rollup/go.sum +++ b/rollup/go.sum @@ -236,10 +236,10 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/da-codec v0.0.0-20240819100936-c6af3bbe7068 h1:oVGwhg4cCq35B04eG/S4OBXDwXiFH7+LezuH2ZTRBPs= -github.com/scroll-tech/da-codec v0.0.0-20240819100936-c6af3bbe7068/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= -github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6 h1:Q8YyvrcPIcXQwE4ucm4bqmPh6TP6IB1GUTXripf2WyQ= 
-github.com/scroll-tech/go-ethereum v1.10.14-0.20240626125436-418bc6f728b6/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ= +github.com/scroll-tech/da-codec v0.1.1-0.20240819100936-c6af3bbe7068 h1:KyTp4aedcpjr/rbntrmlhUxjrDYu1Q02QDLaF5vqpxs= +github.com/scroll-tech/da-codec v0.1.1-0.20240819100936-c6af3bbe7068/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= +github.com/scroll-tech/go-ethereum v1.10.14-0.20240821075135-bdd1b005d40f h1:0XhY20/Sh2UCroZqD4orK7eDElQD2XK4GLrTbPmUBpw= +github.com/scroll-tech/go-ethereum v1.10.14-0.20240821075135-bdd1b005d40f/go.mod h1:jLTGZ5iL5T7g1BEWrQXVIR+wutJFDTVs/mCfjAlrhrA= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= diff --git a/rollup/internal/controller/watcher/batch_proposer.go b/rollup/internal/controller/watcher/batch_proposer.go index 9aa1bbf16..721223935 100644 --- a/rollup/internal/controller/watcher/batch_proposer.go +++ b/rollup/internal/controller/watcher/batch_proposer.go @@ -235,14 +235,12 @@ func (p *BatchProposer) proposeBatch() error { return nil } - // Ensure all chunks in the same batch use the same hardfork name and same codec version - // If a different hardfork name or codec version are found, truncate the chunks slice at that point + // Ensure all blocks in the same chunk use the same hardfork name + // If a different hardfork name is found, truncate the blocks slice at that point hardforkName := forks.GetHardforkName(p.chainCfg, dbChunks[0].StartBlockNumber, dbChunks[0].StartBlockTime) - codecVersion := p.getChunkCodecVersion(firstUnbatchedChunk) for i := 1; i < len(dbChunks); i++ { currentHardfork := forks.GetHardforkName(p.chainCfg, dbChunks[i].StartBlockNumber, dbChunks[i].StartBlockTime) - currentCodecVersion := p.getChunkCodecVersion(dbChunks[i]) - if currentHardfork != hardforkName || currentCodecVersion != codecVersion { + if currentHardfork != hardforkName { dbChunks = dbChunks[:i] maxChunksThisBatch = uint64(len(dbChunks)) // update maxChunksThisBatch to trigger batching, because these chunks are the last chunks before the hardfork break @@ -259,6 +257,8 @@ func (p *BatchProposer) proposeBatch() error { return err } + codecVersion := forks.GetCodecVersion(p.chainCfg, firstUnbatchedChunk.StartBlockNumber, firstUnbatchedChunk.StartBlockTime) + var batch encoding.Batch batch.Index = dbParentBatch.Index + 1 batch.ParentBatchHash = common.HexToHash(dbParentBatch.Hash) @@ -358,13 +358,3 @@ func (p *BatchProposer) recordTimerBatchMetrics(metrics *utils.BatchMetrics) { p.batchEstimateCalldataSizeTime.Set(float64(metrics.EstimateCalldataSizeTime)) p.batchEstimateBlobSizeTime.Set(float64(metrics.EstimateBlobSizeTime)) } - -// getChunkCodecVersion returns the codec version for a given chunk. -// For backward compatibility, it handles cases where some chunks may not have -// codec version stored in the database. 
-func (p *BatchProposer) getChunkCodecVersion(firstUnbatchedChunk *orm.Chunk) encoding.CodecVersion { - if firstUnbatchedChunk.CodecVersion == -1 { - return forks.GetCodecVersion(p.chainCfg, firstUnbatchedChunk.StartBlockNumber, firstUnbatchedChunk.StartBlockTime, false) - } - return encoding.CodecVersion(firstUnbatchedChunk.CodecVersion) -} diff --git a/rollup/internal/controller/watcher/bundle_proposer.go b/rollup/internal/controller/watcher/bundle_proposer.go index 145372727..6b67347b2 100644 --- a/rollup/internal/controller/watcher/bundle_proposer.go +++ b/rollup/internal/controller/watcher/bundle_proposer.go @@ -140,8 +140,8 @@ func (p *BundleProposer) proposeBundle() error { return nil } - // Ensure all blocks in the same chunk use the same hardfork name and same codec version - // If a different hardfork name or codec version are found, truncate the batches slice at that point + // Ensure all blocks in the same chunk use the same hardfork name + // If a different hardfork name is found, truncate the blocks slice at that point firstChunk, err := p.chunkOrm.GetChunkByIndex(p.ctx, batches[0].StartChunkIndex) if err != nil { return err @@ -154,8 +154,7 @@ func (p *BundleProposer) proposeBundle() error { return err } currentHardfork := forks.GetHardforkName(p.chainCfg, chunk.StartBlockNumber, chunk.StartBlockTime) - currentCodecVersion := encoding.CodecVersion(batches[i].CodecVersion) - if currentHardfork != hardforkName || currentCodecVersion != codecVersion { + if currentHardfork != hardforkName { batches = batches[:i] maxBatchesThisBundle = uint64(i) // update maxBlocksThisChunk to trigger chunking, because these blocks are the last blocks before the hardfork break diff --git a/rollup/internal/controller/watcher/chunk_proposer.go b/rollup/internal/controller/watcher/chunk_proposer.go index 73db21564..f16ce50ac 100644 --- a/rollup/internal/controller/watcher/chunk_proposer.go +++ b/rollup/internal/controller/watcher/chunk_proposer.go @@ -36,8 +36,6 @@ type ChunkProposer struct { gasCostIncreaseMultiplier float64 maxUncompressedBatchBytesSize uint64 - enableConditionalCompress bool - chainCfg *params.ChainConfig chunkProposerCircleTotal prometheus.Counter @@ -86,7 +84,6 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai chunkTimeoutSec: cfg.ChunkTimeoutSec, gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier, maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize, - enableConditionalCompress: cfg.EnableConditionalCompress, chainCfg: chainCfg, chunkProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{ @@ -265,7 +262,7 @@ func (p *ChunkProposer) proposeChunk() error { } } - codecVersion := forks.GetCodecVersion(p.chainCfg, blocks[0].Header.Number.Uint64(), blocks[0].Header.Time, p.enableConditionalCompress) + codecVersion := forks.GetCodecVersion(p.chainCfg, blocks[0].Header.Number.Uint64(), blocks[0].Header.Time) // Including Curie block in a sole chunk. 
if p.chainCfg.CurieBlock != nil && blocks[0].Header.Number.Cmp(p.chainCfg.CurieBlock) == 0 { From 26ba82b0421a92d68bc96dae8df14f52715739e1 Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Wed, 21 Aug 2024 18:03:53 +0800 Subject: [PATCH 2/5] remove EnableConditionalCompress --- rollup/internal/config/l2.go | 1 - 1 file changed, 1 deletion(-) diff --git a/rollup/internal/config/l2.go b/rollup/internal/config/l2.go index c3a244a51..4db24fb73 100644 --- a/rollup/internal/config/l2.go +++ b/rollup/internal/config/l2.go @@ -37,7 +37,6 @@ type ChunkProposerConfig struct { MaxRowConsumptionPerChunk uint64 `json:"max_row_consumption_per_chunk"` GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"` MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"` - EnableConditionalCompress bool `json:"enable_conditional_compress"` } // BatchProposerConfig loads batch_proposer configuration items. From e142ec4c6745ca630e643bcb9fe2ea9df3e87d2a Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Wed, 21 Aug 2024 19:48:09 +0800 Subject: [PATCH 3/5] refactor: merge codec related input params --- .../internal/controller/relayer/l2_relayer.go | 4 +- .../controller/relayer/l2_relayer_test.go | 42 +++++++++--------- .../controller/watcher/batch_proposer.go | 32 ++++++++------ .../controller/watcher/batch_proposer_test.go | 44 +++++++++---------- .../watcher/bundle_proposer_test.go | 8 ++-- .../controller/watcher/chunk_proposer.go | 32 ++++++++------ rollup/internal/orm/batch.go | 8 ++-- rollup/internal/orm/chunk.go | 9 ++-- rollup/internal/orm/orm_test.go | 12 ++--- rollup/internal/utils/utils.go | 30 ++++++++----- rollup/tests/gas_oracle_test.go | 8 ++-- 11 files changed, 124 insertions(+), 105 deletions(-) diff --git a/rollup/internal/controller/relayer/l2_relayer.go b/rollup/internal/controller/relayer/l2_relayer.go index 8b5366adb..7260cb788 100644 --- a/rollup/internal/controller/relayer/l2_relayer.go +++ b/rollup/internal/controller/relayer/l2_relayer.go @@ -201,7 +201,7 @@ func (r *Layer2Relayer) initializeGenesis() error { err = r.db.Transaction(func(dbTX *gorm.DB) error { var dbChunk *orm.Chunk - dbChunk, err = r.chunkOrm.InsertChunk(r.ctx, chunk, encoding.CodecV0, false, rutils.ChunkMetrics{}, dbTX) + dbChunk, err = r.chunkOrm.InsertChunk(r.ctx, chunk, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.ChunkMetrics{}, dbTX) if err != nil { return fmt.Errorf("failed to insert chunk: %v", err) } @@ -218,7 +218,7 @@ func (r *Layer2Relayer) initializeGenesis() error { } var dbBatch *orm.Batch - dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, encoding.CodecV0, false, rutils.BatchMetrics{}, dbTX) + dbBatch, err = r.batchOrm.InsertBatch(r.ctx, batch, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.BatchMetrics{}, dbTX) if err != nil { return fmt.Errorf("failed to insert batch: %v", err) } diff --git a/rollup/internal/controller/relayer/l2_relayer_test.go b/rollup/internal/controller/relayer/l2_relayer_test.go index 4eaaa80af..593dad435 100644 --- a/rollup/internal/controller/relayer/l2_relayer_test.go +++ b/rollup/internal/controller/relayer/l2_relayer_test.go @@ -79,9 +79,9 @@ func testL2RelayerProcessPendingBatches(t *testing.T) { err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) assert.NoError(t, err) chunkOrm := orm.NewChunk(db) - _, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, false, rutils.ChunkMetrics{}) + _, err = chunkOrm.InsertChunk(context.Background(), chunk1, rutils.CodecConfig{Version: 
codecVersion}, rutils.ChunkMetrics{}) assert.NoError(t, err) - _, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, false, rutils.ChunkMetrics{}) + _, err = chunkOrm.InsertChunk(context.Background(), chunk2, rutils.CodecConfig{Version: codecVersion}, rutils.ChunkMetrics{}) assert.NoError(t, err) batch := &encoding.Batch{ @@ -92,7 +92,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) { } batchOrm := orm.NewBatch(db) - dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, false, rutils.BatchMetrics{}) + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, rutils.CodecConfig{Version: codecVersion}, rutils.BatchMetrics{}) assert.NoError(t, err) relayer.ProcessPendingBatches() @@ -128,9 +128,9 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) { err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) assert.NoError(t, err) chunkOrm := orm.NewChunk(db) - _, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, false, rutils.ChunkMetrics{}) + _, err = chunkOrm.InsertChunk(context.Background(), chunk1, rutils.CodecConfig{Version: codecVersion}, rutils.ChunkMetrics{}) assert.NoError(t, err) - _, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, false, rutils.ChunkMetrics{}) + _, err = chunkOrm.InsertChunk(context.Background(), chunk2, rutils.CodecConfig{Version: codecVersion}, rutils.ChunkMetrics{}) assert.NoError(t, err) batch := &encoding.Batch{ @@ -141,7 +141,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) { } batchOrm := orm.NewBatch(db) - dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, false, rutils.BatchMetrics{}) + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, rutils.CodecConfig{Version: codecVersion}, rutils.BatchMetrics{}) assert.NoError(t, err) err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted) @@ -197,7 +197,7 @@ func testL2RelayerProcessPendingBundles(t *testing.T) { } batchOrm := orm.NewBatch(db) - dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, false, rutils.BatchMetrics{}) + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, rutils.CodecConfig{Version: codecVersion}, rutils.BatchMetrics{}) assert.NoError(t, err) bundleOrm := orm.NewBundle(db) @@ -259,9 +259,9 @@ func testL2RelayerFinalizeTimeoutBatches(t *testing.T) { err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) assert.NoError(t, err) chunkOrm := orm.NewChunk(db) - chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, false, rutils.ChunkMetrics{}) + chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, rutils.CodecConfig{Version: codecVersion}, rutils.ChunkMetrics{}) assert.NoError(t, err) - chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, false, rutils.ChunkMetrics{}) + chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, rutils.CodecConfig{Version: codecVersion}, rutils.ChunkMetrics{}) assert.NoError(t, err) batch := &encoding.Batch{ @@ -272,7 +272,7 @@ func testL2RelayerFinalizeTimeoutBatches(t *testing.T) { } batchOrm := orm.NewBatch(db) - dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, false, rutils.BatchMetrics{}) + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, rutils.CodecConfig{Version: codecVersion}, rutils.BatchMetrics{}) assert.NoError(t, err) 
err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted) @@ -326,9 +326,9 @@ func testL2RelayerFinalizeTimeoutBundles(t *testing.T) { err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) assert.NoError(t, err) chunkOrm := orm.NewChunk(db) - chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, false, rutils.ChunkMetrics{}) + chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, rutils.CodecConfig{Version: codecVersion}, rutils.ChunkMetrics{}) assert.NoError(t, err) - chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, false, rutils.ChunkMetrics{}) + chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, rutils.CodecConfig{Version: codecVersion}, rutils.ChunkMetrics{}) assert.NoError(t, err) batch := &encoding.Batch{ @@ -339,7 +339,7 @@ func testL2RelayerFinalizeTimeoutBundles(t *testing.T) { } batchOrm := orm.NewBatch(db) - dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, false, rutils.BatchMetrics{}) + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, rutils.CodecConfig{Version: codecVersion}, rutils.BatchMetrics{}) assert.NoError(t, err) err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted) @@ -411,7 +411,7 @@ func testL2RelayerCommitConfirm(t *testing.T) { Chunks: []*encoding.Chunk{chunk1, chunk2}, } - dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, rutils.BatchMetrics{}) + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.BatchMetrics{}) assert.NoError(t, err) batchHashes[i] = dbBatch.Hash } @@ -467,7 +467,7 @@ func testL2RelayerFinalizeBatchConfirm(t *testing.T) { Chunks: []*encoding.Chunk{chunk1, chunk2}, } - dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, rutils.BatchMetrics{}) + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.BatchMetrics{}) assert.NoError(t, err) batchHashes[i] = dbBatch.Hash } @@ -525,7 +525,7 @@ func testL2RelayerFinalizeBundleConfirm(t *testing.T) { Chunks: []*encoding.Chunk{chunk1, chunk2}, } - dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, rutils.BatchMetrics{}) + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.BatchMetrics{}) assert.NoError(t, err) batchHashes[i] = dbBatch.Hash @@ -580,7 +580,7 @@ func testL2RelayerGasOracleConfirm(t *testing.T) { } batchOrm := orm.NewBatch(db) - dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, encoding.CodecV0, false, rutils.BatchMetrics{}) + dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.BatchMetrics{}) assert.NoError(t, err) batch2 := &encoding.Batch{ @@ -590,7 +590,7 @@ func testL2RelayerGasOracleConfirm(t *testing.T) { Chunks: []*encoding.Chunk{chunk2}, } - dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, encoding.CodecV0, false, rutils.BatchMetrics{}) + dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.BatchMetrics{}) assert.NoError(t, err) // Create and set up the Layer2 Relayer. 
@@ -742,9 +742,9 @@ func testGetBatchStatusByIndex(t *testing.T) { err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) assert.NoError(t, err) chunkOrm := orm.NewChunk(db) - _, err = chunkOrm.InsertChunk(context.Background(), chunk1, encoding.CodecV0, false, rutils.ChunkMetrics{}) + _, err = chunkOrm.InsertChunk(context.Background(), chunk1, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.ChunkMetrics{}) assert.NoError(t, err) - _, err = chunkOrm.InsertChunk(context.Background(), chunk2, encoding.CodecV0, false, rutils.ChunkMetrics{}) + _, err = chunkOrm.InsertChunk(context.Background(), chunk2, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.ChunkMetrics{}) assert.NoError(t, err) batch := &encoding.Batch{ @@ -755,7 +755,7 @@ func testGetBatchStatusByIndex(t *testing.T) { } batchOrm := orm.NewBatch(db) - dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, rutils.BatchMetrics{}) + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, rutils.CodecConfig{Version: encoding.CodecV0}, rutils.BatchMetrics{}) assert.NoError(t, err) status, err := relayer.getBatchStatusByIndex(dbBatch) diff --git a/rollup/internal/controller/watcher/batch_proposer.go b/rollup/internal/controller/watcher/batch_proposer.go index 721223935..08a9c17e0 100644 --- a/rollup/internal/controller/watcher/batch_proposer.go +++ b/rollup/internal/controller/watcher/batch_proposer.go @@ -151,7 +151,10 @@ func (p *BatchProposer) TryProposeBatch() { func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion encoding.CodecVersion, metrics *utils.BatchMetrics) error { compatibilityBreachOccurred := false - enableCompress := true + codecConfig := utils.CodecConfig{ + Version: codecVersion, + EnableCompress: true, // codecv4 is the only version that supports conditional compression, default to enable compression + } for { compatible, err := utils.CheckBatchCompressedDataCompatibility(batch, codecVersion) @@ -169,7 +172,7 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en if len(batch.Chunks) == 1 { log.Warn("Disable compression: cannot truncate batch with only 1 chunk for compatibility", "start block number", batch.Chunks[0].Blocks[0].Header.Number.Uint64(), "end block number", batch.Chunks[0].Blocks[len(batch.Chunks[0].Blocks)-1].Header.Number.Uint64()) - enableCompress = false + codecConfig.EnableCompress = false break } @@ -183,7 +186,7 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en // recalculate batch metrics after truncation var calcErr error - metrics, calcErr = utils.CalculateBatchMetrics(batch, codecVersion, enableCompress) + metrics, calcErr = utils.CalculateBatchMetrics(batch, codecConfig) if calcErr != nil { return fmt.Errorf("failed to calculate batch metrics, batch index: %v, error: %w", batch.Index, calcErr) } @@ -194,9 +197,9 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en p.proposeBatchUpdateInfoTotal.Inc() err := p.db.Transaction(func(dbTX *gorm.DB) error { - dbBatch, dbErr := p.batchOrm.InsertBatch(p.ctx, batch, codecVersion, enableCompress, *metrics, dbTX) + dbBatch, dbErr := p.batchOrm.InsertBatch(p.ctx, batch, codecConfig, *metrics, dbTX) if dbErr != nil { - log.Warn("BatchProposer.updateBatchInfoInDB insert batch failure", "index", batch.Index, "parent hash", batch.ParentBatchHash.Hex(), "codec version", codecVersion, "enable compress", enableCompress, "error", dbErr) + 
log.Warn("BatchProposer.updateBatchInfoInDB insert batch failure", "index", batch.Index, "parent hash", batch.ParentBatchHash.Hex(), "codec version", codecVersion, "enable compress", codecConfig.EnableCompress, "error", dbErr) return dbErr } if dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, dbBatch.StartChunkIndex, dbBatch.EndChunkIndex, dbBatch.Hash, dbTX); dbErr != nil { @@ -235,8 +238,8 @@ func (p *BatchProposer) proposeBatch() error { return nil } - // Ensure all blocks in the same chunk use the same hardfork name - // If a different hardfork name is found, truncate the blocks slice at that point + // Ensure all chunks in the same batch use the same hardfork name + // If a different hardfork name is found, truncate the chunks slice at that point hardforkName := forks.GetHardforkName(p.chainCfg, dbChunks[0].StartBlockNumber, dbChunks[0].StartBlockTime) for i := 1; i < len(dbChunks); i++ { currentHardfork := forks.GetHardforkName(p.chainCfg, dbChunks[i].StartBlockNumber, dbChunks[i].StartBlockTime) @@ -257,7 +260,10 @@ func (p *BatchProposer) proposeBatch() error { return err } - codecVersion := forks.GetCodecVersion(p.chainCfg, firstUnbatchedChunk.StartBlockNumber, firstUnbatchedChunk.StartBlockTime) + codecConfig := utils.CodecConfig{ + Version: forks.GetCodecVersion(p.chainCfg, firstUnbatchedChunk.StartBlockNumber, firstUnbatchedChunk.StartBlockTime), + EnableCompress: true, // codecv4 is the only version that supports conditional compression, default to enable compression + } var batch encoding.Batch batch.Index = dbParentBatch.Index + 1 @@ -266,7 +272,7 @@ func (p *BatchProposer) proposeBatch() error { for i, chunk := range daChunks { batch.Chunks = append(batch.Chunks, chunk) - metrics, calcErr := utils.CalculateBatchMetrics(&batch, codecVersion, true /* enable compress for codecv4 */) + metrics, calcErr := utils.CalculateBatchMetrics(&batch, codecConfig) if calcErr != nil { return fmt.Errorf("failed to calculate batch metrics: %w", calcErr) } @@ -295,17 +301,17 @@ func (p *BatchProposer) proposeBatch() error { batch.Chunks = batch.Chunks[:len(batch.Chunks)-1] - metrics, err := utils.CalculateBatchMetrics(&batch, codecVersion, true /* enable compress for codecv4 */) + metrics, err := utils.CalculateBatchMetrics(&batch, codecConfig) if err != nil { return fmt.Errorf("failed to calculate batch metrics: %w", err) } p.recordAllBatchMetrics(metrics) - return p.updateDBBatchInfo(&batch, codecVersion, metrics) + return p.updateDBBatchInfo(&batch, codecConfig.Version, metrics) } } - metrics, calcErr := utils.CalculateBatchMetrics(&batch, codecVersion, true /* enable compress for codecv4 */) + metrics, calcErr := utils.CalculateBatchMetrics(&batch, codecConfig) if calcErr != nil { return fmt.Errorf("failed to calculate batch metrics: %w", calcErr) } @@ -319,7 +325,7 @@ func (p *BatchProposer) proposeBatch() error { p.batchFirstBlockTimeoutReached.Inc() p.recordAllBatchMetrics(metrics) - return p.updateDBBatchInfo(&batch, codecVersion, metrics) + return p.updateDBBatchInfo(&batch, codecConfig.Version, metrics) } log.Debug("pending chunks do not reach one of the constraints or contain a timeout block") diff --git a/rollup/internal/controller/watcher/batch_proposer_test.go b/rollup/internal/controller/watcher/batch_proposer_test.go index c38329192..a4f32a8fd 100644 --- a/rollup/internal/controller/watcher/batch_proposer_test.go +++ b/rollup/internal/controller/watcher/batch_proposer_test.go @@ -92,7 +92,7 @@ func testBatchProposerCodecv0Limits(t *testing.T) { Blocks: 
[]*encoding.Block{block}, } chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, false, utils.ChunkMetrics{}) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{}) assert.NoError(t, err) batch := &encoding.Batch{ Index: 0, @@ -101,7 +101,7 @@ func testBatchProposerCodecv0Limits(t *testing.T) { Chunks: []*encoding.Chunk{chunk}, } batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, utils.BatchMetrics{}) + _, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{}) assert.NoError(t, err) l2BlockOrm := orm.NewL2Block(db) @@ -229,7 +229,7 @@ func testBatchProposerCodecv1Limits(t *testing.T) { Blocks: []*encoding.Block{block}, } chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, false, utils.ChunkMetrics{}) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{}) assert.NoError(t, err) batch := &encoding.Batch{ Index: 0, @@ -238,7 +238,7 @@ func testBatchProposerCodecv1Limits(t *testing.T) { Chunks: []*encoding.Chunk{chunk}, } batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, utils.BatchMetrics{}) + _, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{}) assert.NoError(t, err) l2BlockOrm := orm.NewL2Block(db) @@ -370,7 +370,7 @@ func testBatchProposerCodecv2Limits(t *testing.T) { Blocks: []*encoding.Block{block}, } chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, false, utils.ChunkMetrics{}) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{}) assert.NoError(t, err) batch := &encoding.Batch{ Index: 0, @@ -379,7 +379,7 @@ func testBatchProposerCodecv2Limits(t *testing.T) { Chunks: []*encoding.Chunk{chunk}, } batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, utils.BatchMetrics{}) + _, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{}) assert.NoError(t, err) l2BlockOrm := orm.NewL2Block(db) @@ -515,7 +515,7 @@ func testBatchProposerCodecv3Limits(t *testing.T) { Blocks: []*encoding.Block{block}, } chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, false, utils.ChunkMetrics{}) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{}) assert.NoError(t, err) batch := &encoding.Batch{ Index: 0, @@ -524,7 +524,7 @@ func testBatchProposerCodecv3Limits(t *testing.T) { Chunks: []*encoding.Chunk{chunk}, } batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, utils.BatchMetrics{}) + _, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{}) assert.NoError(t, err) l2BlockOrm := orm.NewL2Block(db) @@ -605,7 +605,7 @@ func testBatchCommitGasAndCalldataSizeCodecv0Estimation(t *testing.T) { Blocks: []*encoding.Block{block}, } chunkOrm := orm.NewChunk(db) - _, err := 
chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, false, utils.ChunkMetrics{}) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{}) assert.NoError(t, err) batch := &encoding.Batch{ Index: 0, @@ -614,7 +614,7 @@ func testBatchCommitGasAndCalldataSizeCodecv0Estimation(t *testing.T) { Chunks: []*encoding.Chunk{chunk}, } batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, utils.BatchMetrics{}) + _, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{}) assert.NoError(t, err) l2BlockOrm := orm.NewL2Block(db) @@ -684,7 +684,7 @@ func testBatchCommitGasAndCalldataSizeCodecv1Estimation(t *testing.T) { Blocks: []*encoding.Block{block}, } chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, false, utils.ChunkMetrics{}) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{}) assert.NoError(t, err) batch := &encoding.Batch{ Index: 0, @@ -693,7 +693,7 @@ func testBatchCommitGasAndCalldataSizeCodecv1Estimation(t *testing.T) { Chunks: []*encoding.Chunk{chunk}, } batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, utils.BatchMetrics{}) + _, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{}) assert.NoError(t, err) l2BlockOrm := orm.NewL2Block(db) @@ -763,7 +763,7 @@ func testBatchCommitGasAndCalldataSizeCodecv2Estimation(t *testing.T) { Blocks: []*encoding.Block{block}, } chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, false, utils.ChunkMetrics{}) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{}) assert.NoError(t, err) batch := &encoding.Batch{ Index: 0, @@ -772,7 +772,7 @@ func testBatchCommitGasAndCalldataSizeCodecv2Estimation(t *testing.T) { Chunks: []*encoding.Chunk{chunk}, } batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, utils.BatchMetrics{}) + _, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{}) assert.NoError(t, err) l2BlockOrm := orm.NewL2Block(db) @@ -844,7 +844,7 @@ func testBatchCommitGasAndCalldataSizeCodecv3Estimation(t *testing.T) { Blocks: []*encoding.Block{block}, } chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, false, utils.ChunkMetrics{}) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{}) assert.NoError(t, err) batch := &encoding.Batch{ Index: 0, @@ -853,7 +853,7 @@ func testBatchCommitGasAndCalldataSizeCodecv3Estimation(t *testing.T) { Chunks: []*encoding.Chunk{chunk}, } batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, utils.BatchMetrics{}) + _, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{}) assert.NoError(t, err) l2BlockOrm := orm.NewL2Block(db) @@ -926,7 +926,7 @@ func testBatchProposerBlobSizeLimit(t *testing.T) { Blocks: 
[]*encoding.Block{block}, } chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, false, utils.ChunkMetrics{}) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{}) assert.NoError(t, err) batch := &encoding.Batch{ Index: 0, @@ -935,7 +935,7 @@ func testBatchProposerBlobSizeLimit(t *testing.T) { Chunks: []*encoding.Chunk{chunk}, } batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, utils.BatchMetrics{}) + _, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{}) assert.NoError(t, err) var chainConfig *params.ChainConfig @@ -1029,7 +1029,7 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) { Blocks: []*encoding.Block{block}, } chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, false, utils.ChunkMetrics{}) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{}) assert.NoError(t, err) batch := &encoding.Batch{ Index: 0, @@ -1038,7 +1038,7 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) { Chunks: []*encoding.Chunk{chunk}, } batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, utils.BatchMetrics{}) + _, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{}) assert.NoError(t, err) var expectedChunkNum uint64 @@ -1117,7 +1117,7 @@ func testBatchProposerRespectHardforks(t *testing.T) { Blocks: []*encoding.Block{block}, } chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, false, utils.ChunkMetrics{}) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{}) assert.NoError(t, err) batch := &encoding.Batch{ Index: 0, @@ -1126,7 +1126,7 @@ func testBatchProposerRespectHardforks(t *testing.T) { Chunks: []*encoding.Chunk{chunk}, } batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, utils.BatchMetrics{}) + _, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{}) assert.NoError(t, err) cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ diff --git a/rollup/internal/controller/watcher/bundle_proposer_test.go b/rollup/internal/controller/watcher/bundle_proposer_test.go index 5f9d9d372..ab575c737 100644 --- a/rollup/internal/controller/watcher/bundle_proposer_test.go +++ b/rollup/internal/controller/watcher/bundle_proposer_test.go @@ -72,7 +72,7 @@ func testBundleProposerLimits(t *testing.T) { Blocks: []*encoding.Block{block}, } chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, false, utils.ChunkMetrics{}) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{}) assert.NoError(t, err) batch := &encoding.Batch{ Index: 0, @@ -81,7 +81,7 @@ func testBundleProposerLimits(t *testing.T) { Chunks: []*encoding.Chunk{chunk}, } batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, utils.BatchMetrics{}) + 
_, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{}) assert.NoError(t, err) l2BlockOrm := orm.NewL2Block(db) @@ -156,7 +156,7 @@ func testBundleProposerRespectHardforks(t *testing.T) { Blocks: []*encoding.Block{block}, } chunkOrm := orm.NewChunk(db) - _, err := chunkOrm.InsertChunk(context.Background(), chunk, encoding.CodecV0, false, utils.ChunkMetrics{}) + _, err := chunkOrm.InsertChunk(context.Background(), chunk, utils.CodecConfig{Version: encoding.CodecV0}, utils.ChunkMetrics{}) assert.NoError(t, err) batch := &encoding.Batch{ Index: 0, @@ -165,7 +165,7 @@ func testBundleProposerRespectHardforks(t *testing.T) { Chunks: []*encoding.Chunk{chunk}, } batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, utils.BatchMetrics{}) + _, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{}) assert.NoError(t, err) cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{ diff --git a/rollup/internal/controller/watcher/chunk_proposer.go b/rollup/internal/controller/watcher/chunk_proposer.go index f16ce50ac..7395d5364 100644 --- a/rollup/internal/controller/watcher/chunk_proposer.go +++ b/rollup/internal/controller/watcher/chunk_proposer.go @@ -171,7 +171,10 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en } compatibilityBreachOccurred := false - enableCompress := true + codecConfig := utils.CodecConfig{ + Version: codecVersion, + EnableCompress: true, + } for { compatible, err := utils.CheckChunkCompressedDataCompatibility(chunk, codecVersion) @@ -188,7 +191,7 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en if len(chunk.Blocks) == 1 { log.Warn("Disable compression: cannot truncate chunk with only 1 block for compatibility", "block number", chunk.Blocks[0].Header.Number) - enableCompress = false + codecConfig.EnableCompress = false } chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1] @@ -201,7 +204,7 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en // recalculate chunk metrics after truncation var calcErr error - metrics, calcErr = utils.CalculateChunkMetrics(chunk, codecVersion, enableCompress) + metrics, calcErr = utils.CalculateChunkMetrics(chunk, codecConfig) if calcErr != nil { return fmt.Errorf("failed to calculate chunk metrics, start block number: %v, error: %w", chunk.Blocks[0].Header.Number, calcErr) } @@ -212,9 +215,9 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en p.proposeChunkUpdateInfoTotal.Inc() err := p.db.Transaction(func(dbTX *gorm.DB) error { - dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, codecVersion, enableCompress, *metrics, dbTX) + dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, codecConfig, *metrics, dbTX) if err != nil { - log.Warn("ChunkProposer.InsertChunk failed", "codec version", codecVersion, "enable compress", enableCompress, "err", err) + log.Warn("ChunkProposer.InsertChunk failed", "codec version", codecVersion, "enable compress", codecConfig.EnableCompress, "err", err) return err } if err := p.l2BlockOrm.UpdateChunkHashInRange(p.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil { @@ -262,24 +265,27 @@ func (p *ChunkProposer) proposeChunk() error { } } - codecVersion := forks.GetCodecVersion(p.chainCfg, blocks[0].Header.Number.Uint64(), blocks[0].Header.Time) + 
codecConfig := utils.CodecConfig{ + Version: forks.GetCodecVersion(p.chainCfg, blocks[0].Header.Number.Uint64(), blocks[0].Header.Time), + EnableCompress: true, // codecv4 is the only version that supports conditional compression, default to enable compression + } // Including Curie block in a sole chunk. if p.chainCfg.CurieBlock != nil && blocks[0].Header.Number.Cmp(p.chainCfg.CurieBlock) == 0 { chunk := encoding.Chunk{Blocks: blocks[:1]} - metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion, true /* enable compress for codecv4 */) + metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecConfig) if calcErr != nil { return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr) } p.recordTimerChunkMetrics(metrics) - return p.updateDBChunkInfo(&chunk, codecVersion, metrics) + return p.updateDBChunkInfo(&chunk, codecConfig.Version, metrics) } var chunk encoding.Chunk for i, block := range blocks { chunk.Blocks = append(chunk.Blocks, block) - metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion, true /* enable compress for codecv4 */) + metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecConfig) if calcErr != nil { return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr) } @@ -316,17 +322,17 @@ func (p *ChunkProposer) proposeChunk() error { chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1] - metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion, true /* enable compress for codecv4 */) + metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecConfig) if calcErr != nil { return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr) } p.recordAllChunkMetrics(metrics) - return p.updateDBChunkInfo(&chunk, codecVersion, metrics) + return p.updateDBChunkInfo(&chunk, codecConfig.Version, metrics) } } - metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion, true /* enable compress for codecv4 */) + metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecConfig) if calcErr != nil { return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr) } @@ -341,7 +347,7 @@ func (p *ChunkProposer) proposeChunk() error { p.chunkFirstBlockTimeoutReached.Inc() p.recordAllChunkMetrics(metrics) - return p.updateDBChunkInfo(&chunk, codecVersion, metrics) + return p.updateDBChunkInfo(&chunk, codecConfig.Version, metrics) } log.Debug("pending blocks do not reach one of the constraints or contain a timeout block") diff --git a/rollup/internal/orm/batch.go b/rollup/internal/orm/batch.go index 3b395017d..ffd128b7b 100644 --- a/rollup/internal/orm/batch.go +++ b/rollup/internal/orm/batch.go @@ -249,7 +249,7 @@ func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, erro } // InsertBatch inserts a new batch into the database. 
-func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVersion encoding.CodecVersion, enableCompress bool, metrics rutils.BatchMetrics, dbTX ...*gorm.DB) (*Batch, error) { +func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecConfig rutils.CodecConfig, metrics rutils.BatchMetrics, dbTX ...*gorm.DB) (*Batch, error) { if batch == nil { return nil, errors.New("invalid args: batch is nil") } @@ -270,7 +270,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer startChunkIndex = parentBatch.EndChunkIndex + 1 } - batchMeta, err := rutils.GetBatchMetadata(batch, codecVersion, enableCompress) + batchMeta, err := rutils.GetBatchMetadata(batch, codecConfig) if err != nil { log.Error("failed to get batch metadata", "index", batch.Index, "total l1 message popped before", batch.TotalL1MessagePoppedBefore, "parent hash", batch.ParentBatchHash, "number of chunks", numChunks, "err", err) @@ -289,8 +289,8 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer WithdrawRoot: batch.WithdrawRoot().Hex(), ParentBatchHash: batch.ParentBatchHash.Hex(), BatchHeader: batchMeta.BatchBytes, - CodecVersion: int16(codecVersion), - EnableCompress: enableCompress, + CodecVersion: int16(codecConfig.Version), + EnableCompress: codecConfig.EnableCompress, ChunkProofsStatus: int16(types.ChunkProofsStatusPending), ProvingStatus: int16(types.ProvingTaskUnassigned), RollupStatus: int16(types.RollupPending), diff --git a/rollup/internal/orm/chunk.go b/rollup/internal/orm/chunk.go index cf0dbfbee..acae796d3 100644 --- a/rollup/internal/orm/chunk.go +++ b/rollup/internal/orm/chunk.go @@ -13,6 +13,7 @@ import ( "scroll-tech/common/types" "scroll-tech/rollup/internal/utils" + rutils "scroll-tech/rollup/internal/utils" ) // Chunk represents a chunk of blocks in the database. @@ -177,7 +178,7 @@ func (o *Chunk) GetChunksByBatchHash(ctx context.Context, batchHash string) ([]* } // InsertChunk inserts a new chunk into the database. 
-func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, enableCompress bool, metrics utils.ChunkMetrics, dbTX ...*gorm.DB) (*Chunk, error) { +func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecConfig rutils.CodecConfig, metrics utils.ChunkMetrics, dbTX ...*gorm.DB) (*Chunk, error) { if chunk == nil || len(chunk.Blocks) == 0 { return nil, errors.New("invalid args") } @@ -202,7 +203,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer parentChunkStateRoot = parentChunk.StateRoot } - chunkHash, err := utils.GetChunkHash(chunk, totalL1MessagePoppedBefore, codecVersion) + chunkHash, err := utils.GetChunkHash(chunk, totalL1MessagePoppedBefore, codecConfig.Version) if err != nil { log.Error("failed to get chunk hash", "err", err) return nil, fmt.Errorf("Chunk.InsertChunk error: %w", err) @@ -227,8 +228,8 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer StateRoot: chunk.Blocks[numBlocks-1].Header.Root.Hex(), ParentChunkStateRoot: parentChunkStateRoot, WithdrawRoot: chunk.Blocks[numBlocks-1].WithdrawRoot.Hex(), - CodecVersion: int16(codecVersion), - EnableCompress: enableCompress, + CodecVersion: int16(codecConfig.Version), + EnableCompress: codecConfig.EnableCompress, ProvingStatus: int16(types.ProvingTaskUnassigned), CrcMax: metrics.CrcMax, BlobSize: metrics.L1CommitBlobSize, diff --git a/rollup/internal/orm/orm_test.go b/rollup/internal/orm/orm_test.go index 42e17ca6b..d35f561fb 100644 --- a/rollup/internal/orm/orm_test.go +++ b/rollup/internal/orm/orm_test.go @@ -221,11 +221,11 @@ func TestChunkOrm(t *testing.T) { assert.NoError(t, err) } - dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion, false, utils.ChunkMetrics{}) + dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1, utils.CodecConfig{Version: encoding.CodecV3}, utils.ChunkMetrics{}) assert.NoError(t, err) assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex()) - dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion, false, utils.ChunkMetrics{}) + dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2, utils.CodecConfig{Version: encoding.CodecV3}, utils.ChunkMetrics{}) assert.NoError(t, err) assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex()) @@ -275,7 +275,7 @@ func TestBatchOrm(t *testing.T) { Index: 0, Chunks: []*encoding.Chunk{chunk1}, } - batch1, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, false, utils.BatchMetrics{}) + batch1, err := batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: codecVersion}, utils.BatchMetrics{}) assert.NoError(t, err) hash1 := batch1.Hash @@ -306,7 +306,7 @@ func TestBatchOrm(t *testing.T) { Index: 1, Chunks: []*encoding.Chunk{chunk2}, } - batch2, err := batchOrm.InsertBatch(context.Background(), batch, codecVersion, false, utils.BatchMetrics{}) + batch2, err := batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: codecVersion}, utils.BatchMetrics{}) assert.NoError(t, err) hash2 := batch2.Hash @@ -432,7 +432,7 @@ func TestBundleOrm(t *testing.T) { Index: 0, Chunks: []*encoding.Chunk{chunk1}, } - dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, encoding.CodecV3, false, utils.BatchMetrics{}) + dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, utils.CodecConfig{Version: encoding.CodecV3}, utils.BatchMetrics{}) assert.NoError(t, err) chunk2 := &encoding.Chunk{Blocks: 
[]*encoding.Block{block2}} @@ -440,7 +440,7 @@ func TestBundleOrm(t *testing.T) { Index: 1, Chunks: []*encoding.Chunk{chunk2}, } - dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, encoding.CodecV3, false, utils.BatchMetrics{}) + dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, utils.CodecConfig{Version: encoding.CodecV3}, utils.BatchMetrics{}) assert.NoError(t, err) var bundle1 *Bundle diff --git a/rollup/internal/utils/utils.go b/rollup/internal/utils/utils.go index 6d400844c..b5ab2aa35 100644 --- a/rollup/internal/utils/utils.go +++ b/rollup/internal/utils/utils.go @@ -13,6 +13,12 @@ import ( "github.com/scroll-tech/go-ethereum/common" ) +// CodecConfig holds the configuration for codec-related operations +type CodecConfig struct { + Version encoding.CodecVersion + EnableCompress bool +} + // ChunkMetrics indicates the metrics for proposing a chunk. type ChunkMetrics struct { // common metrics @@ -37,7 +43,7 @@ type ChunkMetrics struct { } // CalculateChunkMetrics calculates chunk metrics. -func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVersion, enableCompress bool) (*ChunkMetrics, error) { +func CalculateChunkMetrics(chunk *encoding.Chunk, codecConfig CodecConfig) (*ChunkMetrics, error) { var err error metrics := &ChunkMetrics{ TxNum: chunk.NumTransactions(), @@ -48,7 +54,7 @@ func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVer if err != nil { return nil, fmt.Errorf("failed to get crc max: %w", err) } - switch codecVersion { + switch codecConfig.Version { case encoding.CodecV0: start := time.Now() metrics.L1CommitGas, err = codecv0.EstimateChunkL1CommitGas(chunk) @@ -122,14 +128,14 @@ func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVer metrics.EstimateCalldataSizeTime = time.Since(start) start = time.Now() - metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv4.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk, enableCompress) + metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv4.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk, codecConfig.EnableCompress) metrics.EstimateBlobSizeTime = time.Since(start) if err != nil { return nil, fmt.Errorf("failed to estimate codecv4 chunk L1 commit batch size and blob size: %w", err) } return metrics, nil default: - return nil, fmt.Errorf("unsupported codec version: %v", codecVersion) + return nil, fmt.Errorf("unsupported codec version: %v", codecConfig.Version) } } @@ -187,13 +193,13 @@ type BatchMetrics struct { } // CalculateBatchMetrics calculates batch metrics. 
-func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVersion, enableCompress bool) (*BatchMetrics, error) { +func CalculateBatchMetrics(batch *encoding.Batch, codecConfig CodecConfig) (*BatchMetrics, error) { var err error metrics := &BatchMetrics{ NumChunks: uint64(len(batch.Chunks)), FirstBlockTimestamp: batch.Chunks[0].Blocks[0].Header.Time, } - switch codecVersion { + switch codecConfig.Version { case encoding.CodecV0: start := time.Now() metrics.L1CommitGas, err = codecv0.EstimateBatchL1CommitGas(batch) @@ -266,14 +272,14 @@ func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVer metrics.EstimateCalldataSizeTime = time.Since(start) start = time.Now() - metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv4.EstimateBatchL1CommitBatchSizeAndBlobSize(batch, enableCompress) + metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv4.EstimateBatchL1CommitBatchSizeAndBlobSize(batch, codecConfig.EnableCompress) metrics.EstimateBlobSizeTime = time.Since(start) if err != nil { return nil, fmt.Errorf("failed to estimate codecv4 batch L1 commit batch size and blob size: %w", err) } return metrics, nil default: - return nil, fmt.Errorf("unsupported codec version: %v", codecVersion) + return nil, fmt.Errorf("unsupported codec version: %v", codecConfig.Version) } } @@ -349,14 +355,14 @@ type BatchMetadata struct { // TODO: refactor this function to reduce cyclomatic complexity // //gocyclo:ignore -func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion, enableCompress bool) (*BatchMetadata, error) { +func GetBatchMetadata(batch *encoding.Batch, codecConfig CodecConfig) (*BatchMetadata, error) { numChunks := len(batch.Chunks) totalL1MessagePoppedBeforeEndDAChunk := batch.TotalL1MessagePoppedBefore for i := 0; i < numChunks-1; i++ { totalL1MessagePoppedBeforeEndDAChunk += batch.Chunks[i].NumL1Messages(totalL1MessagePoppedBeforeEndDAChunk) } - switch codecVersion { + switch codecConfig.Version { case encoding.CodecV0: daBatch, err := codecv0.NewDABatch(batch) if err != nil { @@ -505,7 +511,7 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion, } return batchMeta, nil case encoding.CodecV4: - daBatch, err := codecv4.NewDABatch(batch, enableCompress) + daBatch, err := codecv4.NewDABatch(batch, codecConfig.EnableCompress) if err != nil { return nil, fmt.Errorf("failed to create codecv4 DA batch: %w", err) } @@ -543,6 +549,6 @@ func GetBatchMetadata(batch *encoding.Batch, codecVersion encoding.CodecVersion, } return batchMeta, nil default: - return nil, fmt.Errorf("unsupported codec version: %v", codecVersion) + return nil, fmt.Errorf("unsupported codec version: %v", codecConfig.Version) } } diff --git a/rollup/tests/gas_oracle_test.go b/rollup/tests/gas_oracle_test.go index e97425571..405fc2f5e 100644 --- a/rollup/tests/gas_oracle_test.go +++ b/rollup/tests/gas_oracle_test.go @@ -80,7 +80,7 @@ func testImportL1GasPrice(t *testing.T) { Chunks: []*encoding.Chunk{chunk}, } batchOrm := orm.NewBatch(db) - dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, utils.BatchMetrics{}) + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{}) assert.NoError(t, err) err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), dbBatch.Hash, common.Hash{}.String(), types.RollupCommitted) assert.NoError(t, err) @@ -153,7 +153,7 @@ func 
testImportL1GasPriceAfterCurie(t *testing.T) { Chunks: []*encoding.Chunk{chunk}, } batchOrm := orm.NewBatch(db) - dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, utils.BatchMetrics{}) + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{}) assert.NoError(t, err) err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), dbBatch.Hash, common.Hash{}.String(), types.RollupCommitted) assert.NoError(t, err) @@ -228,7 +228,7 @@ func testImportDefaultL1GasPriceDueToL1GasPriceSpike(t *testing.T) { Chunks: []*encoding.Chunk{chunk}, } batchOrm := orm.NewBatch(db) - dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, utils.BatchMetrics{}) + dbBatch, err := batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{}) assert.NoError(t, err) err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), dbBatch.Hash, common.Hash{}.String(), types.RollupCommitted) assert.NoError(t, err) @@ -301,7 +301,7 @@ func testImportL2GasPrice(t *testing.T) { } batchOrm := orm.NewBatch(db) - _, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, false, utils.BatchMetrics{}) + _, err = batchOrm.InsertBatch(context.Background(), batch, utils.CodecConfig{Version: encoding.CodecV0}, utils.BatchMetrics{}) assert.NoError(t, err) // check db status From 067009cbfdf19d04eb17b14eefdb277e3afd5d3f Mon Sep 17 00:00:00 2001 From: colinlyguo Date: Wed, 21 Aug 2024 20:58:38 +0800 Subject: [PATCH 4/5] fix tests --- rollup/internal/orm/orm_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rollup/internal/orm/orm_test.go b/rollup/internal/orm/orm_test.go index d35f561fb..96dbb98dc 100644 --- a/rollup/internal/orm/orm_test.go +++ b/rollup/internal/orm/orm_test.go @@ -221,11 +221,11 @@ func TestChunkOrm(t *testing.T) { assert.NoError(t, err) } - dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1, utils.CodecConfig{Version: encoding.CodecV3}, utils.ChunkMetrics{}) + dbChunk1, err := chunkOrm.InsertChunk(context.Background(), chunk1, utils.CodecConfig{Version: codecVersion}, utils.ChunkMetrics{}) assert.NoError(t, err) assert.Equal(t, dbChunk1.Hash, chunkHash1.Hex()) - dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2, utils.CodecConfig{Version: encoding.CodecV3}, utils.ChunkMetrics{}) + dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2, utils.CodecConfig{Version: codecVersion}, utils.ChunkMetrics{}) assert.NoError(t, err) assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex()) From 517469a55dbb518c7742f0b2f5b1d3c231bf906e Mon Sep 17 00:00:00 2001 From: Mengran Lan Date: Thu, 22 Aug 2024 16:03:20 +0800 Subject: [PATCH 5/5] feat(prover): log utilization (#1491) --- prover/src/coordinator_client.rs | 2 ++ prover/src/coordinator_client/api.rs | 22 ++++++++++++++++++++-- prover/src/coordinator_client/errors.rs | 12 ++++++++++++ prover/src/main.rs | 9 ++++++++- prover/src/task_processor.rs | 25 ++++++++++++++++++------- prover/src/zk_circuits_handler.rs | 2 -- 6 files changed, 60 insertions(+), 12 deletions(-) diff --git a/prover/src/coordinator_client.rs b/prover/src/coordinator_client.rs index 56fd58954..46067d7cc 100644 --- a/prover/src/coordinator_client.rs +++ b/prover/src/coordinator_client.rs @@ -14,6 +14,8 @@ use types::*; use crate::{config::Config, key_signer::KeySigner}; +pub use 
errors::ProofStatusNotOKError; + pub struct CoordinatorClient<'a> { api: Api, token: Option, diff --git a/prover/src/coordinator_client/api.rs b/prover/src/coordinator_client/api.rs index 53fdf2d49..905a1e61c 100644 --- a/prover/src/coordinator_client/api.rs +++ b/prover/src/coordinator_client/api.rs @@ -1,4 +1,6 @@ -use super::types::*; +use crate::{coordinator_client::ProofStatusNotOKError, types::ProofStatus}; + +use super::{errors::*, types::*}; use anyhow::{bail, Result}; use core::time::Duration; use reqwest::{header::CONTENT_TYPE, Url}; @@ -76,7 +78,23 @@ impl Api { token: &String, ) -> Result> { let method = "/coordinator/v1/submit_proof"; - self.post_with_token(method, req, token).await + let response = self + .post_with_token::>( + method, req, token, + ) + .await?; + + // when req's status already not ok, we mark the error returned from coordinator and will + // ignore it later. + if response.errcode == ErrorCode::ErrCoordinatorHandleZkProofFailure + && req.status != ProofStatus::Ok + && response + .errmsg + .contains("validator failure proof msg status not ok") + { + return Err(anyhow::anyhow!(ProofStatusNotOKError)); + } + Ok(response) } async fn post_with_token( diff --git a/prover/src/coordinator_client/errors.rs b/prover/src/coordinator_client/errors.rs index 2d27b3cd8..9bad256fa 100644 --- a/prover/src/coordinator_client/errors.rs +++ b/prover/src/coordinator_client/errors.rs @@ -1,4 +1,5 @@ use serde::{Deserialize, Deserializer}; +use std::fmt; #[derive(Debug, Clone, Copy, PartialEq)] pub enum ErrorCode { @@ -51,3 +52,14 @@ impl<'de> Deserialize<'de> for ErrorCode { Ok(ErrorCode::from_i32(v)) } } + +// ==================================================== + +#[derive(Debug, Clone)] +pub struct ProofStatusNotOKError; + +impl fmt::Display for ProofStatusNotOKError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "proof status not ok") + } +} diff --git a/prover/src/main.rs b/prover/src/main.rs index 22605a7a5..d1bf82830 100644 --- a/prover/src/main.rs +++ b/prover/src/main.rs @@ -37,7 +37,7 @@ struct Args { log_file: Option, } -fn main() -> Result<(), Box> { +fn start() -> Result<()> { let args = Args::parse(); if args.version { @@ -76,3 +76,10 @@ fn main() -> Result<(), Box> { Ok(()) } + +fn main() { + let result = start(); + if let Err(e) = result { + log::error!("main exit with error {:#}", e) + } +} diff --git a/prover/src/task_processor.rs b/prover/src/task_processor.rs index 3b4044f96..df4629d5b 100644 --- a/prover/src/task_processor.rs +++ b/prover/src/task_processor.rs @@ -1,4 +1,4 @@ -use super::{prover::Prover, task_cache::TaskCache}; +use super::{coordinator_client::ProofStatusNotOKError, prover::Prover, task_cache::TaskCache}; use anyhow::{Context, Result}; use std::rc::Rc; @@ -16,7 +16,11 @@ impl<'a> TaskProcessor<'a> { loop { log::info!("start a new round."); if let Err(err) = self.prove_and_submit() { - log::error!("encounter error: {:#}", err); + if err.is::() { + log::info!("proof status not ok, downgrade level to info."); + } else { + log::error!("encounter error: {:#}", err); + } } else { log::info!("prove & submit succeed."); } @@ -54,11 +58,18 @@ impl<'a> TaskProcessor<'a> { ); let result = match self.prover.prove_task(&task_wrapper.task) { Ok(proof_detail) => self.prover.submit_proof(proof_detail, &task_wrapper.task), - Err(error) => self.prover.submit_error( - &task_wrapper.task, - super::types::ProofFailureType::NoPanic, - error, - ), + Err(error) => { + log::error!( + "failed to prove task, id: {}, error: {:#}", + 
&task_wrapper.task.id, + error + ); + self.prover.submit_error( + &task_wrapper.task, + super::types::ProofFailureType::NoPanic, + error, + ) + } }; return result; } diff --git a/prover/src/zk_circuits_handler.rs b/prover/src/zk_circuits_handler.rs index db254e2ab..ac2a43e38 100644 --- a/prover/src/zk_circuits_handler.rs +++ b/prover/src/zk_circuits_handler.rs @@ -119,7 +119,6 @@ impl<'a> CircuitsHandlerProvider<'a> { if let Some(handler) = &self.current_circuit { Ok(handler.clone()) } else { - log::error!("missing cached handler, there must be something wrong."); bail!("missing cached handler, there must be something wrong.") } } @@ -136,7 +135,6 @@ impl<'a> CircuitsHandlerProvider<'a> { self.current_circuit = Some(rc_handler.clone()); Ok(rc_handler) } else { - log::error!("missing builder, there must be something wrong."); bail!("missing builder, there must be something wrong.") } }
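
For reference, a minimal standalone Go sketch of the two ideas these patches combine: PATCH 1/5 makes GetCodecVersion derive the codec purely from the active hardfork (CodecV4 once DarwinV2 is active, no enableConditionalCompress parameter), and PATCH 3/5 bundles the codec version and the compression toggle into a single CodecConfig value. The ChainConfig and CodecConfig types below are simplified stand-ins for the real params.ChainConfig and rollup utils types (the real fork fields are *big.Int block numbers and *uint64 timestamps), not the actual library APIs; this is only an illustration of the selection logic shown in the diffs.

    package main

    import "fmt"

    // CodecVersion mirrors encoding.CodecVersion from scroll-tech/da-codec (simplified stand-in).
    type CodecVersion int

    const (
        CodecV0 CodecVersion = iota
        CodecV1
        CodecV2
        CodecV3
        CodecV4
    )

    // ChainConfig is a simplified stand-in for params.ChainConfig: only the fork checks used here.
    type ChainConfig struct {
        BernoulliBlock, CurieBlock uint64 // block-number-activated forks
        DarwinTime, DarwinV2Time   uint64 // timestamp-activated forks
    }

    func (c *ChainConfig) IsBernoulli(n uint64) bool { return n >= c.BernoulliBlock }
    func (c *ChainConfig) IsCurie(n uint64) bool     { return n >= c.CurieBlock }
    func (c *ChainConfig) IsDarwin(t uint64) bool    { return t >= c.DarwinTime }
    func (c *ChainConfig) IsDarwinV2(t uint64) bool  { return t >= c.DarwinV2Time }

    // GetCodecVersion reproduces the post-patch selection logic: the active hardfork alone
    // decides the codec, and CodecV4 is used once DarwinV2 is active.
    func GetCodecVersion(cfg *ChainConfig, blockHeight, blockTimestamp uint64) CodecVersion {
        switch {
        case !cfg.IsBernoulli(blockHeight):
            return CodecV0
        case !cfg.IsCurie(blockHeight):
            return CodecV1
        case !cfg.IsDarwin(blockTimestamp):
            return CodecV2
        case !cfg.IsDarwinV2(blockTimestamp):
            return CodecV3
        default:
            return CodecV4
        }
    }

    // CodecConfig mirrors the struct added in rollup/internal/utils: the codec version and the
    // compression toggle travel together instead of being passed as two separate parameters.
    type CodecConfig struct {
        Version        CodecVersion
        EnableCompress bool
    }

    func main() {
        // Hypothetical fork activation points, for illustration only.
        cfg := &ChainConfig{BernoulliBlock: 100, CurieBlock: 200, DarwinTime: 1_700_000_000, DarwinV2Time: 1_800_000_000}
        codecCfg := CodecConfig{
            Version:        GetCodecVersion(cfg, 250, 1_850_000_000),
            EnableCompress: true, // codecv4 supports conditional compression; the proposers default to enabling it
        }
        fmt.Printf("codec version: %d, enable compress: %v\n", codecCfg.Version, codecCfg.EnableCompress)
    }

This matches the call pattern introduced in the proposers and ORM layers above, where callers now build a utils.CodecConfig once and pass it to InsertChunk, InsertBatch, CalculateChunkMetrics, and CalculateBatchMetrics instead of threading (codecVersion, enableCompress) pairs through each signature.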