diff --git a/bridge-history-api/internal/config/config.go b/bridge-history-api/internal/config/config.go index d52a112ba4..d82bf87851 100644 --- a/bridge-history-api/internal/config/config.go +++ b/bridge-history-api/internal/config/config.go @@ -23,6 +23,7 @@ type FetcherConfig struct { DAIGatewayAddr string `json:"DAIGatewayAddr"` USDCGatewayAddr string `json:"USDCGatewayAddr"` LIDOGatewayAddr string `json:"LIDOGatewayAddr"` + PufferGatewayAddr string `json:"PufferGatewayAddr"` ERC721GatewayAddr string `json:"ERC721GatewayAddr"` ERC1155GatewayAddr string `json:"ERC1155GatewayAddr"` ScrollChainAddr string `json:"ScrollChainAddr"` diff --git a/bridge-history-api/internal/logic/l1_fetcher.go b/bridge-history-api/internal/logic/l1_fetcher.go index d9e42a48df..f2436e6ad1 100644 --- a/bridge-history-api/internal/logic/l1_fetcher.go +++ b/bridge-history-api/internal/logic/l1_fetcher.go @@ -93,6 +93,11 @@ func NewL1FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr)) } + if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) { + addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr)) + gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr)) + } + log.Info("L1 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList) f := &L1FetcherLogic{ diff --git a/bridge-history-api/internal/logic/l2_fetcher.go b/bridge-history-api/internal/logic/l2_fetcher.go index ec13e70ba6..24b583cb51 100644 --- a/bridge-history-api/internal/logic/l2_fetcher.go +++ b/bridge-history-api/internal/logic/l2_fetcher.go @@ -85,7 +85,12 @@ func NewL2FetcherLogic(cfg *config.FetcherConfig, db *gorm.DB, client *ethclient if common.HexToAddress(cfg.LIDOGatewayAddr) != (common.Address{}) { addressList = append(addressList, common.HexToAddress(cfg.LIDOGatewayAddr)) - gatewayList = append(gatewayList, common.HexToAddress(cfg.USDCGatewayAddr)) + gatewayList = append(gatewayList, common.HexToAddress(cfg.LIDOGatewayAddr)) + } + + if common.HexToAddress(cfg.PufferGatewayAddr) != (common.Address{}) { + addressList = append(addressList, common.HexToAddress(cfg.PufferGatewayAddr)) + gatewayList = append(gatewayList, common.HexToAddress(cfg.PufferGatewayAddr)) } log.Info("L2 Fetcher configured with the following address list", "addresses", addressList, "gateways", gatewayList) diff --git a/common/libzkp/impl/Cargo.lock b/common/libzkp/impl/Cargo.lock index 6bbd5ec691..330e87ad0b 100644 --- a/common/libzkp/impl/Cargo.lock +++ b/common/libzkp/impl/Cargo.lock @@ -31,7 +31,7 @@ dependencies = [ [[package]] name = "aggregator" version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8" dependencies = [ "ark-std 0.3.0", "c-kzg", @@ -521,7 +521,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1" [[package]] name = "bus-mapping" version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8" dependencies = [ "eth-types", "ethers-core", @@ -535,7 +535,7 @@ dependencies = [ "mock", "mpt-zktrie", "num", - "poseidon-circuit", + 
"poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)", "rand", "revm-precompile", "serde", @@ -1139,7 +1139,7 @@ dependencies = [ [[package]] name = "eth-types" version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8" dependencies = [ "base64 0.13.1", "ethers-core", @@ -1150,7 +1150,7 @@ dependencies = [ "itertools 0.11.0", "num", "num-bigint", - "poseidon-circuit", + "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)", "regex", "serde", "serde_json", @@ -1293,7 +1293,7 @@ dependencies = [ [[package]] name = "external-tracer" version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8" dependencies = [ "eth-types", "geth-utils", @@ -1485,7 +1485,7 @@ dependencies = [ [[package]] name = "gadgets" version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8" dependencies = [ "eth-types", "halo2_proofs", @@ -1507,7 +1507,7 @@ dependencies = [ [[package]] name = "geth-utils" version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8" dependencies = [ "env_logger 0.10.0", "gobuild", @@ -1684,7 +1684,7 @@ dependencies = [ "log", "num-bigint", "num-traits", - "poseidon-circuit", + "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=scroll-dev-1201)", "rand", "rand_chacha", "serde", @@ -2142,7 +2142,7 @@ dependencies = [ [[package]] name = "keccak256" version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8" dependencies = [ "env_logger 0.10.0", "eth-types", @@ -2292,7 +2292,7 @@ dependencies = [ [[package]] name = "mock" version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8" dependencies = [ "eth-types", "ethers-core", @@ -2307,7 +2307,7 @@ dependencies = [ [[package]] name = "mpt-zktrie" version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8" dependencies = [ "eth-types", "halo2-mpt-circuits", @@ -2315,7 +2315,7 @@ dependencies = [ "hex", "log", "num-bigint", - "poseidon-circuit", + "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)", "zktrie", ] @@ -2671,6 +2671,21 @@ dependencies = [ "subtle", ] +[[package]] +name = "poseidon-circuit" +version = "0.1.0" 
+source = "git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main#babf5f6a69bec40b2e6523df317c073dcd0b1f97" +dependencies = [ + "bitvec", + "ff 0.13.0", + "halo2_proofs", + "lazy_static", + "log", + "rand", + "rand_xorshift", + "thiserror", +] + [[package]] name = "poseidon-circuit" version = "0.1.0" @@ -2754,7 +2769,7 @@ dependencies = [ [[package]] name = "prover" version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8" dependencies = [ "aggregator", "anyhow", @@ -4441,7 +4456,7 @@ dependencies = [ [[package]] name = "zkevm-circuits" version = "0.1.0" -source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.0rc3#632a7906bad9f24f254fec85fd25a4b180b8b4d7" +source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.10.2#31ca6a0068d2c21f6d179780823e47b54403dba8" dependencies = [ "array-init", "bus-mapping", @@ -4465,7 +4480,7 @@ dependencies = [ "mpt-zktrie", "num", "num-bigint", - "poseidon-circuit", + "poseidon-circuit 0.1.0 (git+https://github.com/scroll-tech/poseidon-circuit.git?branch=main)", "rand", "rand_chacha", "rand_xorshift", @@ -4494,6 +4509,7 @@ dependencies = [ "serde", "serde_derive", "serde_json", + "snark-verifier-sdk", ] [[package]] diff --git a/common/libzkp/impl/Cargo.toml b/common/libzkp/impl/Cargo.toml index bf68eb9c4b..59a0682816 100644 --- a/common/libzkp/impl/Cargo.toml +++ b/common/libzkp/impl/Cargo.toml @@ -24,7 +24,8 @@ bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/i [dependencies] halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" } -prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.0rc3", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] } +snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] } +prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.10.2", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] } base64 = "0.13.0" env_logger = "0.9.0" diff --git a/common/libzkp/impl/src/batch.rs b/common/libzkp/impl/src/batch.rs index 059112945c..15307fb86f 100644 --- a/common/libzkp/impl/src/batch.rs +++ b/common/libzkp/impl/src/batch.rs @@ -12,6 +12,7 @@ use prover::{ utils::{chunk_trace_to_witness_block, init_env_and_log}, BatchProof, BlockTrace, ChunkHash, ChunkProof, }; +use snark_verifier_sdk::verify_evm_calldata; use std::{cell::OnceCell, env, ptr::null}; static mut PROVER: OnceCell = OnceCell::new(); @@ -148,11 +149,33 @@ pub unsafe extern "C" fn gen_batch_proof( /// # Safety #[no_mangle] -pub unsafe extern "C" fn verify_batch_proof(proof: *const c_char) -> c_char { +pub unsafe extern "C" fn verify_batch_proof( + proof: *const c_char, + fork_name: *const c_char, +) -> c_char { let proof = c_char_to_vec(proof); let proof = serde_json::from_slice::(proof.as_slice()).unwrap(); - - let verified = panic_catch(|| VERIFIER.get().unwrap().verify_agg_evm_proof(proof)); + let fork_name_str = c_char_to_str(fork_name); + let fork_id = match fork_name_str { + "" => 0, + "shanghai" => 0, + "bernoulli" => 1, + _ => { + log::warn!("unexpected fork_name {fork_name_str}, treated as bernoulli"); + 1 + } + }; + let verified = panic_catch(|| { + 
if fork_id == 0 { + // before upgrade#2(EIP4844) + verify_evm_calldata( + include_bytes!("evm_verifier_fork_1.bin").to_vec(), + proof.calldata(), + ) + } else { + VERIFIER.get().unwrap().verify_agg_evm_proof(proof) + } + }); verified.unwrap_or(false) as c_char } diff --git a/common/libzkp/impl/src/evm_verifier_fork_1.bin b/common/libzkp/impl/src/evm_verifier_fork_1.bin new file mode 100644 index 0000000000..6b38f5da83 Binary files /dev/null and b/common/libzkp/impl/src/evm_verifier_fork_1.bin differ diff --git a/common/libzkp/interface/libzkp.h b/common/libzkp/interface/libzkp.h index 1c13960879..dab60b7bed 100644 --- a/common/libzkp/interface/libzkp.h +++ b/common/libzkp/interface/libzkp.h @@ -3,7 +3,7 @@ void init_batch_verifier(char* params_dir, char* assets_dir); char* get_batch_vk(); char* check_chunk_proofs(char* chunk_proofs); char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs); -char verify_batch_proof(char* proof); +char verify_batch_proof(char* proof, char* fork_name); void init_chunk_prover(char* params_dir, char* assets_dir); void init_chunk_verifier(char* params_dir, char* assets_dir); diff --git a/common/types/message/message.go b/common/types/message/message.go index b037638da2..4082584937 100644 --- a/common/types/message/message.go +++ b/common/types/message/message.go @@ -76,6 +76,8 @@ type Identity struct { ProverVersion string `json:"prover_version"` // Challenge unique challenge generated by manager Challenge string `json:"challenge"` + // HardForkName the hard fork name + HardForkName string `json:"hard_fork_name"` } // GenerateToken generates token diff --git a/common/types/message/message_test.go b/common/types/message/message_test.go index a0ba35a720..8ea641fae1 100644 --- a/common/types/message/message_test.go +++ b/common/types/message/message_test.go @@ -54,7 +54,7 @@ func TestIdentityHash(t *testing.T) { hash, err := identity.Hash() assert.NoError(t, err) - expectedHash := "83f5e0ad023e9c1de639ab07b9b4cb972ec9dbbd2524794c533a420a5b137721" + expectedHash := "9b8b00f5655411ec1d68ba1666261281c5414aedbda932e5b6a9f7f1b114fdf2" assert.Equal(t, expectedHash, hex.EncodeToString(hash)) } diff --git a/common/version/version.go b/common/version/version.go index ecd97b2b09..f5e3306201 100644 --- a/common/version/version.go +++ b/common/version/version.go @@ -5,7 +5,7 @@ import ( "runtime/debug" ) -var tag = "v4.3.84" +var tag = "v4.3.88" var commit = func() string { if info, ok := debug.ReadBuildInfo(); ok { diff --git a/contracts/src/L1/rollup/IScrollChain.sol b/contracts/src/L1/rollup/IScrollChain.sol index cdb7f4457e..7af98457f6 100644 --- a/contracts/src/L1/rollup/IScrollChain.sol +++ b/contracts/src/L1/rollup/IScrollChain.sol @@ -2,6 +2,8 @@ pragma solidity ^0.8.24; +/// @title IScrollChain +/// @notice The interface for ScrollChain. interface IScrollChain { /********** * Events * @@ -43,23 +45,23 @@ interface IScrollChain { * Public View Functions * *************************/ - /// @notice The latest finalized batch index. + /// @return The latest finalized batch index. function lastFinalizedBatchIndex() external view returns (uint256); - /// @notice Return the batch hash of a committed batch. /// @param batchIndex The index of the batch. + /// @return The batch hash of a committed batch. function committedBatches(uint256 batchIndex) external view returns (bytes32); - /// @notice Return the state root of a committed batch. /// @param batchIndex The index of the batch. + /// @return The state root of a committed batch. 
function finalizedStateRoots(uint256 batchIndex) external view returns (bytes32); - /// @notice Return the message root of a committed batch. /// @param batchIndex The index of the batch. + /// @return The message root of a committed batch. function withdrawRoots(uint256 batchIndex) external view returns (bytes32); - /// @notice Return whether the batch is finalized by batch index. /// @param batchIndex The index of the batch. + /// @return Whether the batch is finalized by batch index. function isBatchFinalized(uint256 batchIndex) external view returns (bool); /***************************** diff --git a/contracts/src/L1/rollup/MultipleVersionRollupVerifier.sol b/contracts/src/L1/rollup/MultipleVersionRollupVerifier.sol index ebfccb1e36..7c6a72b814 100644 --- a/contracts/src/L1/rollup/MultipleVersionRollupVerifier.sol +++ b/contracts/src/L1/rollup/MultipleVersionRollupVerifier.sol @@ -8,6 +8,8 @@ import {IScrollChain} from "./IScrollChain.sol"; import {IRollupVerifier} from "../../libraries/verifier/IRollupVerifier.sol"; import {IZkEvmVerifier} from "../../libraries/verifier/IZkEvmVerifier.sol"; +/// @title MultipleVersionRollupVerifier +/// @notice Verifies aggregate zk proofs using the appropriate verifier. contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable { /********** * Events * @@ -37,7 +39,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable { *************/ /// @notice The address of ScrollChain contract. - address immutable scrollChain; + address public immutable scrollChain; /*********** * Structs * @@ -58,7 +60,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable { /// The verifiers are sorted by batchIndex in increasing order. mapping(uint256 => Verifier[]) public legacyVerifiers; - /// @notice Mapping from verifier version to the lastest used zkevm verifier. + /// @notice Mapping from verifier version to the latest used zkevm verifier. mapping(uint256 => Verifier) public latestVerifier; /*************** @@ -86,6 +88,8 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable { *************************/ /// @notice Return the number of legacy verifiers. + /// @param _version The version of legacy verifiers. + /// @return The number of legacy verifiers. function legacyVerifiersLength(uint256 _version) external view returns (uint256) { return legacyVerifiers[_version].length; } @@ -93,6 +97,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable { /// @notice Compute the verifier should be used for specific batch. /// @param _version The version of verifier to query. /// @param _batchIndex The batch index to query. + /// @return The address of verifier. function getVerifier(uint256 _version, uint256 _batchIndex) public view returns (address) { // Normally, we will use the latest verifier. Verifier memory _verifier = latestVerifier[_version]; @@ -144,6 +149,7 @@ contract MultipleVersionRollupVerifier is IRollupVerifier, Ownable { ************************/ /// @notice Update the address of zkevm verifier. + /// @param _version The version of the verifier. /// @param _startBatchIndex The start batch index when the verifier will be used. /// @param _verifier The address of new verifier. 
function updateVerifier( diff --git a/contracts/src/L1/rollup/ScrollChain.sol b/contracts/src/L1/rollup/ScrollChain.sol index dc047b2078..8307437387 100644 --- a/contracts/src/L1/rollup/ScrollChain.sol +++ b/contracts/src/L1/rollup/ScrollChain.sol @@ -115,11 +115,11 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain { *************/ /// @dev Address of the point evaluation precompile used for EIP-4844 blob verification. - address constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A); + address private constant POINT_EVALUATION_PRECOMPILE_ADDR = address(0x0A); /// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the /// point evaluation precompile - uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513; + uint256 private constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513; /// @notice The chain id of the corresponding layer 2 chain. uint64 public immutable layer2ChainId; @@ -236,6 +236,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain { *****************************/ /// @notice Import layer 2 genesis block + /// @param _batchHeader The header of the genesis batch. + /// @param _stateRoot The state root of the genesis block. function importGenesisBatch(bytes calldata _batchHeader, bytes32 _stateRoot) external { // check genesis batch header length if (_stateRoot == bytes32(0)) revert ErrorStateRootIsZero(); diff --git a/contracts/src/libraries/verifier/IRollupVerifier.sol b/contracts/src/libraries/verifier/IRollupVerifier.sol index 72217d5d0a..3ae9ab1edb 100644 --- a/contracts/src/libraries/verifier/IRollupVerifier.sol +++ b/contracts/src/libraries/verifier/IRollupVerifier.sol @@ -2,6 +2,8 @@ pragma solidity ^0.8.24; +/// @title IRollupVerifier +/// @notice The interface for rollup verifier. interface IRollupVerifier { /// @notice Verify aggregate zk proof. /// @param batchIndex The batch index to verify. diff --git a/contracts/src/libraries/verifier/PatriciaMerkleTrieVerifier.sol b/contracts/src/libraries/verifier/PatriciaMerkleTrieVerifier.sol index 9ef65109df..e1b65ea058 100644 --- a/contracts/src/libraries/verifier/PatriciaMerkleTrieVerifier.sol +++ b/contracts/src/libraries/verifier/PatriciaMerkleTrieVerifier.sol @@ -199,7 +199,7 @@ library PatriciaMerkleTrieVerifier { } // decodes all RLP encoded data and stores their DATA items - // [length - 128 bits | calldata offset - 128 bits] in a continous memory region. + // [length - 128 bits | calldata offset - 128 bits] in a continuous memory region. // Expects that the RLP starts with a list that defines the length // of the whole RLP region. 
function decodeFlat(_ptr) -> ptr, memStart, nItems, hash { @@ -505,7 +505,7 @@ library PatriciaMerkleTrieVerifier { } // the one and only boundary check - // in case an attacker crafted a malicous payload + // in case an attacker crafted a malicious payload // and succeeds in the prior verification steps // then this should catch any bogus accesses if iszero(eq(ptr, add(proof.offset, proof.length))) { diff --git a/coordinator/conf/config.json b/coordinator/conf/config.json index b143a702f7..b5a09e25ad 100644 --- a/coordinator/conf/config.json +++ b/coordinator/conf/config.json @@ -5,6 +5,7 @@ "batch_collection_time_sec": 180, "chunk_collection_time_sec": 180, "verifier": { + "fork_name": "bernoulli", "mock_mode": true, "params_path": "", "assets_path": "" diff --git a/coordinator/internal/config/config.go b/coordinator/internal/config/config.go index 33142b1138..55c5c68a83 100644 --- a/coordinator/internal/config/config.go +++ b/coordinator/internal/config/config.go @@ -50,6 +50,7 @@ type Config struct { // VerifierConfig load zk verifier config. type VerifierConfig struct { + ForkName string `json:"fork_name"` MockMode bool `json:"mock_mode"` ParamsPath string `json:"params_path"` AssetsPath string `json:"assets_path"` diff --git a/coordinator/internal/controller/api/auth.go b/coordinator/internal/controller/api/auth.go index 043d0d08d8..f3d3d7aa1a 100644 --- a/coordinator/internal/controller/api/auth.go +++ b/coordinator/internal/controller/api/auth.go @@ -59,6 +59,7 @@ func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims { Challenge: v.Message.Challenge, ProverName: v.Message.ProverName, ProverVersion: v.Message.ProverVersion, + HardForkName: v.Message.HardForkName, }, Signature: v.Signature, } @@ -68,10 +69,15 @@ func (a *AuthController) PayloadFunc(data interface{}) jwt.MapClaims { return jwt.MapClaims{} } + if v.Message.HardForkName == "" { + v.Message.HardForkName = "shanghai" + } + return jwt.MapClaims{ types.PublicKey: publicKey, types.ProverName: v.Message.ProverName, types.ProverVersion: v.Message.ProverVersion, + types.HardForkName: v.Message.HardForkName, } } @@ -89,5 +95,9 @@ func (a *AuthController) IdentityHandler(c *gin.Context) interface{} { if proverVersion, ok := claims[types.ProverVersion]; ok { c.Set(types.ProverVersion, proverVersion) } + + if hardForkName, ok := claims[types.HardForkName]; ok { + c.Set(types.HardForkName, hardForkName) + } return nil } diff --git a/coordinator/internal/controller/api/controller.go b/coordinator/internal/controller/api/controller.go index 0ec8dd8466..a1bf61fd7d 100644 --- a/coordinator/internal/controller/api/controller.go +++ b/coordinator/internal/controller/api/controller.go @@ -2,6 +2,7 @@ package api import ( "github.com/prometheus/client_golang/prometheus" + "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/params" "gorm.io/gorm" @@ -25,6 +26,8 @@ func InitController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.D panic("proof receiver new verifier failure") } + log.Info("verifier created", "chunkVerifier", vf.ChunkVKMap, "batchVerifier", vf.BatchVKMap) + Auth = NewAuthController(db) GetTask = NewGetTaskController(cfg, chainCfg, db, vf, reg) SubmitProof = NewSubmitProofController(cfg, db, vf, reg) diff --git a/coordinator/internal/controller/api/get_task.go b/coordinator/internal/controller/api/get_task.go index 8cb849fbf7..e9276c6b16 100644 --- a/coordinator/internal/controller/api/get_task.go +++ b/coordinator/internal/controller/api/get_task.go @@ -6,6 +6,8 @@ import ( 
"github.com/gin-gonic/gin" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/params" "gorm.io/gorm" @@ -21,15 +23,21 @@ import ( // GetTaskController the get prover task api controller type GetTaskController struct { proverTasks map[message.ProofType]provertask.ProverTask + + getTaskAccessCounter *prometheus.CounterVec } // NewGetTaskController create a get prover task controller func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vf *verifier.Verifier, reg prometheus.Registerer) *GetTaskController { - chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVK, reg) - batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVK, reg) + chunkProverTask := provertask.NewChunkProverTask(cfg, chainCfg, db, vf.ChunkVKMap, reg) + batchProverTask := provertask.NewBatchProverTask(cfg, chainCfg, db, vf.BatchVKMap, reg) ptc := &GetTaskController{ proverTasks: make(map[message.ProofType]provertask.ProverTask), + getTaskAccessCounter: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "coordinator_get_task_access_count", + Help: "Multi dimensions get task counter.", + }, []string{coordinatorType.LabelProverName, coordinatorType.LabelProverPublicKey, coordinatorType.LabelProverVersion}), } ptc.proverTasks[message.ProofTypeChunk] = chunkProverTask @@ -38,6 +46,28 @@ func NewGetTaskController(cfg *config.Config, chainCfg *params.ChainConfig, db * return ptc } +func (ptc *GetTaskController) incGetTaskAccessCounter(ctx *gin.Context) error { + publicKey, publicKeyExist := ctx.Get(coordinatorType.PublicKey) + if !publicKeyExist { + return fmt.Errorf("get public key from context failed") + } + proverName, proverNameExist := ctx.Get(coordinatorType.ProverName) + if !proverNameExist { + return fmt.Errorf("get prover name from context failed") + } + proverVersion, proverVersionExist := ctx.Get(coordinatorType.ProverVersion) + if !proverVersionExist { + return fmt.Errorf("get prover version from context failed") + } + + ptc.getTaskAccessCounter.With(prometheus.Labels{ + coordinatorType.LabelProverPublicKey: publicKey.(string), + coordinatorType.LabelProverName: proverName.(string), + coordinatorType.LabelProverVersion: proverVersion.(string), + }).Inc() + return nil +} + // GetTasks get assigned chunk/batch task func (ptc *GetTaskController) GetTasks(ctx *gin.Context) { var getTaskParameter coordinatorType.GetTaskParameter @@ -55,6 +85,10 @@ func (ptc *GetTaskController) GetTasks(ctx *gin.Context) { return } + if err := ptc.incGetTaskAccessCounter(ctx); err != nil { + log.Warn("get_task access counter inc failed", "error", err.Error()) + } + result, err := proverTask.Assign(ctx, &getTaskParameter) if err != nil { nerr := fmt.Errorf("return prover task err:%w", err) diff --git a/coordinator/internal/logic/provertask/batch_prover_task.go b/coordinator/internal/logic/provertask/batch_prover_task.go index 4c9c2e9671..20c3e296d1 100644 --- a/coordinator/internal/logic/provertask/batch_prover_task.go +++ b/coordinator/internal/logic/provertask/batch_prover_task.go @@ -31,16 +31,17 @@ type BatchProverTask struct { batchAttemptsExceedTotal prometheus.Counter batchTaskGetTaskTotal *prometheus.CounterVec + batchTaskGetTaskProver *prometheus.CounterVec } // NewBatchProverTask new a batch collector -func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vk string, reg 
prometheus.Registerer) *BatchProverTask { +func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *BatchProverTask { forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg) log.Info("new batch prover task", "forkHeights", forkHeights, "nameForks", nameForkMap) bp := &BatchProverTask{ BaseProverTask: BaseProverTask{ - vk: vk, + vkMap: vkMap, db: db, cfg: cfg, nameForkMap: nameForkMap, @@ -58,6 +59,7 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go Name: "coordinator_batch_get_task_total", Help: "Total number of batch get task.", }, []string{"fork_name"}), + batchTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "batch"), } return bp } @@ -69,9 +71,9 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato return nil, fmt.Errorf("check prover task parameter failed, error:%w", err) } - hardForkNumber, err := bp.getHardForkNumberByName(getTaskParameter.HardForkName) + hardForkNumber, err := bp.getHardForkNumberByName(taskCtx.HardForkName) if err != nil { - log.Error("batch assign failure because of the hard fork name don't exist", "fork name", getTaskParameter.HardForkName) + log.Error("batch assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName) return nil, err } @@ -83,7 +85,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato if fromBlockNum != 0 { startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, fromBlockNum) if chunkErr != nil { - log.Error("failed to get fork start chunk index", "forkName", getTaskParameter.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr) + log.Error("failed to get fork start chunk index", "forkName", taskCtx.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr) return nil, ErrCoordinatorInternalFailure } if startChunk == nil { @@ -93,8 +95,8 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato } if toBlockNum != math.MaxInt64 { toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx, toBlockNum) - if err != nil { - log.Error("failed to get fork end chunk index", "forkName", getTaskParameter.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr) + if chunkErr != nil { + log.Error("failed to get fork end chunk index", "forkName", taskCtx.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr) return nil, ErrCoordinatorInternalFailure } if toChunk != nil { @@ -179,7 +181,12 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato return nil, ErrCoordinatorInternalFailure } - bp.batchTaskGetTaskTotal.WithLabelValues(getTaskParameter.HardForkName).Inc() + bp.batchTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc() + bp.batchTaskGetTaskProver.With(prometheus.Labels{ + coordinatorType.LabelProverName: proverTask.ProverName, + coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey, + coordinatorType.LabelProverVersion: proverTask.ProverVersion, + }).Inc() return taskMsg, nil } diff --git a/coordinator/internal/logic/provertask/chunk_prover_task.go b/coordinator/internal/logic/provertask/chunk_prover_task.go index d65726336f..fe079153ff 100644 --- a/coordinator/internal/logic/provertask/chunk_prover_task.go +++ b/coordinator/internal/logic/provertask/chunk_prover_task.go @@ -29,15 +29,16 @@ type ChunkProverTask struct { chunkAttemptsExceedTotal prometheus.Counter chunkTaskGetTaskTotal 
*prometheus.CounterVec + chunkTaskGetTaskProver *prometheus.CounterVec } // NewChunkProverTask new a chunk prover task -func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vk string, reg prometheus.Registerer) *ChunkProverTask { +func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *gorm.DB, vkMap map[string]string, reg prometheus.Registerer) *ChunkProverTask { forkHeights, _, nameForkMap := forks.CollectSortedForkHeights(chainCfg) log.Info("new chunk prover task", "forkHeights", forkHeights, "nameForks", nameForkMap) cp := &ChunkProverTask{ BaseProverTask: BaseProverTask{ - vk: vk, + vkMap: vkMap, db: db, cfg: cfg, nameForkMap: nameForkMap, @@ -55,6 +56,7 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go Name: "coordinator_chunk_get_task_total", Help: "Total number of chunk get task.", }, []string{"fork_name"}), + chunkTaskGetTaskProver: newGetTaskCounterVec(promauto.With(reg), "chunk"), } return cp } @@ -66,9 +68,9 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato return nil, fmt.Errorf("check prover task parameter failed, error:%w", err) } - hardForkNumber, err := cp.getHardForkNumberByName(getTaskParameter.HardForkName) + hardForkNumber, err := cp.getHardForkNumberByName(taskCtx.HardForkName) if err != nil { - log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", getTaskParameter.HardForkName) + log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName) return nil, err } @@ -151,7 +153,12 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato return nil, ErrCoordinatorInternalFailure } - cp.chunkTaskGetTaskTotal.WithLabelValues(getTaskParameter.HardForkName).Inc() + cp.chunkTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc() + cp.chunkTaskGetTaskProver.With(prometheus.Labels{ + coordinatorType.LabelProverName: proverTask.ProverName, + coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey, + coordinatorType.LabelProverVersion: proverTask.ProverVersion, + }).Inc() return taskMsg, nil } diff --git a/coordinator/internal/logic/provertask/prover_task.go b/coordinator/internal/logic/provertask/prover_task.go index 74a176c7e8..980dc569ea 100644 --- a/coordinator/internal/logic/provertask/prover_task.go +++ b/coordinator/internal/logic/provertask/prover_task.go @@ -2,8 +2,12 @@ package provertask import ( "fmt" + "sync" "github.com/gin-gonic/gin" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/scroll-tech/go-ethereum/log" "gorm.io/gorm" "scroll-tech/common/version" @@ -13,11 +17,12 @@ import ( coordinatorType "scroll-tech/coordinator/internal/types" ) -// ErrCoordinatorInternalFailure coordinator internal db failure -var ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error") - -// ErrHardForkName indicates client request with the wrong hard fork name -var ErrHardForkName = fmt.Errorf("wrong hard fork name") +var ( + // ErrCoordinatorInternalFailure coordinator internal db failure + ErrCoordinatorInternalFailure = fmt.Errorf("coordinator internal error") + // ErrHardForkName indicates client request with the wrong hard fork name + ErrHardForkName = fmt.Errorf("wrong hard fork name") +) // ProverTask the interface of a collector who send data to prover type ProverTask interface { @@ -28,8 +33,8 @@ type ProverTask interface { type BaseProverTask 
struct { cfg *config.Config db *gorm.DB - vk string + vkMap map[string]string nameForkMap map[string]uint64 forkHeights []uint64 @@ -44,6 +49,7 @@ type proverTaskContext struct { PublicKey string ProverName string ProverVersion string + HardForkName string } // checkParameter check the prover task parameter illegal @@ -68,12 +74,24 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor } ptc.ProverVersion = proverVersion.(string) + hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName) + if !hardForkNameExist { + return nil, fmt.Errorf("get hard fork name from context failed") + } + ptc.HardForkName = hardForkName.(string) + if !version.CheckScrollRepoVersion(proverVersion.(string), b.cfg.ProverManager.MinProverVersion) { return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", b.cfg.ProverManager.MinProverVersion, proverVersion.(string)) } + vk, vkExist := b.vkMap[ptc.HardForkName] + if !vkExist { + return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap) + } + // if the prover has a different vk - if getTaskParameter.VK != b.vk { + if getTaskParameter.VK != vk { + log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName) // if the prover reports a different prover version if !version.CheckScrollProverVersion(proverVersion.(string)) { return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string)) @@ -115,3 +133,22 @@ func (b *BaseProverTask) getHardForkNumberByName(forkName string) (uint64, error return hardForkNumber, nil } + +var ( + getTaskCounterInitOnce sync.Once + getTaskCounterVec *prometheus.CounterVec = nil +) + +func newGetTaskCounterVec(factory promauto.Factory, taskType string) *prometheus.CounterVec { + getTaskCounterInitOnce.Do(func() { + getTaskCounterVec = factory.NewCounterVec(prometheus.CounterOpts{ + Name: "coordinator_get_task_count", + Help: "Multi dimensions get task counter.", + }, []string{"task_type", + coordinatorType.LabelProverName, + coordinatorType.LabelProverPublicKey, + coordinatorType.LabelProverVersion}) + }) + + return getTaskCounterVec.MustCurryWith(prometheus.Labels{"task_type": taskType}) +} diff --git a/coordinator/internal/logic/submitproof/proof_receiver.go b/coordinator/internal/logic/submitproof/proof_receiver.go index bcc61b6d04..fa01d4c12a 100644 --- a/coordinator/internal/logic/submitproof/proof_receiver.go +++ b/coordinator/internal/logic/submitproof/proof_receiver.go @@ -134,6 +134,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P if len(pv) == 0 { return fmt.Errorf("get ProverVersion from context failed") } + hardForkName := ctx.GetString(coordinatorType.HardForkName) var proverTask *orm.ProverTask var err error @@ -156,20 +157,19 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P proofTimeSec := uint64(proofTime.Seconds()) log.Info("handling zk proof", "proofID", proofMsg.ID, "proverName", proverTask.ProverName, - "proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec) + "proverPublicKey", pk, "proveType", proverTask.TaskType, "proofTime", proofTimeSec, "hardForkName", hardForkName) - if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter); err != nil { + if err = m.validator(ctx, proverTask, pk, proofMsg, proofParameter, 
hardForkName); err != nil { return err } m.verifierTotal.WithLabelValues(pv).Inc() - var success bool + success := true var verifyErr error - if proofMsg.Type == message.ProofTypeChunk { - success, verifyErr = m.verifier.VerifyChunkProof(proofMsg.ChunkProof) - } else if proofMsg.Type == message.ProofTypeBatch { - success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof) + // only verify batch proof. chunk proof verifier have been disabled after Bernoulli + if proofMsg.Type == message.ProofTypeBatch { + success, verifyErr = m.verifier.VerifyBatchProof(proofMsg.BatchProof, hardForkName) } if verifyErr != nil || !success { @@ -178,7 +178,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P m.proofRecover(ctx, proverTask, types.ProverTaskFailureTypeVerifiedFailed, proofMsg) log.Info("proof verified by coordinator failed", "proof id", proofMsg.ID, "prover name", proverTask.ProverName, - "prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr) + "prover pk", pk, "forkName", hardForkName, "prove type", proofMsg.Type, "proof time", proofTimeSec, "error", verifyErr) if verifyErr != nil { return ErrValidatorFailureVerifiedFailed @@ -189,7 +189,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P m.proverTaskProveDuration.Observe(time.Since(proverTask.CreatedAt).Seconds()) log.Info("proof verified and valid", "proof id", proofMsg.ID, "prover name", proverTask.ProverName, - "prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec) + "prover pk", pk, "prove type", proofMsg.Type, "proof time", proofTimeSec, "forkName", hardForkName) if err := m.closeProofTask(ctx, proverTask, proofMsg, proofTimeSec); err != nil { m.proofSubmitFailure.Inc() @@ -221,7 +221,7 @@ func (m *ProofReceiverLogic) checkAreAllChunkProofsReady(ctx context.Context, ch return nil } -func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter) (err error) { +func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.ProverTask, pk string, proofMsg *message.ProofMsg, proofParameter coordinatorType.SubmitProofParameter, forkName string) (err error) { defer func() { if err != nil { m.validateFailureTotal.Inc() @@ -240,7 +240,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov "cannot submit valid proof for a prover task twice", "taskType", proverTask.TaskType, "hash", proofMsg.ID, "proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion, - "proverPublicKey", proverTask.ProverPublicKey, + "proverPublicKey", proverTask.ProverPublicKey, "forkName", forkName, ) return ErrValidatorFailureProverTaskCannotSubmitTwice } @@ -259,7 +259,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov log.Info("proof generated by prover failed", "taskType", proofMsg.Type, "hash", proofMsg.ID, "proverName", proverTask.ProverName, "proverVersion", proverTask.ProverVersion, "proverPublicKey", pk, "failureType", proofParameter.FailureType, - "failureMessage", failureMsg) + "failureMessage", failureMsg, "forkName", forkName) return ErrValidatorFailureProofMsgStatusNotOk } @@ -267,13 +267,13 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov if types.ProverTaskFailureType(proverTask.FailureType) == types.ProverTaskFailureTypeTimeout { m.validateFailureProverTaskTimeout.Inc() 
log.Info("proof submit proof have timeout, skip this submit proof", "hash", proofMsg.ID, "taskType", proverTask.TaskType, - "proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec) + "proverName", proverTask.ProverName, "proverPublicKey", pk, "proofTime", proofTimeSec, "forkName", forkName) return ErrValidatorFailureProofTimeout } // store the proof to prover task if updateTaskProofErr := m.updateProverTaskProof(ctx, proverTask, proofMsg); updateTaskProofErr != nil { - log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk, + log.Warn("update prover task proof failure", "hash", proofMsg.ID, "proverPublicKey", pk, "forkName", forkName, "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "error", updateTaskProofErr) } @@ -281,7 +281,7 @@ func (m *ProofReceiverLogic) validator(ctx context.Context, proverTask *orm.Prov if m.checkIsTaskSuccess(ctx, proofMsg.ID, proofMsg.Type) { m.validateFailureProverTaskHaveVerifier.Inc() log.Info("the prove task have proved and verifier success, skip this submit proof", "hash", proofMsg.ID, - "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk) + "taskType", proverTask.TaskType, "proverName", proverTask.ProverName, "proverPublicKey", pk, "forkName", forkName) return ErrValidatorFailureTaskHaveVerifiedSuccess } return nil diff --git a/coordinator/internal/logic/verifier/legacy_vk/agg_vk.vkey b/coordinator/internal/logic/verifier/legacy_vk/agg_vk.vkey new file mode 100644 index 0000000000..a2cdc25cd2 Binary files /dev/null and b/coordinator/internal/logic/verifier/legacy_vk/agg_vk.vkey differ diff --git a/coordinator/internal/logic/verifier/legacy_vk/chunk_vk.vkey b/coordinator/internal/logic/verifier/legacy_vk/chunk_vk.vkey new file mode 100644 index 0000000000..fa54405f40 Binary files /dev/null and b/coordinator/internal/logic/verifier/legacy_vk/chunk_vk.vkey differ diff --git a/coordinator/internal/logic/verifier/mock.go b/coordinator/internal/logic/verifier/mock.go index 37568078bf..114a452399 100644 --- a/coordinator/internal/logic/verifier/mock.go +++ b/coordinator/internal/logic/verifier/mock.go @@ -9,8 +9,26 @@ import ( ) // NewVerifier Sets up a mock verifier. -func NewVerifier(_ *config.VerifierConfig) (*Verifier, error) { - return &Verifier{}, nil +func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) { + batchVKMap := map[string]string{ + "shanghai": "", + "bernoulli": "", + "london": "", + "istanbul": "", + "homestead": "", + "eip155": "", + } + chunkVKMap := map[string]string{ + "shanghai": "", + "bernoulli": "", + "london": "", + "istanbul": "", + "homestead": "", + "eip155": "", + } + batchVKMap[cfg.ForkName] = "" + chunkVKMap[cfg.ForkName] = "" + return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil } // VerifyChunkProof return a mock verification result for a ChunkProof. @@ -22,7 +40,7 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) { } // VerifyBatchProof return a mock verification result for a BatchProof. 
-func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) { +func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) { if string(proof.Proof) == InvalidTestProof { return false, nil } diff --git a/coordinator/internal/logic/verifier/types.go b/coordinator/internal/logic/verifier/types.go index 9a8dc07af7..854e926fb5 100644 --- a/coordinator/internal/logic/verifier/types.go +++ b/coordinator/internal/logic/verifier/types.go @@ -9,7 +9,7 @@ const InvalidTestProof = "this is a invalid proof" // Verifier represents a rust ffi to a halo2 verifier. type Verifier struct { - cfg *config.VerifierConfig - BatchVK string - ChunkVK string + cfg *config.VerifierConfig + ChunkVKMap map[string]string + BatchVKMap map[string]string } diff --git a/coordinator/internal/logic/verifier/verifier.go b/coordinator/internal/logic/verifier/verifier.go index 38139eaabf..11fadd1062 100644 --- a/coordinator/internal/logic/verifier/verifier.go +++ b/coordinator/internal/logic/verifier/verifier.go @@ -11,9 +11,11 @@ package verifier import "C" //nolint:typecheck import ( + "embed" "encoding/base64" "encoding/json" "io" + "io/fs" "os" "path" "unsafe" @@ -28,7 +30,26 @@ import ( // NewVerifier Sets up a rust ffi to call verify. func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) { if cfg.MockMode { - return &Verifier{cfg: cfg}, nil + batchVKMap := map[string]string{ + "shanghai": "", + "bernoulli": "", + "london": "", + "istanbul": "", + "homestead": "", + "eip155": "", + } + chunkVKMap := map[string]string{ + "shanghai": "", + "bernoulli": "", + "london": "", + "istanbul": "", + "homestead": "", + "eip155": "", + } + + batchVKMap[cfg.ForkName] = "" + chunkVKMap[cfg.ForkName] = "" + return &Verifier{cfg: cfg, ChunkVKMap: chunkVKMap, BatchVKMap: batchVKMap}, nil } paramsPathStr := C.CString(cfg.ParamsPath) assetsPathStr := C.CString(cfg.AssetsPath) @@ -40,25 +61,31 @@ func NewVerifier(cfg *config.VerifierConfig) (*Verifier, error) { C.init_batch_verifier(paramsPathStr, assetsPathStr) C.init_chunk_verifier(paramsPathStr, assetsPathStr) - batchVK, err := readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey")) + v := &Verifier{ + cfg: cfg, + ChunkVKMap: make(map[string]string), + BatchVKMap: make(map[string]string), + } + + batchVK, err := v.readVK(path.Join(cfg.AssetsPath, "agg_vk.vkey")) if err != nil { return nil, err } - - chunkVK, err := readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey")) + chunkVK, err := v.readVK(path.Join(cfg.AssetsPath, "chunk_vk.vkey")) if err != nil { return nil, err } + v.BatchVKMap[cfg.ForkName] = batchVK + v.ChunkVKMap[cfg.ForkName] = chunkVK - return &Verifier{ - cfg: cfg, - BatchVK: batchVK, - ChunkVK: chunkVK, - }, nil + if err := v.loadEmbedVK(); err != nil { + return nil, err + } + return v, nil } // VerifyBatchProof Verify a ZkProof by marshaling it and sending it to the Halo2 Verifier. 
-func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) { +func (v *Verifier) VerifyBatchProof(proof *message.BatchProof, forkName string) (bool, error) { if v.cfg.MockMode { log.Info("Mock mode, batch verifier disabled") if string(proof.Proof) == InvalidTestProof { @@ -72,13 +99,15 @@ func (v *Verifier) VerifyBatchProof(proof *message.BatchProof) (bool, error) { return false, err } + log.Info("Start to verify batch proof", "forkName", forkName) proofStr := C.CString(string(buf)) + forkNameStr := C.CString(forkName) defer func() { C.free(unsafe.Pointer(proofStr)) + C.free(unsafe.Pointer(forkNameStr)) }() - log.Info("Start to verify batch proof ...") - verified := C.verify_batch_proof(proofStr) + verified := C.verify_batch_proof(proofStr, forkNameStr) return verified != 0, nil } @@ -107,7 +136,7 @@ func (v *Verifier) VerifyChunkProof(proof *message.ChunkProof) (bool, error) { return verified != 0, nil } -func readVK(filePat string) (string, error) { +func (v *Verifier) readVK(filePat string) (string, error) { f, err := os.Open(filePat) if err != nil { return "", err @@ -118,3 +147,26 @@ func readVK(filePat string) (string, error) { } return base64.StdEncoding.EncodeToString(byt), nil } + +//go:embed legacy_vk/* +var legacyVKFS embed.FS + +func (v *Verifier) loadEmbedVK() error { + batchVKBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/agg_vk.vkey") + if err != nil { + log.Error("load embed batch vk failure", "err", err) + return err + } + + chunkVkBytes, err := fs.ReadFile(legacyVKFS, "legacy_vk/chunk_vk.vkey") + if err != nil { + log.Error("load embed chunk vk failure", "err", err) + return err + } + + v.BatchVKMap["shanghai"] = base64.StdEncoding.EncodeToString(batchVKBytes) + v.ChunkVKMap["shanghai"] = base64.StdEncoding.EncodeToString(chunkVkBytes) + v.BatchVKMap[""] = base64.StdEncoding.EncodeToString(batchVKBytes) + v.ChunkVKMap[""] = base64.StdEncoding.EncodeToString(chunkVkBytes) + return nil +} diff --git a/coordinator/internal/logic/verifier/verifier_test.go b/coordinator/internal/logic/verifier/verifier_test.go index 4edd00822e..a4e807123a 100644 --- a/coordinator/internal/logic/verifier/verifier_test.go +++ b/coordinator/internal/logic/verifier/verifier_test.go @@ -14,7 +14,6 @@ import ( "scroll-tech/common/types/message" "scroll-tech/coordinator/internal/config" - "scroll-tech/coordinator/internal/logic/verifier" ) var ( @@ -34,7 +33,7 @@ func TestFFI(t *testing.T) { AssetsPath: *assetsPath, } - v, err := verifier.NewVerifier(cfg) + v, err := NewVerifier(cfg) as.NoError(err) chunkProof1 := readChunkProof(*chunkProofPath1, as) @@ -50,7 +49,7 @@ func TestFFI(t *testing.T) { t.Log("Verified chunk proof 2") batchProof := readBatchProof(*batchProofPath, as) - batchOk, err := v.VerifyBatchProof(batchProof) + batchOk, err := v.VerifyBatchProof(batchProof, "bernoulli") as.NoError(err) as.True(batchOk) t.Log("Verified batch proof") diff --git a/coordinator/internal/types/auth.go b/coordinator/internal/types/auth.go index cf1d03106a..e89e25c185 100644 --- a/coordinator/internal/types/auth.go +++ b/coordinator/internal/types/auth.go @@ -9,6 +9,8 @@ const ( ProverName = "prover_name" // ProverVersion the prover version for context ProverVersion = "prover_version" + // HardForkName the fork name for context + HardForkName = "hard_fork_name" ) // Message the login message struct @@ -16,6 +18,7 @@ type Message struct { Challenge string `form:"challenge" json:"challenge" binding:"required"` ProverVersion string `form:"prover_version" json:"prover_version" 
binding:"required"` ProverName string `form:"prover_name" json:"prover_name" binding:"required"` + HardForkName string `form:"hard_fork_name" json:"hard_fork_name"` } // LoginParameter for /login api diff --git a/coordinator/internal/types/get_task.go b/coordinator/internal/types/get_task.go index c92f4524fe..82849eb49b 100644 --- a/coordinator/internal/types/get_task.go +++ b/coordinator/internal/types/get_task.go @@ -2,7 +2,6 @@ package types // GetTaskParameter for ProverTasks request parameter type GetTaskParameter struct { - HardForkName string `form:"hard_fork_name" json:"hard_fork_name"` ProverHeight uint64 `form:"prover_height" json:"prover_height"` TaskType int `form:"task_type" json:"task_type"` VK string `form:"vk" json:"vk"` diff --git a/coordinator/internal/types/metric.go b/coordinator/internal/types/metric.go new file mode 100644 index 0000000000..30b2beb382 --- /dev/null +++ b/coordinator/internal/types/metric.go @@ -0,0 +1,10 @@ +package types + +var ( + // LabelProverName label name for prover name; common label name using in prometheus metrics, same rule applies to below. + LabelProverName = "prover_name" + // LabelProverPublicKey label name for prover public key + LabelProverPublicKey = "prover_pubkey" + // LabelProverVersion label name for prover version + LabelProverVersion = "prover_version" +) diff --git a/coordinator/test/api_test.go b/coordinator/test/api_test.go index e1006fc564..2c8870fe61 100644 --- a/coordinator/test/api_test.go +++ b/coordinator/test/api_test.go @@ -96,8 +96,10 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri ChainID: 111, }, ProverManager: &config.ProverManager{ - ProversPerSession: proversPerSession, - Verifier: &config.VerifierConfig{MockMode: true}, + ProversPerSession: proversPerSession, + Verifier: &config.VerifierConfig{ + MockMode: true, + }, BatchCollectionTimeSec: 10, ChunkCollectionTimeSec: 10, MaxVerifierWorkers: 10, @@ -113,6 +115,8 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri var chainConf params.ChainConfig for forkName, forkNumber := range nameForkMap { switch forkName { + case "shanghai": + chainConf.ShanghaiBlock = big.NewInt(forkNumber) case "bernoulli": chainConf.BernoulliBlock = big.NewInt(forkNumber) case "london": @@ -258,12 +262,12 @@ func testGetTaskBlocked(t *testing.T) { assert.NoError(t, err) expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. 
ProverName: %s, ProverVersion: %s", chunkProver.publicKey(), chunkProver.proverName, chunkProver.proverVersion) - code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk) + code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead") assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code) assert.Equal(t, expectedErr, fmt.Errorf(errMsg)) expectedErr = fmt.Errorf("get empty prover task") - code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch) + code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead") assert.Equal(t, types.ErrCoordinatorEmptyProofData, code) assert.Equal(t, expectedErr, fmt.Errorf(errMsg)) @@ -274,12 +278,12 @@ func testGetTaskBlocked(t *testing.T) { assert.NoError(t, err) expectedErr = fmt.Errorf("get empty prover task") - code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk) + code, errMsg = chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead") assert.Equal(t, types.ErrCoordinatorEmptyProofData, code) assert.Equal(t, expectedErr, fmt.Errorf(errMsg)) expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:public key %s is blocked from fetching tasks. ProverName: %s, ProverVersion: %s", batchProver.publicKey(), batchProver.proverName, batchProver.proverVersion) - code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch) + code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead") assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code) assert.Equal(t, expectedErr, fmt.Errorf(errMsg)) } @@ -299,12 +303,12 @@ func testOutdatedProverVersion(t *testing.T) { assert.True(t, chunkProver.healthCheckSuccess(t)) expectedErr := fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, chunkProver.proverVersion) - code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk) + code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk, "homestead") assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code) assert.Equal(t, expectedErr, fmt.Errorf(errMsg)) expectedErr = fmt.Errorf("return prover task err:check prover task parameter failed, error:incompatible prover version. 
please upgrade your prover, minimum allowed version: %s, actual version: %s", version.Version, batchProver.proverVersion) - code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch) + code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch, "homestead") assert.Equal(t, types.ErrCoordinatorGetTaskFailure, code) assert.Equal(t, expectedErr, fmt.Errorf(errMsg)) } @@ -358,7 +362,7 @@ func testHardForkAssignTask(t *testing.T) { { name: "noTaskForkBatchProverVersionLessThanHardForkProverNumberEqual0", proofType: message.ProofTypeBatch, - forkNumbers: map[string]int64{"istanbul": forkNumberTwo, "london": forkNumberThree}, + forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree}, exceptTaskNumber: 0, proverForkNames: []string{"", ""}, exceptGetTaskErrCodes: []int{types.ErrCoordinatorEmptyProofData, types.ErrCoordinatorEmptyProofData}, @@ -448,7 +452,7 @@ func testHardForkAssignTask(t *testing.T) { { // hard fork 3, prover1:2 prover2:3 block [2-3] name: "twoTaskForkChunkProverVersionMiddleHardForkProverNumberEqual0", proofType: message.ProofTypeChunk, - forkNumbers: map[string]int64{"london": forkNumberThree}, + forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree}, exceptTaskNumber: 2, proverForkNames: []string{"", "london"}, exceptGetTaskErrCodes: []int{types.Success, types.Success}, @@ -457,7 +461,7 @@ func testHardForkAssignTask(t *testing.T) { { name: "twoTaskForkBatchProverVersionMiddleHardForkProverNumberEqual0", proofType: message.ProofTypeBatch, - forkNumbers: map[string]int64{"london": forkNumberThree}, + forkNumbers: map[string]int64{"shanghai": forkNumberTwo, "london": forkNumberThree}, exceptTaskNumber: 2, proverForkNames: []string{"", "london"}, exceptGetTaskErrCodes: []int{types.Success, types.Success}, @@ -466,7 +470,7 @@ func testHardForkAssignTask(t *testing.T) { { // hard fork 2, prover 2 block [2-3] name: "oneTaskForkChunkProverVersionLessThanHardForkProverNumberEqual0", proofType: message.ProofTypeChunk, - forkNumbers: map[string]int64{"london": forkNumberThree}, + forkNumbers: map[string]int64{"shanghai": forkNumberOne, "london": forkNumberThree}, exceptTaskNumber: 1, proverForkNames: []string{"", ""}, exceptGetTaskErrCodes: []int{types.Success, types.ErrCoordinatorEmptyProofData}, @@ -534,7 +538,7 @@ func testHardForkAssignTask(t *testing.T) { continue } getTaskNumber++ - mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success) + mockProver.submitProof(t, proverTask, verifiedSuccess, types.Success, tt.proverForkNames[i]) } assert.Equal(t, getTaskNumber, tt.exceptTaskNumber) }) @@ -577,7 +581,7 @@ func testValidProof(t *testing.T) { assert.Equal(t, errCode, types.Success) assert.Equal(t, errMsg, "") assert.NotNil(t, proverTask) - provers[i].submitProof(t, proverTask, proofStatus, types.Success) + provers[i].submitProof(t, proverTask, proofStatus, types.Success, "istanbul") } // verify proof status @@ -643,34 +647,21 @@ func testInvalidProof(t *testing.T) { err = batchOrm.UpdateChunkProofsStatusByBatchHash(context.Background(), batch.Hash, types.ChunkProofsStatusReady) assert.NoError(t, err) - // create mock provers. 
- provers := make([]*mockProver, 2) - for i := 0; i < len(provers); i++ { - var proofType message.ProofType - if i%2 == 0 { - proofType = message.ProofTypeChunk - } else { - proofType = message.ProofTypeBatch - } - provers[i] = newMockProver(t, "prover_test"+strconv.Itoa(i), coordinatorURL, proofType, version.Version) - proverTask, errCode, errMsg := provers[i].getProverTask(t, proofType, "istanbul") - assert.NotNil(t, proverTask) - assert.Equal(t, errCode, types.Success) - assert.Equal(t, errMsg, "") - provers[i].submitProof(t, proverTask, verifiedFailed, types.ErrCoordinatorHandleZkProofFailure) - } + proofType := message.ProofTypeBatch + provingStatus := verifiedFailed + expectErrCode := types.ErrCoordinatorHandleZkProofFailure + prover := newMockProver(t, "prover_test", coordinatorURL, proofType, version.Version) + proverTask, errCode, errMsg := prover.getProverTask(t, proofType, "istanbul") + assert.NotNil(t, proverTask) + assert.Equal(t, errCode, types.Success) + assert.Equal(t, errMsg, "") + prover.submitProof(t, proverTask, provingStatus, expectErrCode, "istanbul") // verify proof status var ( - tick = time.Tick(1500 * time.Millisecond) - tickStop = time.Tick(time.Minute) - ) - - var ( - chunkProofStatus types.ProvingStatus + tick = time.Tick(1500 * time.Millisecond) + tickStop = time.Tick(time.Minute) batchProofStatus types.ProvingStatus - chunkActiveAttempts int16 - chunkMaxAttempts int16 batchActiveAttempts int16 batchMaxAttempts int16 ) @@ -678,24 +669,17 @@ func testInvalidProof(t *testing.T) { for { select { case <-tick: - chunkProofStatus, err = chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash) - assert.NoError(t, err) batchProofStatus, err = batchOrm.GetProvingStatusByHash(context.Background(), batch.Hash) assert.NoError(t, err) - if chunkProofStatus == types.ProvingTaskAssigned && batchProofStatus == types.ProvingTaskAssigned { + if batchProofStatus == types.ProvingTaskAssigned { return } - chunkActiveAttempts, chunkMaxAttempts, err = chunkOrm.GetAttemptsByHash(context.Background(), dbChunk.Hash) - assert.NoError(t, err) - assert.Equal(t, 1, int(chunkMaxAttempts)) - assert.Equal(t, 0, int(chunkActiveAttempts)) - batchActiveAttempts, batchMaxAttempts, err = batchOrm.GetAttemptsByHash(context.Background(), batch.Hash) assert.NoError(t, err) assert.Equal(t, 1, int(batchMaxAttempts)) assert.Equal(t, 0, int(batchActiveAttempts)) case <-tickStop: - t.Error("failed to check proof status", "chunkProofStatus", chunkProofStatus.String(), "batchProofStatus", batchProofStatus.String()) + t.Error("failed to check proof status", "batchProofStatus", batchProofStatus.String()) return } } @@ -735,7 +719,7 @@ func testProofGeneratedFailed(t *testing.T) { assert.NotNil(t, proverTask) assert.Equal(t, errCode, types.Success) assert.Equal(t, errMsg, "") - provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure) + provers[i].submitProof(t, proverTask, generatedFailed, types.ErrCoordinatorHandleZkProofFailure, "istanbul") } // verify proof status @@ -858,14 +842,14 @@ func testTimeoutProof(t *testing.T) { assert.NotNil(t, proverChunkTask2) assert.Equal(t, chunkTask2ErrCode, types.Success) assert.Equal(t, chunkTask2ErrMsg, "") - chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success) + chunkProver2.submitProof(t, proverChunkTask2, verifiedSuccess, types.Success, "istanbul") batchProver2 := newMockProver(t, "prover_test"+strconv.Itoa(3), coordinatorURL, message.ProofTypeBatch, version.Version) proverBatchTask2, 
batchTask2ErrCode, batchTask2ErrMsg := batchProver2.getProverTask(t, message.ProofTypeBatch, "istanbul") assert.NotNil(t, proverBatchTask2) assert.Equal(t, batchTask2ErrCode, types.Success) assert.Equal(t, batchTask2ErrMsg, "") - batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success) + batchProver2.submitProof(t, proverBatchTask2, verifiedSuccess, types.Success, "istanbul") // verify proof status, it should be verified now, because second prover sent valid proof chunkProofStatus2, err := chunkOrm.GetProvingStatusByHash(context.Background(), dbChunk.Hash) diff --git a/coordinator/test/mock_prover.go b/coordinator/test/mock_prover.go index e09efd705d..dfad28b9b0 100644 --- a/coordinator/test/mock_prover.go +++ b/coordinator/test/mock_prover.go @@ -51,9 +51,9 @@ func newMockProver(t *testing.T, proverName string, coordinatorURL string, proof } // connectToCoordinator sets up a websocket client to connect to the prover manager. -func (r *mockProver) connectToCoordinator(t *testing.T) string { +func (r *mockProver) connectToCoordinator(t *testing.T, forkName string) string { challengeString := r.challenge(t) - return r.login(t, challengeString) + return r.login(t, challengeString, forkName) } func (r *mockProver) challenge(t *testing.T) string { @@ -76,18 +76,19 @@ func (r *mockProver) challenge(t *testing.T) string { return loginData.Token } -func (r *mockProver) login(t *testing.T, challengeString string) string { +func (r *mockProver) login(t *testing.T, challengeString string, forkName string) string { authMsg := message.AuthMsg{ Identity: &message.Identity{ Challenge: challengeString, ProverName: r.proverName, ProverVersion: r.proverVersion, + HardForkName: forkName, }, } assert.NoError(t, authMsg.SignWithKey(r.privKey)) - body := fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\"},\"signature\":\"%s\"}", - authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Signature) + body := fmt.Sprintf("{\"message\":{\"challenge\":\"%s\",\"prover_name\":\"%s\", \"prover_version\":\"%s\", \"hard_fork_name\":\"%s\"},\"signature\":\"%s\"}", + authMsg.Identity.Challenge, authMsg.Identity.ProverName, authMsg.Identity.ProverVersion, authMsg.Identity.HardForkName, authMsg.Signature) var result ctypes.Response client := resty.New() @@ -137,7 +138,7 @@ func (r *mockProver) healthCheckFailure(t *testing.T) bool { func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, forkName string) (*types.GetTaskSchema, int, string) { // get task from coordinator - token := r.connectToCoordinator(t) + token := r.connectToCoordinator(t, forkName) assert.NotEmpty(t, token) type response struct { @@ -151,7 +152,7 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo resp, err := client.R(). SetHeader("Content-Type", "application/json"). SetHeader("Authorization", fmt.Sprintf("Bearer %s", token)). - SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType), "hard_fork_name": forkName}). + SetBody(map[string]interface{}{"prover_height": 100, "task_type": int(proofType)}). SetResult(&result). Post("http://" + r.coordinatorURL + "/coordinator/v1/get_task") assert.NoError(t, err) @@ -160,9 +161,11 @@ func (r *mockProver) getProverTask(t *testing.T, proofType message.ProofType, fo } // Testing expected errors returned by coordinator. 
-func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) (int, string) { +// +//nolint:unparam +func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType, forkName string) (int, string) { // get task from coordinator - token := r.connectToCoordinator(t) + token := r.connectToCoordinator(t, forkName) assert.NotEmpty(t, token) type response struct { @@ -185,7 +188,7 @@ func (r *mockProver) tryGetProverTask(t *testing.T, proofType message.ProofType) return result.ErrCode, result.ErrMsg } -func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int) { +func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSchema, proofStatus proofStatus, errCode int, forkName string) { proofMsgStatus := message.StatusOk if proofStatus == generatedFailed { proofMsgStatus = message.StatusProofError @@ -228,7 +231,7 @@ func (r *mockProver) submitProof(t *testing.T, proverTaskSchema *types.GetTaskSc submitProof.Proof = string(encodeData) } - token := r.connectToCoordinator(t) + token := r.connectToCoordinator(t, forkName) assert.NotEmpty(t, token) submitProofData, err := json.Marshal(submitProof) diff --git a/prover/client/client.go b/prover/client/client.go index 9845c64b32..09438be230 100644 --- a/prover/client/client.go +++ b/prover/client/client.go @@ -21,14 +21,15 @@ import ( type CoordinatorClient struct { client *resty.Client - proverName string - priv *ecdsa.PrivateKey + proverName string + hardForkName string + priv *ecdsa.PrivateKey mu sync.Mutex } // NewCoordinatorClient constructs a new CoordinatorClient. -func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) { +func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, hardForkName string, priv *ecdsa.PrivateKey) (*CoordinatorClient, error) { client := resty.New(). SetTimeout(time.Duration(cfg.ConnectionTimeoutSec) * time.Second). SetRetryCount(cfg.RetryCount). 
@@ -50,9 +51,10 @@ func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv "retry wait time (second)", cfg.RetryWaitTimeSec) return &CoordinatorClient{ - client: client, - proverName: proverName, - priv: priv, + client: client, + proverName: proverName, + hardForkName: hardForkName, + priv: priv, }, nil } @@ -83,6 +85,7 @@ func (c *CoordinatorClient) Login(ctx context.Context) error { ProverVersion: version.Version, ProverName: c.proverName, Challenge: challengeResult.Data.Token, + HardForkName: c.hardForkName, }, } @@ -97,10 +100,12 @@ func (c *CoordinatorClient) Login(ctx context.Context) error { Challenge string `json:"challenge"` ProverName string `json:"prover_name"` ProverVersion string `json:"prover_version"` + HardForkName string `json:"hard_fork_name"` }{ Challenge: authMsg.Identity.Challenge, ProverName: authMsg.Identity.ProverName, ProverVersion: authMsg.Identity.ProverVersion, + HardForkName: authMsg.Identity.HardForkName, }, Signature: authMsg.Signature, } diff --git a/prover/client/types.go b/prover/client/types.go index fafdec2369..5860deea6a 100644 --- a/prover/client/types.go +++ b/prover/client/types.go @@ -25,6 +25,7 @@ type LoginRequest struct { Challenge string `json:"challenge"` ProverName string `json:"prover_name"` ProverVersion string `json:"prover_version"` + HardForkName string `json:"hard_fork_name"` } `json:"message"` Signature string `json:"signature"` } @@ -41,7 +42,6 @@ type LoginResponse struct { // GetTaskRequest defines the request structure for GetTask API type GetTaskRequest struct { - HardForkName string `json:"hard_fork_name"` TaskType message.ProofType `json:"task_type"` ProverHeight uint64 `json:"prover_height,omitempty"` VK string `json:"vk"` diff --git a/prover/prover.go b/prover/prover.go index 41277d10c1..eb20e7f930 100644 --- a/prover/prover.go +++ b/prover/prover.go @@ -82,7 +82,7 @@ func NewProver(ctx context.Context, cfg *config.Config) (*Prover, error) { } log.Info("init prover_core successfully!") - coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, cfg.ProverName, priv) + coordinatorClient, err := client.NewCoordinatorClient(cfg.Coordinator, cfg.ProverName, cfg.HardForkName, priv) if err != nil { return nil, err } @@ -178,8 +178,7 @@ func (r *Prover) proveAndSubmit() error { func (r *Prover) fetchTaskFromCoordinator() (*store.ProvingTask, error) { // prepare the request req := &client.GetTaskRequest{ - HardForkName: r.cfg.HardForkName, - TaskType: r.Type(), + TaskType: r.Type(), // we may not be able to get the vk at the first time, so we should pass vk to the coordinator every time we getTask // instead of passing vk when we login VK: r.proverCore.VK, diff --git a/rollup/internal/controller/relayer/l2_relayer.go b/rollup/internal/controller/relayer/l2_relayer.go index c13fc81654..6ffb5254f1 100644 --- a/rollup/internal/controller/relayer/l2_relayer.go +++ b/rollup/internal/controller/relayer/l2_relayer.go @@ -586,6 +586,24 @@ func (r *Layer2Relayer) finalizeBatch(dbBatch *orm.Batch, withProof bool) error log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", dbBatch.Index, "batch hash", dbBatch.Hash, "tx hash", txHash.String(), "err", err) return err } + + // Update the proving status when finalizing without proof so that the coordinator can skip this task.
+ // This is not a critical step, so it is not wrapped in the same transaction as UpdateFinalizeTxHashAndRollupStatus. + if !withProof { + txErr := r.db.Transaction(func(tx *gorm.DB) error { + if updateErr := r.batchOrm.UpdateProvingStatus(r.ctx, dbBatch.Hash, types.ProvingTaskVerified); updateErr != nil { + return updateErr + } + if updateErr := r.chunkOrm.UpdateProvingStatusByBatchHash(r.ctx, dbBatch.Hash, types.ProvingTaskVerified); updateErr != nil { + return updateErr + } + return nil + }) + if txErr != nil { + log.Error("failed to update chunk and batch proving status when finalizing without proof", "batchHash", dbBatch.Hash, "err", txErr) + } + } + r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc() return nil } diff --git a/rollup/internal/controller/relayer/l2_relayer_test.go b/rollup/internal/controller/relayer/l2_relayer_test.go index 1a9b9988af..adacfdb0fc 100644 --- a/rollup/internal/controller/relayer/l2_relayer_test.go +++ b/rollup/internal/controller/relayer/l2_relayer_test.go @@ -7,6 +7,7 @@ import ( "net/http" "strings" "testing" + "time" "github.com/agiledragon/gomonkey/v2" "github.com/gin-gonic/gin" @@ -181,9 +182,9 @@ func testL2RelayerFinalizeTimeoutBatches(t *testing.T) { err = l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block1, block2}) assert.NoError(t, err) chunkOrm := orm.NewChunk(db) - _, err = chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion) + chunkDB1, err := chunkOrm.InsertChunk(context.Background(), chunk1, codecVersion) assert.NoError(t, err) - _, err = chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion) + chunkDB2, err := chunkOrm.InsertChunk(context.Background(), chunk2, codecVersion) assert.NoError(t, err) batch := &encoding.Batch{ @@ -200,11 +201,30 @@ func testL2RelayerFinalizeTimeoutBatches(t *testing.T) { err = batchOrm.UpdateRollupStatus(context.Background(), dbBatch.Hash, types.RollupCommitted) assert.NoError(t, err) + err = chunkOrm.UpdateBatchHashInRange(context.Background(), chunkDB1.Index, chunkDB2.Index, dbBatch.Hash, nil) + assert.NoError(t, err) + // Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool { relayer.ProcessCommittedBatches() - statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{dbBatch.Hash}) - return err == nil && len(statuses) == 1 && statuses[0] == types.RollupFinalizing + time.Sleep(time.Second) + + batchInDB, batchErr := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": dbBatch.Hash}, nil, 0) + if batchErr != nil { + return false + } + chunks, chunkErr := chunkOrm.GetChunksByBatchHash(context.Background(), dbBatch.Hash) + if chunkErr != nil { + return false + } + + batchStatus := len(batchInDB) == 1 && types.RollupStatus(batchInDB[0].RollupStatus) == types.RollupFinalizing && + types.ProvingStatus(batchInDB[0].ProvingStatus) == types.ProvingTaskVerified + + chunkStatus := len(chunks) == 2 && types.ProvingStatus(chunks[0].ProvingStatus) == types.ProvingTaskVerified && + types.ProvingStatus(chunks[1].ProvingStatus) == types.ProvingTaskVerified + + return batchStatus && chunkStatus }) assert.True(t, ok) relayer.StopSenders() diff --git a/rollup/internal/orm/chunk.go b/rollup/internal/orm/chunk.go index 194cd92be0..79023ede3a 100644 --- a/rollup/internal/orm/chunk.go +++ b/rollup/internal/orm/chunk.go @@ -140,6 +140,20 @@ func (o *Chunk) GetChunksGEIndex(ctx context.Context, index uint64, limit int) ( return chunks, nil } +// GetChunksByBatchHash retrieves chunks by batch hash +// for test +func (o *Chunk) GetChunksByBatchHash(ctx context.Context, batchHash string) ([]*Chunk, error) { + db := o.db.WithContext(ctx) + db = db.Model(&Chunk{}) + db = db.Where("batch_hash = ?", batchHash) + + var chunks []*Chunk + if err := db.Find(&chunks).Error; err != nil { + return nil, fmt.Errorf("Chunk.GetChunksByBatchHash error: %w", err) + } + return chunks, nil +} + // InsertChunk inserts a new chunk into the database. func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, dbTX ...*gorm.DB) (*Chunk, error) { if chunk == nil || len(chunk.Blocks) == 0 { @@ -242,6 +256,34 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ return nil } +// UpdateProvingStatusByBatchHash updates the proving_status for chunks within the specified batch_hash +func (o *Chunk) UpdateProvingStatusByBatchHash(ctx context.Context, batchHash string, status types.ProvingStatus, dbTX ...*gorm.DB) error { + updateFields := make(map[string]interface{}) + updateFields["proving_status"] = int(status) + + switch status { + case types.ProvingTaskAssigned: + updateFields["prover_assigned_at"] = time.Now() + case types.ProvingTaskUnassigned: + updateFields["prover_assigned_at"] = nil + case types.ProvingTaskVerified: + updateFields["proved_at"] = time.Now() + } + + db := o.db + if len(dbTX) > 0 && dbTX[0] != nil { + db = dbTX[0] + } + db = db.WithContext(ctx) + db = db.Model(&Chunk{}) + db = db.Where("batch_hash = ?", batchHash) + + if err := db.Updates(updateFields).Error; err != nil { + return fmt.Errorf("Chunk.UpdateProvingStatusByBatchHash error: %w, batch hash: %v, status: %v", err, batchHash, status.String()) + } + return nil +} + // UpdateBatchHashInRange updates the batch_hash for chunks within the specified range (inclusive). // The range is closed, i.e., it includes both start and end indices. func (o *Chunk) UpdateBatchHashInRange(ctx context.Context, startIndex uint64, endIndex uint64, batchHash string, dbTX ...*gorm.DB) error {
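Note on the new chunk ORM helpers above: GetChunksByBatchHash and UpdateProvingStatusByBatchHash are plain GORM queries/updates keyed on batch_hash, and they are what the relayer's finalize-without-proof path and the updated relayer test rely on. The snippet below is a minimal usage sketch only; the package clause, wrapper function name and import paths are assumptions, while the ORM calls and status checks mirror the code in this diff.

package example // illustrative sketch, not part of this diff

import (
	"context"
	"fmt"

	"gorm.io/gorm"

	"scroll-tech/common/types"        // assumed import path
	"scroll-tech/rollup/internal/orm" // assumed import path
)

// markBatchChunksVerified marks every chunk of the given batch as verified and
// then reads the chunks back, the same way testL2RelayerFinalizeTimeoutBatches asserts the result.
func markBatchChunksVerified(ctx context.Context, db *gorm.DB, batchHash string) error {
	chunkOrm := orm.NewChunk(db)

	// Roughly: UPDATE <chunk table> SET proving_status = ?, proved_at = now() WHERE batch_hash = ?
	if err := chunkOrm.UpdateProvingStatusByBatchHash(ctx, batchHash, types.ProvingTaskVerified); err != nil {
		return err
	}

	// Test-only helper: fetch the chunks back and verify their proving status.
	chunks, err := chunkOrm.GetChunksByBatchHash(ctx, batchHash)
	if err != nil {
		return err
	}
	for i, c := range chunks {
		if types.ProvingStatus(c.ProvingStatus) != types.ProvingTaskVerified {
			return fmt.Errorf("chunk %d of batch %s is not verified", i, batchHash)
		}
	}
	return nil
}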
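Note on the authentication change above: after this diff the hard fork name travels inside the signed login identity (prover/client/client.go and coordinator/test/mock_prover.go) rather than in the per-request get_task body, so the coordinator learns the supported fork once at login. A short prover-side sketch follows; the package clause, helper function and import paths are assumptions, while the AuthMsg/Identity fields and the SignWithKey call are the ones used in this diff.

package example // illustrative sketch, not part of this diff

import (
	"crypto/ecdsa"

	"scroll-tech/common/types/message" // assumed import path
	"scroll-tech/common/version"       // assumed import path
)

// buildLoginMsg signs a login message that carries the fork name in its identity.
func buildLoginMsg(challengeToken, proverName, forkName string, privKey *ecdsa.PrivateKey) (*message.AuthMsg, error) {
	authMsg := &message.AuthMsg{
		Identity: &message.Identity{
			Challenge:     challengeToken, // token obtained from the coordinator challenge endpoint
			ProverName:    proverName,
			ProverVersion: version.Version,
			HardForkName:  forkName, // e.g. "homestead", "istanbul", "london", "shanghai" in these tests
		},
	}
	if err := authMsg.SignWithKey(privKey); err != nil {
		return nil, err
	}
	return authMsg, nil
}

// The GetTaskRequest no longer carries hard_fork_name; it only sends task_type,
// prover_height and vk, as shown in prover/client/types.go above.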