diff --git a/README.md b/README.md index 5ec270678..e850261e2 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,7 @@ - stuck packet search now syncs correctly - will no longer unnecessarily wait for a block to elapse before syncing (good for slow chains) - recommended to use fast query loop for fast rollapp chains (`min-loop-duration: 100ms`) +- in the upstream version, the relayer will try to flush every 5 seconds if acks have not yet been produced by the hub. Get around this by setting `--hub-chain-id dymension_1100-1 --flush-ignore-hub-acks` ![banner](./docs/images/comp.gif) diff --git a/cmd/appstate.go b/cmd/appstate.go index 44aff4736..57f03a646 100644 --- a/cmd/appstate.go +++ b/cmd/appstate.go @@ -66,14 +66,14 @@ func (a *appState) loadConfigFile(ctx context.Context) error { // read the config file bytes file, err := os.ReadFile(cfgPath) if err != nil { - return fmt.Errorf("error reading file: %w", err) + return fmt.Errorf("reading file: %w", err) } // unmarshall them into the wrapper struct cfgWrapper := &ConfigInputWrapper{} err = yaml.Unmarshal(file, cfgWrapper) if err != nil { - return fmt.Errorf("error unmarshalling config: %w", err) + return fmt.Errorf("unmarshalling config: %w", err) } if a.log == nil { @@ -88,7 +88,7 @@ func (a *appState) loadConfigFile(ctx context.Context) error { // validate runtime configuration if err := newCfg.validateConfig(); err != nil { - return fmt.Errorf("error parsing chain config: %w", err) + return fmt.Errorf("parsing chain config: %w", err) } // save runtime configuration in app state @@ -201,11 +201,11 @@ func (a *appState) performConfigLockingOperation(ctx context.Context, operation fileLock := flock.New(lockFilePath) _, err := fileLock.TryLock() if err != nil { - return fmt.Errorf("failed to acquire config lock: %w", err) + return fmt.Errorf("acquire config lock: %w", err) } defer func() { if err := fileLock.Unlock(); err != nil { - a.log.Error("error unlocking config file lock, please manually delete", + a.log.Error("Unlocking config file lock, please manually delete.", zap.String("filepath", lockFilePath), ) } @@ -214,7 +214,7 @@ func (a *appState) performConfigLockingOperation(ctx context.Context, operation // load config from file and validate it. don't want to miss // any changes that may have been made while unlocked. if err := a.loadConfigFile(ctx); err != nil { - return fmt.Errorf("failed to initialize config from file: %w", err) + return fmt.Errorf("initialize config from file: %w", err) } // perform the operation that requires config flock. @@ -224,7 +224,7 @@ func (a *appState) performConfigLockingOperation(ctx context.Context, operation // validate config after changes have been made. if err := a.config.validateConfig(); err != nil { - return fmt.Errorf("error parsing chain config: %w", err) + return fmt.Errorf("parsing chain config: %w", err) } // marshal the new config @@ -236,8 +236,8 @@ func (a *appState) performConfigLockingOperation(ctx context.Context, operation cfgPath := a.configPath() // Overwrite the config file. 
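For the README note above, a minimal usage sketch, not part of the patch itself: the path name `hub-rollapp` is a hypothetical placeholder, and the flags shown are the ones this patch binds to `rly start` (`--hub-chain-id`, `--flush-ignore-hub-acks`); the start command rejects `--flush-ignore-hub-acks` without a hub chain id.

$ rly start hub-rollapp --hub-chain-id dymension_1100-1 --flush-ignore-hub-acks

The same command also accepts the pre-existing stuck-packet flags from flags.go (`--stuck-packet-chain-id`, `--stuck-packet-height-start`, `--stuck-packet-height-end`) when a known height range needs to be replayed; the values for those are deployment-specific.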
- if err := os.WriteFile(cfgPath, out, 0600); err != nil { - return fmt.Errorf("failed to write config file at %s: %w", cfgPath, err) + if err := os.WriteFile(cfgPath, out, 0o600); err != nil { + return fmt.Errorf("write config file at %s: %w", cfgPath, err) } return nil @@ -277,7 +277,6 @@ func (a *appState) updatePathConfig( } func (a *appState) useKey(chainName, key string) error { - chain, exists := a.config.Chains[chainName] if !exists { return fmt.Errorf("chain %s not found in config", chainName) @@ -308,7 +307,6 @@ func (a *appState) useKey(chainName, key string) error { } func (a *appState) useRpcAddr(chainName string, rpcAddr string) error { - _, exists := a.config.Chains[chainName] if !exists { return fmt.Errorf("chain %s not found in config", chainName) diff --git a/cmd/chains.go b/cmd/chains.go index d39396412..fcdbfe192 100644 --- a/cmd/chains.go +++ b/cmd/chains.go @@ -409,7 +409,7 @@ func addChainFromFile(a *appState, chainName string, file string) error { a.homePath, a.debug, chainName, ) if err != nil { - return fmt.Errorf("failed to build ChainProvider for %s: %w", file, err) + return fmt.Errorf("build ChainProvider for %s: %w", file, err) } c := relayer.NewChain(a.log, prov, a.debug) @@ -449,7 +449,7 @@ func addChainFromURL(a *appState, chainName string, rawurl string) error { a.homePath, a.debug, chainName, ) if err != nil { - return fmt.Errorf("failed to build ChainProvider for %s: %w", rawurl, err) + return fmt.Errorf("build ChainProvider for %s: %w", rawurl, err) } c := relayer.NewChain(a.log, prov, a.debug) @@ -466,8 +466,8 @@ func addChainsFromRegistry(ctx context.Context, a *appState, forceAdd, testnet b for _, chain := range chains { if _, ok := a.config.Chains[chain]; ok { - a.log.Warn( - "Chain already exists", + a.log.Error( + "Chain already exists.", zap.String("chain", chain), zap.String("source_link", chainRegistry.SourceLink()), ) @@ -477,8 +477,8 @@ func addChainsFromRegistry(ctx context.Context, a *appState, forceAdd, testnet b chainInfo, err := chainRegistry.GetChain(ctx, testnet, chain) if err != nil { - a.log.Warn( - "Error retrieving chain", + a.log.Error( + "Get chain.", zap.String("chain", chain), zap.Error(err), ) @@ -488,8 +488,8 @@ func addChainsFromRegistry(ctx context.Context, a *appState, forceAdd, testnet b chainConfig, err := chainInfo.GetChainConfig(ctx, forceAdd, testnet, chain) if err != nil { - a.log.Warn( - "Error generating chain config", + a.log.Error( + "Get chain config.", zap.String("chain", chain), zap.Error(err), ) @@ -505,8 +505,8 @@ func addChainsFromRegistry(ctx context.Context, a *appState, forceAdd, testnet b a.homePath, a.debug, chainInfo.ChainName, ) if err != nil { - a.log.Warn( - "Failed to build ChainProvider", + a.log.Error( + "Chain config new provider.", zap.String("chain_id", chainConfig.ChainID), zap.Error(err), ) @@ -517,8 +517,8 @@ func addChainsFromRegistry(ctx context.Context, a *appState, forceAdd, testnet b // add to config c := relayer.NewChain(a.log, prov, a.debug) if err = a.config.AddChain(c); err != nil { - a.log.Warn( - "Failed to add chain to config", + a.log.Error( + "Config add chain.", zap.String("chain", chain), zap.Error(err), ) @@ -530,7 +530,7 @@ func addChainsFromRegistry(ctx context.Context, a *appState, forceAdd, testnet b // found the correct chain so move on to next chain in chains } - a.log.Info("Config update status", + a.log.Info("Config update status.", zap.Any("added", added), zap.Any("failed", failed), zap.Any("already existed", existed), diff --git a/cmd/config.go b/cmd/config.go 
index 2b8bde934..b5bd1d0de 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -245,21 +245,21 @@ func addPathsFromDirectory(ctx context.Context, stderr io.Writer, a *appState, d byt, err := os.ReadFile(pth) if err != nil { - return fmt.Errorf("failed to read file %s: %w", pth, err) + return fmt.Errorf("read file %s: %w", pth, err) } p := &relayer.Path{} if err = json.Unmarshal(byt, p); err != nil { - return fmt.Errorf("failed to unmarshal file %s: %w", pth, err) + return fmt.Errorf("unmarshal file %s: %w", pth, err) } pthName := strings.Split(f.Name(), ".")[0] if err := a.config.ValidatePath(ctx, stderr, p); err != nil { - return fmt.Errorf("failed to validate path %s: %w", pth, err) + return fmt.Errorf("validate path %s: %w", pth, err) } if err := a.config.AddPath(pthName, p); err != nil { - return fmt.Errorf("failed to add path %s: %w", pth, err) + return fmt.Errorf("add path %s: %w", pth, err) } fmt.Fprintf(stderr, "added path %s...\n\n", pthName) @@ -339,11 +339,11 @@ func (c *ConfigInputWrapper) RuntimeConfig(ctx context.Context, a *appState) (*C a.homePath, a.debug, chainName, ) if err != nil { - return nil, fmt.Errorf("failed to build ChainProviders: %w", err) + return nil, fmt.Errorf("build ChainProviders: %w", err) } if err := prov.Init(ctx); err != nil { - return nil, fmt.Errorf("failed to initialize provider: %w", err) + return nil, fmt.Errorf("initialize provider: %w", err) } chain := relayer.NewChain(a.log, prov, a.debug) @@ -591,7 +591,7 @@ func (c *Config) validateConfig() error { // verify that the channel filter rule is valid for every path in the config for _, p := range c.Paths { if err := p.ValidateChannelFilterRule(); err != nil { - return fmt.Errorf("error initializing the relayer config for path %s: %w", p.String(), err) + return fmt.Errorf("initializing the relayer config for path %s: %w", p.String(), err) } } diff --git a/cmd/feegrant.go b/cmd/feegrant.go index 2d7055b4e..86b2c1971 100644 --- a/cmd/feegrant.go +++ b/cmd/feegrant.go @@ -75,7 +75,6 @@ func feegrantConfigureBasicCmd(a *appState) *cobra.Command { } if delete { - a.log.Info("Deleting feegrant configuration", zap.String("chain", chain)) cfgErr := a.performConfigLockingOperation(cmd.Context(), func() error { chain := a.config.Chains[chain] @@ -84,6 +83,7 @@ func feegrantConfigureBasicCmd(a *appState) *cobra.Command { return nil }) cobra.CheckErr(cfgErr) + a.log.Info("Deleted fee grant configuration.", zap.String("chain", chain)) return nil } @@ -142,7 +142,7 @@ func feegrantConfigureBasicCmd(a *appState) *cobra.Command { ctx := cmd.Context() _, err = prov.EnsureBasicGrants(ctx, memo, gas) if err != nil { - return fmt.Errorf("error writing grants on chain: '%s'", err.Error()) + return fmt.Errorf("writing grants on chain: '%s'", err.Error()) } // Get latest height from the chain, mark feegrant configuration as verified up to that height. 
@@ -157,7 +157,7 @@ func feegrantConfigureBasicCmd(a *appState) *cobra.Command { prov.PCfg.FeeGrants.IsExternalGranter = externalGranter oldProv.PCfg.FeeGrants = prov.PCfg.FeeGrants oldProv.PCfg.FeeGrants.BlockHeightVerified = h - a.log.Info("feegrant configured", zap.Int64("height", h)) + a.log.Info("Configured feegrant.", zap.Int64("height", h)) return nil }) cobra.CheckErr(cfgErr) @@ -205,7 +205,7 @@ func feegrantBasicGrantsCmd(a *appState) *cobra.Command { granterAcc, err := prov.AccountFromKeyOrAddress(keyNameOrAddress) if err != nil { - a.log.Error("Unknown account", zap.String("key_or_address", keyNameOrAddress), zap.Error(err)) + a.log.Error("Unknown account.", zap.String("key_or_address", keyNameOrAddress), zap.Error(err)) return err } granterAddr := prov.MustEncodeAccAddr(granterAcc) @@ -218,7 +218,7 @@ func feegrantBasicGrantsCmd(a *appState) *cobra.Command { for _, grant := range res { allowance, e := prov.Sprint(grant.Allowance) cobra.CheckErr(e) - a.log.Info("feegrant", zap.String("granter", grant.Granter), zap.String("grantee", grant.Grantee), zap.String("allowance", allowance)) + a.log.Info("Feegrant.", zap.String("granter", grant.Granter), zap.String("grantee", grant.Grantee), zap.String("allowance", allowance)) } return nil diff --git a/cmd/flags.go b/cmd/flags.go index 90b6eeb87..5d0b8b780 100644 --- a/cmd/flags.go +++ b/cmd/flags.go @@ -62,6 +62,8 @@ const ( flagStuckPacketChainID = "stuck-packet-chain-id" flagStuckPacketHeightStart = "stuck-packet-height-start" flagStuckPacketHeightEnd = "stuck-packet-height-end" + flagHubChainID = "hub-chain-id" + flagFlushIgnoreHubAcks = "flush-ignore-hub-acks" ) const blankValue = "blank" @@ -494,6 +496,18 @@ func addOutputFlag(v *viper.Viper, cmd *cobra.Command) *cobra.Command { return cmd } +func addHubAckRetryFlags(v *viper.Viper, cmd *cobra.Command) *cobra.Command { + cmd.Flags().String(flagHubChainID, "", "hub chain id") + if err := v.BindPFlag(flagHubChainID, cmd.Flags().Lookup(flagHubChainID)); err != nil { + panic(err) + } + cmd.Flags().Bool(flagFlushIgnoreHubAcks, false, "ignore missing acks on the hub when flushing") + if err := v.BindPFlag(flagFlushIgnoreHubAcks, cmd.Flags().Lookup(flagFlushIgnoreHubAcks)); err != nil { + panic(err) + } + return cmd +} + func stuckPacketFlags(v *viper.Viper, cmd *cobra.Command) *cobra.Command { cmd.Flags().String(flagStuckPacketChainID, "", "chain ID with the stuck packet(s)") if err := v.BindPFlag(flagStuckPacketChainID, cmd.Flags().Lookup(flagStuckPacketChainID)); err != nil { diff --git a/cmd/keys.go b/cmd/keys.go index ec7013c35..ad9fadd09 100644 --- a/cmd/keys.go +++ b/cmd/keys.go @@ -58,7 +58,6 @@ func keysCmd(a *appState) *cobra.Command { } func keysUseCmd(a *appState) *cobra.Command { - cmd := &cobra.Command{ Use: "use chain_name key_name", Aliases: []string{"u"}, @@ -125,7 +124,7 @@ $ %s k a cosmoshub testkey`, appName, appName, appName)), ko, err := chain.ChainProvider.AddKey(keyName, uint32(coinType), algo) if err != nil { - return fmt.Errorf("failed to add key: %w", err) + return fmt.Errorf("add key: %w", err) } out, err := json.Marshal(&ko) @@ -155,7 +154,6 @@ $ %s keys restore ibc-0 testkey "[mnemonic-words]" $ %s k r cosmoshub faucet-key "[mnemonic-words]" $ %s k r demo-key "[mnemonic-words]" --restore-all`, appName, appName, appName)), RunE: func(cmd *cobra.Command, args []string) error { - cmdFlags := cmd.Flags() restoreAll, err := cmdFlags.GetBool(flagRestoreAll) @@ -312,7 +310,7 @@ func askForConfirmation(a *appState, stdin io.Reader, stderr io.Writer) bool { _, err := 
fmt.Fscanln(stdin, &response) if err != nil { - a.log.Fatal("Failed to read input", zap.Error(err)) + a.log.Fatal("Fscanln.", zap.Error(err)) } switch strings.ToLower(response) { diff --git a/cmd/paths.go b/cmd/paths.go index b5583fb69..37c4168fe 100644 --- a/cmd/paths.go +++ b/cmd/paths.go @@ -426,7 +426,6 @@ $ %s pth fch`, appName, defaultHome, appName, appName)), regPath = path.Join("testnets", "_IBC", fileName) } else { regPath = path.Join("_IBC", fileName) - } client, _, err := client.Repositories.DownloadContents(cmd.Context(), "cosmos", "chain-registry", regPath, nil) if err != nil { @@ -441,12 +440,12 @@ $ %s pth fch`, appName, defaultHome, appName, appName)), b, err := io.ReadAll(client) if err != nil { - return fmt.Errorf("error reading response body: %w", err) + return fmt.Errorf("reading response body: %w", err) } ibc := &relayer.IBCdata{} if err = json.Unmarshal(b, &ibc); err != nil { - return fmt.Errorf("failed to unmarshal: %w ", err) + return fmt.Errorf("unmarshal: %w ", err) } srcChainName := ibc.Chain1.ChainName @@ -469,7 +468,7 @@ $ %s pth fch`, appName, defaultHome, appName, appName)), client.Close() if err = a.config.AddPath(pthName, newPath); err != nil { - return fmt.Errorf("failed to add path %s: %w", pthName, err) + return fmt.Errorf("add path %s: %w", pthName, err) } fmt.Fprintf(cmd.ErrOrStderr(), "added: %s\n", pthName) diff --git a/cmd/start.go b/cmd/start.go index c508f8732..205777a6e 100644 --- a/cmd/start.go +++ b/cmd/start.go @@ -106,20 +106,20 @@ $ %s start demo-path2 --max-tx-size 10`, appName, appName, appName, appName)), } if debugAddr == "" { - a.log.Info("Skipping debug server due to empty debug address flag") + a.log.Info("Skipping debug server due to empty debug address flag.") } else { ln, err := net.Listen("tcp", debugAddr) if err != nil { a.log.Error( - "Failed to listen on debug address. If you have another relayer process open, use --" + + "Listen on debug address. If you have another relayer process open, use --" + flagDebugAddr + " to pick a different address.", ) - return fmt.Errorf("failed to listen on debug address %q: %w", debugAddr, err) + return fmt.Errorf("listen on debug address %q: %w", debugAddr, err) } log := a.log.With(zap.String("sys", "debughttp")) - log.Info("Debug server listening", zap.String("addr", debugAddr)) + log.Info("Debug server listening.", zap.String("addr", debugAddr)) prometheusMetrics = processor.NewPrometheusMetrics() relaydebug.StartDebugServer(cmd.Context(), log, ln, prometheusMetrics.Registry) for _, chain := range chains { @@ -154,6 +154,26 @@ $ %s start demo-path2 --max-tx-size 10`, appName, appName, appName, appName)), return err } + var skippedPacketsHandlingCfg *processor.SkippedPacketsHandlingConfig + { + ignoreAcks, err := cmd.Flags().GetBool(flagFlushIgnoreHubAcks) + if err != nil { + return err + } + hubChain, err := cmd.Flags().GetString(flagHubChainID) + if err != nil { + return err + } + if ignoreAcks && hubChain == "" { + return errors.New("must supply hub chain id if ignoring hub acks when flushing") + } + + skippedPacketsHandlingCfg = &processor.SkippedPacketsHandlingConfig{ + HubChainID: hubChain, + IgnoreHubAcksWhenFlushing: ignoreAcks, + } + } + rlyErrCh := relayer.StartRelayer( cmd.Context(), a.log, @@ -170,6 +190,7 @@ $ %s start demo-path2 --max-tx-size 10`, appName, appName, appName, appName)), initialBlockHistory, prometheusMetrics, stuckPacket, + skippedPacketsHandlingCfg, ) // Block until the error channel sends a message. 
@@ -177,8 +198,8 @@ $ %s start demo-path2 --max-tx-size 10`, appName, appName, appName, appName)), // so we don't want to separately monitor the ctx.Done channel, // because we would risk returning before the relayer cleans up. if err := <-rlyErrCh; err != nil && !errors.Is(err, context.Canceled) { - a.log.Warn( - "Relayer start error", + a.log.Error( + "Start relayer.", zap.Error(err), ) return err @@ -194,5 +215,6 @@ $ %s start demo-path2 --max-tx-size 10`, appName, appName, appName, appName)), cmd = flushIntervalFlag(a.viper, cmd) cmd = memoFlag(a.viper, cmd) cmd = stuckPacketFlags(a.viper, cmd) + cmd = addHubAckRetryFlags(a.viper, cmd) return cmd } diff --git a/cmd/tx.go b/cmd/tx.go index 47f37a41d..8fd49cb28 100644 --- a/cmd/tx.go +++ b/cmd/tx.go @@ -218,7 +218,7 @@ func createClientCmd(a *appState) *cobra.Command { if err = retry.Do(func() error { srch, dsth, err = relayer.QueryLatestHeights(cmd.Context(), src, dst) if srch == 0 || dsth == 0 || err != nil { - return fmt.Errorf("failed to query latest heights: %w", err) + return fmt.Errorf("query latest heights: %w", err) } return err }, retry.Context(cmd.Context()), relayer.RtyAtt, relayer.RtyDel, relayer.RtyErr); err != nil { @@ -240,7 +240,7 @@ func createClientCmd(a *appState) *cobra.Command { relayer.RtyErr, retry.OnRetry(func(n uint, err error) { a.log.Info( - "Failed to get light signed header", + "Get light signed header.", zap.String("src_chain_id", src.ChainID()), zap.Int64("src_height", srch), zap.String("dst_chain_id", dst.ChainID()), @@ -776,7 +776,7 @@ $ %s tx connect demo-path --src-port transfer --dst-port transfer --order unorde memo, ) if err != nil { - return fmt.Errorf("error creating clients: %w", err) + return fmt.Errorf("creating clients: %w", err) } if clientSrc != "" || clientDst != "" { @@ -796,7 +796,7 @@ $ %s tx connect demo-path --src-port transfer --dst-port transfer --order unorde pathName, ) if err != nil { - return fmt.Errorf("error creating connections: %w", err) + return fmt.Errorf("creating connections: %w", err) } if connectionSrc != "" || connectionDst != "" { @@ -847,7 +847,7 @@ $ %s tx link-then-start demo-path --timeout 5s`, appName, appName)), lCmd := linkCmd(a) for err := lCmd.RunE(cmd, args); err != nil; err = lCmd.RunE(cmd, args) { - a.log.Info("Error running link; retrying", zap.Error(err)) + a.log.Info("Running link; retrying.", zap.Error(err)) select { case <-time.After(time.Second): // Keep going. @@ -968,6 +968,7 @@ $ %s tx flush demo-path channel-0`, 0, nil, stuckPacket, + nil, ) // Block until the error channel sends a message. @@ -975,8 +976,8 @@ $ %s tx flush demo-path channel-0`, // so we don't want to separately monitor the ctx.Done channel, // because we would risk returning before the relayer cleans up. if err := <-rlyErrCh; err != nil && !errors.Is(err, context.Canceled) { - a.log.Warn( - "Relayer start error", + a.log.Error( + "Start relayer.", zap.Error(err), ) return err @@ -1004,7 +1005,7 @@ $ %s tx relay-pkts demo-path channel-0`, appName, appName, )), RunE: func(cmd *cobra.Command, args []string) error { - a.log.Warn("This command is deprecated. Please use 'tx flush' command instead") + a.log.Error("This command is deprecated. Please use 'tx flush' command instead.") return flushCmd(a).RunE(cmd, args) }, } @@ -1026,7 +1027,7 @@ $ %s tx relay-acks demo-path channel-0 -l 3 -s 6`, appName, appName, )), RunE: func(cmd *cobra.Command, args []string) error { - a.log.Warn("This command is deprecated. 
Please use 'tx flush' command instead") + a.log.Error("This command is deprecated. Please use 'tx flush' command instead.") return flushCmd(a).RunE(cmd, args) }, } diff --git a/cregistry/chain_info.go b/cregistry/chain_info.go index 0bb66e3d3..332d53b7b 100644 --- a/cregistry/chain_info.go +++ b/cregistry/chain_info.go @@ -162,14 +162,14 @@ func (c ChainInfo) GetRPCEndpoints(ctx context.Context) (out []string, err error if err != nil { unhealthy += 1 c.log.Debug( - "Ignoring endpoint due to error", + "Ignoring endpoint due to error.", zap.String("endpoint", endpoint), zap.Error(err), ) return nil } healthy += 1 - c.log.Debug("Verified healthy endpoint", zap.String("endpoint", endpoint)) + c.log.Debug("Verified healthy endpoint.", zap.String("endpoint", endpoint)) endpoints = append(endpoints, endpoint) return nil }) @@ -177,7 +177,7 @@ func (c ChainInfo) GetRPCEndpoints(ctx context.Context) (out []string, err error if err := eg.Wait(); err != nil { return nil, err } - c.log.Info("Endpoints queried", + c.log.Info("Queried endpoints.", zap.String("chain_name", c.ChainName), zap.Int("healthy", healthy), zap.Int("unhealthy", unhealthy), @@ -202,7 +202,7 @@ func (c ChainInfo) GetRandomRPCEndpoint(ctx context.Context, forceAdd bool) (str randomGenerator := rand.New(rand.NewSource(time.Now().UnixNano())) endpoint := rpcs[randomGenerator.Intn(len(rpcs))] - c.log.Info("Endpoint selected", + c.log.Info("Selected endpoint", zap.String("chain_name", c.ChainName), zap.String("endpoint", endpoint), ) @@ -214,10 +214,8 @@ func (c ChainInfo) GetAssetList(ctx context.Context, testnet bool, name string) var chainRegURL string if testnet { chainRegURL = fmt.Sprintf("https://raw.githubusercontent.com/cosmos/chain-registry/master/testnets/%s/assetlist.json", name) - } else { chainRegURL = fmt.Sprintf("https://raw.githubusercontent.com/cosmos/chain-registry/master/%s/assetlist.json", name) - } res, err := http.Get(chainRegURL) if err != nil { @@ -241,7 +239,6 @@ func (c ChainInfo) GetAssetList(ctx context.Context, testnet bool, name string) return AssetList{}, err } return assetList, nil - } // GetChainConfig returns a CosmosProviderConfig composed from the details found in the cosmos chain registry for diff --git a/cregistry/chain_info_test.go b/cregistry/chain_info_test.go index eb54a11c4..11bbd87ac 100644 --- a/cregistry/chain_info_test.go +++ b/cregistry/chain_info_test.go @@ -48,7 +48,7 @@ func TestGetAllRPCEndpoints(t *testing.T) { expectedEndpoints: []string{"http://test.com:80/rpc"}, expectedError: nil, }, - "unsupported or invalid url scheme error": { + "unsupported or invalid url scheme": { chainInfo: ChainInfoWithRPCEndpoint("ftp://test.com/rpc"), expectedEndpoints: nil, expectedError: fmt.Errorf("invalid or unsupported url scheme: ftp"), diff --git a/interchaintest/docker.go b/interchaintest/docker.go index 384b5e4c1..68ab3fb81 100644 --- a/interchaintest/docker.go +++ b/interchaintest/docker.go @@ -33,28 +33,29 @@ type dockerErrorDetail struct { func uniqueRelayerImageName() (string, error) { uuid, err := uuid.NewRandom() if err != nil { - return "", fmt.Errorf("failed to generate uuid %v", err) + return "", fmt.Errorf("generate uuid %v", err) } return RelayerImagePrefix + uuid.String()[:6], nil } + func BuildRelayerImage(t *testing.T) string { _, b, _, _ := runtime.Caller(0) basepath := filepath.Join(filepath.Dir(b), "..") tar, err := archive.TarWithOptions(basepath, &archive.TarOptions{}) - require.NoError(t, err, "error archiving relayer for docker image build") + require.NoError(t, err, 
"archiving relayer for docker image build") cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - require.NoError(t, err, "error building docker client") + require.NoError(t, err, "building docker client") image, err := uniqueRelayerImageName() - require.NoError(t, err, "error generating unique tag for docker image") + require.NoError(t, err, "generating unique tag for docker image") res, err := cli.ImageBuild(context.Background(), tar, dockertypes.ImageBuildOptions{ Dockerfile: "local.Dockerfile", Tags: []string{image}, }) - require.NoError(t, err, "error building docker image") + require.NoError(t, err, "building docker image") defer res.Body.Close() t.Cleanup(func() { @@ -67,14 +68,14 @@ func BuildRelayerImage(t *testing.T) string { func destroyRelayerImage(t *testing.T, image string) { // Create a Docker client cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - require.NoError(t, err, "error building docker client") + require.NoError(t, err, "building docker client") // Remove the Docker image using the provided tag (uniquestr) _, err = cli.ImageRemove(context.Background(), image, dockertypes.ImageRemoveOptions{ Force: true, // Force remove the image PruneChildren: true, // Remove all child images }) - require.NoError(t, err, "error removing docker image") + require.NoError(t, err, "removing docker image") } func handleDockerBuildOutput(t *testing.T, body io.Reader) { diff --git a/interchaintest/feegrant_test.go b/interchaintest/feegrant_test.go index 9b9cbf951..632da3d42 100644 --- a/interchaintest/feegrant_test.go +++ b/interchaintest/feegrant_test.go @@ -96,7 +96,7 @@ func TestRelayerFeeGrant(t *testing.T) { // GasAdjustment: 1.3, // }} - var tests = [][]*interchaintest.ChainSpec{ + tests := [][]*interchaintest.ChainSpec{ { {Name: "gaia", ChainName: "gaia", Version: "v14.1.0", NumValidators: &nv, NumFullNodes: &nf}, {Name: "osmosis", ChainName: "osmosis", Version: "v14.0.1", NumValidators: &nv, NumFullNodes: &nf}, @@ -110,7 +110,6 @@ func TestRelayerFeeGrant(t *testing.T) { for _, tt := range tests { testname := fmt.Sprintf("%s,%s", tt[0].Name, tt[1].Name) t.Run(testname, func(t *testing.T) { - // Chain Factory cf := interchaintest.NewBuiltinChainFactory(zaptest.NewLogger(t), tt) @@ -201,64 +200,64 @@ func TestRelayerFeeGrant(t *testing.T) { rand.Seed(time.Now().UnixNano()) - //IBC chain config is unrelated to RELAYER config so this step is necessary + // IBC chain config is unrelated to RELAYER config so this step is necessary if err := r.RestoreKey(ctx, eRep, gaia.Config(), gaiaGranterWallet.KeyName(), gaiaGranterWallet.Mnemonic(), ); err != nil { - t.Fatalf("failed to restore granter key to relayer for chain %s: %s", gaia.Config().ChainID, err.Error()) + t.Fatalf("restore granter key to relayer for chain %s: %s", gaia.Config().ChainID, err.Error()) } - //IBC chain config is unrelated to RELAYER config so this step is necessary + // IBC chain config is unrelated to RELAYER config so this step is necessary if err := r.RestoreKey(ctx, eRep, gaia.Config(), gaiaGranteeWallet.KeyName(), gaiaGranteeWallet.Mnemonic(), ); err != nil { - t.Fatalf("failed to restore granter key to relayer for chain %s: %s", gaia.Config().ChainID, err.Error()) + t.Fatalf("restore granter key to relayer for chain %s: %s", gaia.Config().ChainID, err.Error()) } - //IBC chain config is unrelated to RELAYER config so this step is necessary + // IBC chain config is unrelated to RELAYER config so this step is necessary if err := 
r.RestoreKey(ctx, eRep, gaia.Config(), gaiaGrantee2Wallet.KeyName(), gaiaGrantee2Wallet.Mnemonic(), ); err != nil { - t.Fatalf("failed to restore granter key to relayer for chain %s: %s", gaia.Config().ChainID, err.Error()) + t.Fatalf("restore granter key to relayer for chain %s: %s", gaia.Config().ChainID, err.Error()) } - //IBC chain config is unrelated to RELAYER config so this step is necessary + // IBC chain config is unrelated to RELAYER config so this step is necessary if err := r.RestoreKey(ctx, eRep, gaia.Config(), gaiaGrantee3Wallet.KeyName(), gaiaGrantee3Wallet.Mnemonic(), ); err != nil { - t.Fatalf("failed to restore granter key to relayer for chain %s: %s", gaia.Config().ChainID, err.Error()) + t.Fatalf("restore granter key to relayer for chain %s: %s", gaia.Config().ChainID, err.Error()) } - //IBC chain config is unrelated to RELAYER config so this step is necessary + // IBC chain config is unrelated to RELAYER config so this step is necessary if err := r.RestoreKey(ctx, eRep, osmosis.Config(), osmosisUser.KeyName(), osmosisUser.Mnemonic(), ); err != nil { - t.Fatalf("failed to restore granter key to relayer for chain %s: %s", osmosis.Config().ChainID, err.Error()) + t.Fatalf("restore granter key to relayer for chain %s: %s", osmosis.Config().ChainID, err.Error()) } - //IBC chain config is unrelated to RELAYER config so this step is necessary + // IBC chain config is unrelated to RELAYER config so this step is necessary if err := r.RestoreKey(ctx, eRep, osmosis.Config(), gaiaUser.KeyName(), gaiaUser.Mnemonic(), ); err != nil { - t.Fatalf("failed to restore granter key to relayer for chain %s: %s", gaia.Config().ChainID, err.Error()) + t.Fatalf("restore granter key to relayer for chain %s: %s", gaia.Config().ChainID, err.Error()) } gaiaGranteeAddr := gaiaGranteeWallet.FormattedAddress() @@ -268,20 +267,20 @@ func TestRelayerFeeGrant(t *testing.T) { granteeCsv := gaiaGranteeWallet.KeyName() + "," + gaiaGrantee2Wallet.KeyName() + "," + gaiaGrantee3Wallet.KeyName() - //You MUST run the configure feegrant command prior to starting the relayer, otherwise it'd be like you never set it up at all (within this test) - //Note that Gaia supports feegrants, but Osmosis does not (x/feegrant module, or any compatible module, is not included in Osmosis SDK app modules) + // You MUST run the configure feegrant command prior to starting the relayer, otherwise it'd be like you never set it up at all (within this test) + // Note that Gaia supports feegrants, but Osmosis does not (x/feegrant module, or any compatible module, is not included in Osmosis SDK app modules) localRelayer := r.(*Relayer) res := localRelayer.Sys().Run(logger, "chains", "configure", "feegrant", "basicallowance", gaia.Config().ChainID, gaiaGranterWallet.KeyName(), "--grantees", granteeCsv, "--overwrite-granter") if res.Err != nil { fmt.Printf("configure feegrant results: %s\n", res.Stdout.String()) - t.Fatalf("failed to rly config feegrants: %v", res.Err) + t.Fatalf("rly config feegrants: %v", res.Err) } - //Map of feegranted chains and the feegrant info for the chain + // Map of feegranted chains and the feegrant info for the chain feegrantedChains := map[string]*chainFeegrantInfo{} feegrantedChains[gaia.Config().ChainID] = &chainFeegrantInfo{granter: gaiaGranterAddr, grantees: []string{gaiaGranteeAddr, gaiaGrantee2Addr, gaiaGrantee3Addr}} - time.Sleep(14 * time.Second) //commit a couple blocks + time.Sleep(14 * time.Second) // commit a couple blocks r.StartRelayer(ctx, eRep, ibcPath) // Send Transaction @@ -375,7 +374,7 
@@ func TestRelayerFeeGrant(t *testing.T) { require.NoError(t, err) require.NoError(t, eg.Wait()) - feegrantMsgSigners := map[string][]string{} //chain to list of signers + feegrantMsgSigners := map[string][]string{} // chain to list of signers for len(processor.PathProcMessageCollector) > 0 { select { @@ -386,8 +385,8 @@ func TestRelayerFeeGrant(t *testing.T) { chain := cProv.PCfg.ChainID feegrantInfo, isFeegrantedChain := feegrantedChains[chain] if isFeegrantedChain && !strings.Contains(cProv.PCfg.KeyDirectory, t.Name()) { - //This would indicate that a parallel test is inserting msgs into the queue. - //We can safely skip over any messages inserted by other test cases. + // This would indicate that a parallel test is inserting msgs into the queue. + // We can safely skip over any messages inserted by other test cases. fmt.Println("Skipping PathProcessorMessageResp from unrelated Parallel test case") continue } @@ -413,7 +412,7 @@ func TestRelayerFeeGrant(t *testing.T) { msgType := "" for _, m := range fullTx.GetMsgs() { msgType = types.MsgTypeURL(m) - //We want all IBC transfers (on an open channel/connection) to be feegranted in round robin fashion + // We want all IBC transfers (on an open channel/connection) to be feegranted in round robin fashion if msgType == "/ibc.core.channel.v1.MsgRecvPacket" || msgType == "/ibc.core.channel.v1.MsgAcknowledgement" { isFeegrantedMsg = true msgs += msgType + ", " @@ -422,7 +421,7 @@ func TestRelayerFeeGrant(t *testing.T) { } } - //It's required that TXs be feegranted in a round robin fashion for this chain and message type + // It's required that TXs be feegranted in a round robin fashion for this chain and message type if isFeegrantedChain && isFeegrantedMsg { fmt.Printf("Msg types: %+v\n", msgs) @@ -432,13 +431,13 @@ func TestRelayerFeeGrant(t *testing.T) { require.Equal(t, len(signers), 1) granter := fullTx.FeeGranter(cProv.Cdc.Marshaler) - //Feegranter for the TX that was signed on chain must be the relayer chain's configured feegranter + // Feegranter for the TX that was signed on chain must be the relayer chain's configured feegranter require.Equal(t, feegrantInfo.granter, string(granter)) require.NotEmpty(t, granter) for _, msg := range fullTx.GetMsgs() { msgType = types.MsgTypeURL(msg) - //We want all IBC transfers (on an open channel/connection) to be feegranted in round robin fashion + // We want all IBC transfers (on an open channel/connection) to be feegranted in round robin fashion if msgType == "/ibc.core.channel.v1.MsgRecvPacket" { c := msg.(*chantypes.MsgRecvPacket) appData := c.Packet.GetData() @@ -452,9 +451,9 @@ func TestRelayerFeeGrant(t *testing.T) { } } - //Grantee for the TX that was signed on chain must be a configured grantee in the relayer's chain feegrants. - //In addition, the grantee must be used in round robin fashion - //expectedGrantee := nextGrantee(feegrantInfo) + // Grantee for the TX that was signed on chain must be a configured grantee in the relayer's chain feegrants. 
+ // In addition, the grantee must be used in round robin fashion + // expectedGrantee := nextGrantee(feegrantInfo) actualGrantee := string(signers[0]) signerList, ok := feegrantMsgSigners[chain] if ok { @@ -493,10 +492,10 @@ func TestRelayerFeeGrant(t *testing.T) { } } - //At least one feegranter must have signed a TX + // At least one feegranter must have signed a TX require.GreaterOrEqual(t, highestCount, 1) - //All of the feegrantees must have signed at least one TX + // All of the feegrantees must have signed at least one TX expectedFeegrantInfo := feegrantedChains[chain] require.Equal(t, len(signerCountMap), len(expectedFeegrantInfo.grantees)) @@ -560,7 +559,7 @@ func TestRelayerFeeGrantExternal(t *testing.T) { nv := 1 nf := 0 - var tests = [][]*interchaintest.ChainSpec{ + tests := [][]*interchaintest.ChainSpec{ { {Name: "gaia", ChainName: "gaia", Version: "v7.0.3", NumValidators: &nv, NumFullNodes: &nf}, {Name: "osmosis", ChainName: "osmosis", Version: "v14.0.1", NumValidators: &nv, NumFullNodes: &nf}, @@ -574,7 +573,6 @@ func TestRelayerFeeGrantExternal(t *testing.T) { for _, tt := range tests { testname := fmt.Sprintf("%s,%s", tt[0].Name, tt[1].Name) t.Run(testname, func(t *testing.T) { - // Chain Factory cf := interchaintest.NewBuiltinChainFactory(zaptest.NewLogger(t), tt) @@ -692,47 +690,47 @@ func TestRelayerFeeGrantExternal(t *testing.T) { gaiaGranteeWallet.KeyName(), gaiaGranteeWallet.Mnemonic(), ); err != nil { - t.Fatalf("failed to restore granter key to relayer for chain %s: %s", gaia.Config().ChainID, err.Error()) + t.Fatalf("restore granter key to relayer for chain %s: %s", gaia.Config().ChainID, err.Error()) } - //IBC chain config is unrelated to RELAYER config so this step is necessary + // IBC chain config is unrelated to RELAYER config so this step is necessary if err := r.RestoreKey(ctx, eRep, gaia.Config(), gaiaGrantee2Wallet.KeyName(), gaiaGrantee2Wallet.Mnemonic(), ); err != nil { - t.Fatalf("failed to restore granter key to relayer for chain %s: %s", gaia.Config().ChainID, err.Error()) + t.Fatalf("restore granter key to relayer for chain %s: %s", gaia.Config().ChainID, err.Error()) } - //IBC chain config is unrelated to RELAYER config so this step is necessary + // IBC chain config is unrelated to RELAYER config so this step is necessary if err := r.RestoreKey(ctx, eRep, gaia.Config(), gaiaGrantee3Wallet.KeyName(), gaiaGrantee3Wallet.Mnemonic(), ); err != nil { - t.Fatalf("failed to restore granter key to relayer for chain %s: %s", gaia.Config().ChainID, err.Error()) + t.Fatalf("restore granter key to relayer for chain %s: %s", gaia.Config().ChainID, err.Error()) } - //IBC chain config is unrelated to RELAYER config so this step is necessary + // IBC chain config is unrelated to RELAYER config so this step is necessary if err := r.RestoreKey(ctx, eRep, osmosis.Config(), osmosisUser.KeyName(), osmosisUser.Mnemonic(), ); err != nil { - t.Fatalf("failed to restore granter key to relayer for chain %s: %s", osmosis.Config().ChainID, err.Error()) + t.Fatalf("restore granter key to relayer for chain %s: %s", osmosis.Config().ChainID, err.Error()) } - //IBC chain config is unrelated to RELAYER config so this step is necessary + // IBC chain config is unrelated to RELAYER config so this step is necessary if err := r.RestoreKey(ctx, eRep, osmosis.Config(), gaiaUser.KeyName(), gaiaUser.Mnemonic(), ); err != nil { - t.Fatalf("failed to restore granter key to relayer for chain %s: %s", gaia.Config().ChainID, err.Error()) + t.Fatalf("restore granter key to relayer for chain 
%s: %s", gaia.Config().ChainID, err.Error()) } gaiaGranteeAddr := gaiaGranteeWallet.FormattedAddress() @@ -742,20 +740,20 @@ func TestRelayerFeeGrantExternal(t *testing.T) { granteeCsv := gaiaGranteeWallet.KeyName() + "," + gaiaGrantee2Wallet.KeyName() + "," + gaiaGrantee3Wallet.KeyName() - //You MUST run the configure feegrant command prior to starting the relayer, otherwise it'd be like you never set it up at all (within this test) - //Note that Gaia supports feegrants, but Osmosis does not (x/feegrant module, or any compatible module, is not included in Osmosis SDK app modules) + // You MUST run the configure feegrant command prior to starting the relayer, otherwise it'd be like you never set it up at all (within this test) + // Note that Gaia supports feegrants, but Osmosis does not (x/feegrant module, or any compatible module, is not included in Osmosis SDK app modules) localRelayer := r.(*Relayer) res := localRelayer.Sys().Run(logger, "chains", "configure", "feegrant", "basicallowance", gaia.Config().ChainID, gaiaGranterWallet.FormattedAddress(), "--grantees", granteeCsv, "--overwrite-granter") if res.Err != nil { fmt.Printf("configure feegrant results: %s\n", res.Stdout.String()) - t.Fatalf("failed to rly config feegrants: %v", res.Err) + t.Fatalf("rly config feegrants: %v", res.Err) } - //Map of feegranted chains and the feegrant info for the chain + // Map of feegranted chains and the feegrant info for the chain feegrantedChains := map[string]*chainFeegrantInfo{} feegrantedChains[gaia.Config().ChainID] = &chainFeegrantInfo{granter: gaiaGranterAddr, grantees: []string{gaiaGranteeAddr, gaiaGrantee2Addr, gaiaGrantee3Addr}} - time.Sleep(14 * time.Second) //commit a couple blocks + time.Sleep(14 * time.Second) // commit a couple blocks r.StartRelayer(ctx, eRep, ibcPath) // Send Transaction @@ -849,7 +847,7 @@ func TestRelayerFeeGrantExternal(t *testing.T) { require.NoError(t, err) require.NoError(t, eg.Wait()) - feegrantMsgSigners := map[string][]string{} //chain to list of signers + feegrantMsgSigners := map[string][]string{} // chain to list of signers for len(processor.PathProcMessageCollector) > 0 { select { @@ -860,8 +858,8 @@ func TestRelayerFeeGrantExternal(t *testing.T) { chain := cProv.PCfg.ChainID feegrantInfo, isFeegrantedChain := feegrantedChains[chain] if isFeegrantedChain && !strings.Contains(cProv.PCfg.KeyDirectory, t.Name()) { - //This would indicate that a parallel test is inserting msgs into the queue. - //We can safely skip over any messages inserted by other test cases. + // This would indicate that a parallel test is inserting msgs into the queue. + // We can safely skip over any messages inserted by other test cases. 
fmt.Println("Skipping PathProcessorMessageResp from unrelated Parallel test case") continue } @@ -887,7 +885,7 @@ func TestRelayerFeeGrantExternal(t *testing.T) { msgType := "" for _, m := range fullTx.GetMsgs() { msgType = types.MsgTypeURL(m) - //We want all IBC transfers (on an open channel/connection) to be feegranted in round robin fashion + // We want all IBC transfers (on an open channel/connection) to be feegranted in round robin fashion if msgType == "/ibc.core.channel.v1.MsgRecvPacket" || msgType == "/ibc.core.channel.v1.MsgAcknowledgement" { isFeegrantedMsg = true msgs += msgType + ", " @@ -896,7 +894,7 @@ func TestRelayerFeeGrantExternal(t *testing.T) { } } - //It's required that TXs be feegranted in a round robin fashion for this chain and message type + // It's required that TXs be feegranted in a round robin fashion for this chain and message type if isFeegrantedChain && isFeegrantedMsg { fmt.Printf("Msg types: %+v\n", msgs) @@ -906,13 +904,13 @@ func TestRelayerFeeGrantExternal(t *testing.T) { require.Equal(t, len(signers), 1) granter := fullTx.FeeGranter(cProv.Cdc.Marshaler) - //Feegranter for the TX that was signed on chain must be the relayer chain's configured feegranter + // Feegranter for the TX that was signed on chain must be the relayer chain's configured feegranter require.Equal(t, feegrantInfo.granter, string(granter)) require.NotEmpty(t, granter) for _, msg := range fullTx.GetMsgs() { msgType = types.MsgTypeURL(msg) - //We want all IBC transfers (on an open channel/connection) to be feegranted in round robin fashion + // We want all IBC transfers (on an open channel/connection) to be feegranted in round robin fashion if msgType == "/ibc.core.channel.v1.MsgRecvPacket" { c := msg.(*chantypes.MsgRecvPacket) appData := c.Packet.GetData() @@ -926,9 +924,9 @@ func TestRelayerFeeGrantExternal(t *testing.T) { } } - //Grantee for the TX that was signed on chain must be a configured grantee in the relayer's chain feegrants. - //In addition, the grantee must be used in round robin fashion - //expectedGrantee := nextGrantee(feegrantInfo) + // Grantee for the TX that was signed on chain must be a configured grantee in the relayer's chain feegrants. 
+ // In addition, the grantee must be used in round robin fashion + // expectedGrantee := nextGrantee(feegrantInfo) actualGrantee := string(signers[0]) signerList, ok := feegrantMsgSigners[chain] if ok { @@ -967,10 +965,10 @@ func TestRelayerFeeGrantExternal(t *testing.T) { } } - //At least one feegranter must have signed a TX + // At least one feegranter must have signed a TX require.GreaterOrEqual(t, highestCount, 1) - //All of the feegrantees must have signed at least one TX + // All of the feegrantees must have signed at least one TX expectedFeegrantInfo := feegrantedChains[chain] require.Equal(t, len(signerCountMap), len(expectedFeegrantInfo.grantees)) @@ -1021,7 +1019,7 @@ func buildUserUnfunded( keyName := fmt.Sprintf("%s-%s-%s", keyNamePrefix, chainCfg.ChainID, randLowerCaseLetterString(3)) user, err := chain.BuildWallet(ctx, keyName, mnemonic) if err != nil { - return nil, fmt.Errorf("failed to get source user wallet: %w", err) + return nil, fmt.Errorf("get source user wallet: %w", err) } return user, nil diff --git a/interchaintest/relayer.go b/interchaintest/relayer.go index 025221516..e00b3eeb8 100644 --- a/interchaintest/relayer.go +++ b/interchaintest/relayer.go @@ -38,7 +38,7 @@ func NewRelayer( t *testing.T, config RelayerConfig, ) ibc.Relayer { - //prevent incorrect bech32 address prefixed addresses when calling AccAddress.String() + // prevent incorrect bech32 address prefixed addresses when calling AccAddress.String() types.SetAddrCacheEnabled(false) r := &Relayer{ @@ -49,7 +49,7 @@ func NewRelayer( res := r.Sys().Run(zaptest.NewLogger(t), "config", "init", "--memo", config.Memo) if res.Err != nil { - t.Fatalf("failed to rly config init: %v", res.Err) + t.Fatalf("rly config init: %v", res.Err) } return r @@ -142,7 +142,7 @@ func (r *Relayer) GetChannels(ctx context.Context, _ ibc.RelayerExecReporter, ch } var channelOutput ibc.ChannelOutput if err := json.Unmarshal([]byte(channel), &channelOutput); err != nil { - return nil, fmt.Errorf("failed to parse channel %q: %w", channel, err) + return nil, fmt.Errorf("parse channel %q: %w", channel, err) } channels = append(channels, channelOutput) } @@ -163,7 +163,7 @@ func (r *Relayer) GetClients(ctx context.Context, _ ibc.RelayerExecReporter, cha } clientOutput := &ibc.ClientOutput{} if err := json.Unmarshal([]byte(client), clientOutput); err != nil { - return nil, fmt.Errorf("failed to parse client %q: %w", client, err) + return nil, fmt.Errorf("parse client %q: %w", client, err) } clients = append(clients, clientOutput) } @@ -201,7 +201,7 @@ func (r *Relayer) GetConnections(ctx context.Context, _ ibc.RelayerExecReporter, err := json.Unmarshal([]byte(connection), &connectionOutput) if err != nil { r.log().Error( - "Error parsing connection json", + "parsing connection json", zap.Error(err), ) @@ -356,16 +356,16 @@ func (r *Relayer) GetWallet(chainID string) (ibc.Wallet, bool) { // SetClientContractHash sets the wasm client contract hash in the chain's config if the counterparty chain in a path used 08-wasm // to instantiate the client. 
func (r *Relayer) SetClientContractHash(ctx context.Context, rep ibc.RelayerExecReporter, cfg ibc.ChainConfig, hash string) error { - //TODO implement me + // TODO implement me panic("implement me") } func (r *Relayer) PauseRelayer(ctx context.Context) error { - //TODO implement me + // TODO implement me panic("implement me") } func (r *Relayer) ResumeRelayer(ctx context.Context) error { - //TODO implement me + // TODO implement me panic("implement me") } diff --git a/interchaintest/stride/setup_test.go b/interchaintest/stride/setup_test.go index 0258ddb1f..3a0b1a29c 100644 --- a/interchaintest/stride/setup_test.go +++ b/interchaintest/stride/setup_test.go @@ -121,7 +121,7 @@ func ModifyGenesisStride() func(ibc.ChainConfig, []byte) ([]byte, error) { return func(cfg ibc.ChainConfig, genbz []byte) ([]byte, error) { g := make(map[string]interface{}) if err := json.Unmarshal(genbz, &g); err != nil { - return nil, fmt.Errorf("failed to unmarshal genesis file: %w", err) + return nil, fmt.Errorf("unmarshal genesis file: %w", err) } if err := dyno.Set(g, DayEpochLen, "app_state", "epochs", "epochs", DayEpochIndex, "duration"); err != nil { @@ -149,15 +149,15 @@ func ModifyGenesisStride() func(ibc.ChainConfig, []byte) ([]byte, error) { return nil, err } if err := dyno.Set(g, VotingPeriod, "app_state", "gov", "voting_params", "voting_period"); err != nil { - return nil, fmt.Errorf("failed to set voting period in genesis json: %w", err) + return nil, fmt.Errorf("set voting period in genesis json: %w", err) } if err := dyno.Set(g, MaxDepositPeriod, "app_state", "gov", "deposit_params", "max_deposit_period"); err != nil { - return nil, fmt.Errorf("failed to set voting period in genesis json: %w", err) + return nil, fmt.Errorf("set voting period in genesis json: %w", err) } out, err := json.Marshal(g) if err != nil { - return nil, fmt.Errorf("failed to marshal genesis bytes to json: %w", err) + return nil, fmt.Errorf("marshal genesis bytes to json: %w", err) } return out, nil } @@ -167,7 +167,7 @@ func ModifyGenesisStrideCounterparty() func(ibc.ChainConfig, []byte) ([]byte, er return func(cfg ibc.ChainConfig, genbz []byte) ([]byte, error) { g := make(map[string]interface{}) if err := json.Unmarshal(genbz, &g); err != nil { - return nil, fmt.Errorf("failed to unmarshal genesis file: %w", err) + return nil, fmt.Errorf("unmarshal genesis file: %w", err) } if err := dyno.Set(g, UnbondingTime, @@ -184,7 +184,7 @@ func ModifyGenesisStrideCounterparty() func(ibc.ChainConfig, []byte) ([]byte, er out, err := json.Marshal(g) if err != nil { - return nil, fmt.Errorf("failed to marshal genesis bytes to json: %w", err) + return nil, fmt.Errorf("marshal genesis bytes to json: %w", err) } return out, nil } diff --git a/interchaintest/stride/stride_icq_test.go b/interchaintest/stride/stride_icq_test.go index cbd414b67..6948fb2fe 100644 --- a/interchaintest/stride/stride_icq_test.go +++ b/interchaintest/stride/stride_icq_test.go @@ -63,7 +63,8 @@ func TestScenarioStrideICAandICQ(t *testing.T) { GasAdjustment: 1.1, ModifyGenesis: ModifyGenesisStride(), EncodingConfig: StrideEncoding(), - }}, + }, + }, { Name: "gaia", ChainName: "gaia", @@ -161,7 +162,7 @@ func TestScenarioStrideICAandICQ(t *testing.T) { Amount: initBal, Denom: strideCfg.Denom, }) - require.NoError(t, err, "failed to fund stride admin account") + require.NoError(t, err, "fund stride admin account") logger.Info("TestScenarioStrideICAandICQ [4]") diff --git a/internal/relayertest/system.go b/internal/relayertest/system.go index 59daad624..a87b4028a 100644 
--- a/internal/relayertest/system.go +++ b/internal/relayertest/system.go @@ -97,7 +97,7 @@ func (s *System) MustRunWithInput(t *testing.T, in io.Reader, args ...string) Ru res := s.RunWithInput(zaptest.NewLogger(t), in, args...) if res.Err != nil { - t.Logf("Error executing %v: %v", args, res.Err) + t.Logf("executing %v: %v", args, res.Err) t.Logf("Stdout: %q", res.Stdout.String()) t.Logf("Stderr: %q", res.Stderr.String()) t.FailNow() @@ -131,17 +131,17 @@ func (s *System) MustGetConfig(t *testing.T) (config cmd.ConfigInputWrapper) { t.Helper() configBz, err := os.ReadFile(filepath.Join(s.HomeDir, "config", "config.yaml")) - require.NoError(t, err, "failed to read config file") + require.NoError(t, err, "read config file") err = yaml.Unmarshal(configBz, &config) - require.NoError(t, err, "failed to unmarshal config file") + require.NoError(t, err, "unmarshal config file") return config } func (s *System) WriteConfig(t *testing.T, contents []byte) error { t.Helper() - return os.WriteFile(filepath.Join(s.HomeDir, "config", "config.yaml"), contents, 0600) + return os.WriteFile(filepath.Join(s.HomeDir, "config", "config.yaml"), contents, 0o600) } // A fixed mnemonic and its resulting cosmos address, helpful for tests that need a mnemonic. diff --git a/relayer/chain.go b/relayer/chain.go index dfcf728cb..638657e12 100644 --- a/relayer/chain.go +++ b/relayer/chain.go @@ -4,10 +4,11 @@ import ( "context" "encoding/json" "fmt" - "github.com/avast/retry-go/v4" "net/url" "time" + "github.com/avast/retry-go/v4" + "github.com/cosmos/cosmos-sdk/crypto/hd" clienttypes "github.com/cosmos/ibc-go/v8/modules/core/02-client/types" "github.com/cosmos/relayer/v2/relayer/provider" @@ -156,7 +157,7 @@ func (c *Chain) CreateTestKey() error { func (c *Chain) GetTimeout() (time.Duration, error) { timeout, err := time.ParseDuration(c.ChainProvider.Timeout()) if err != nil { - return 0, fmt.Errorf("failed to parse timeout (%s) for chain %s: %w", c.ChainProvider.Timeout(), c.ChainID(), err) + return 0, fmt.Errorf("parse timeout (%s) for chain %s: %w", c.ChainProvider.Timeout(), c.ChainID(), err) } return timeout, nil } diff --git a/relayer/chains/cosmos/account.go b/relayer/chains/cosmos/account.go index 311939589..5f431458d 100644 --- a/relayer/chains/cosmos/account.go +++ b/relayer/chains/cosmos/account.go @@ -45,7 +45,7 @@ func (cc *CosmosProvider) GetAccountWithHeight(_ client.Context, addr sdk.AccAdd nBlockHeight, err := strconv.Atoi(blockHeight[0]) if err != nil { - return nil, 0, fmt.Errorf("failed to parse block height: %w", err) + return nil, 0, fmt.Errorf("parse block height: %w", err) } var acc authtypes.AccountI diff --git a/relayer/chains/cosmos/cosmos_chain_processor.go b/relayer/chains/cosmos/cosmos_chain_processor.go index 6d5a30fc3..2c627697c 100644 --- a/relayer/chains/cosmos/cosmos_chain_processor.go +++ b/relayer/chains/cosmos/cosmos_chain_processor.go @@ -103,7 +103,7 @@ func (l latestClientState) update(ctx context.Context, clientInfo chains.ClientI cs, err := ccp.chainProvider.queryTMClientState(ctx, 0, clientInfo.ClientID) if err != nil { ccp.log.Error( - "Failed to query client state to get trusting period", + "Query client state to get trusting period.", zap.String("client_id", clientInfo.ClientID), zap.Error(err), ) @@ -138,8 +138,8 @@ func (ccp *CosmosChainProcessor) latestHeightWithRetry(ctx context.Context) (lat latestHeight, err = ccp.chainProvider.QueryLatestHeight(latestHeightQueryCtx) return err }, retry.Context(ctx), retry.Attempts(latestHeightQueryRetries), 
retry.Delay(latestHeightQueryRetryDelay), retry.LastErrorOnly(true), retry.OnRetry(func(n uint, err error) { - ccp.log.Error( - "Failed to query latest height", + ccp.log.Debug( + "Retrying query latest height.", zap.Uint("attempt", n+1), zap.Uint("max_attempts", latestHeightQueryRetries), zap.Error(err), @@ -157,8 +157,8 @@ func (ccp *CosmosChainProcessor) nodeStatusWithRetry(ctx context.Context) (statu status, err = ccp.chainProvider.QueryStatus(latestHeightQueryCtx) return err }, retry.Context(ctx), retry.Attempts(latestHeightQueryRetries), retry.Delay(latestHeightQueryRetryDelay), retry.LastErrorOnly(true), retry.OnRetry(func(n uint, err error) { - ccp.log.Error( - "Failed to query node status", + ccp.log.Debug( + "Retrying query node status.", zap.Uint("attempt", n+1), zap.Uint("max_attempts", latestHeightQueryRetries), zap.Error(err), @@ -231,7 +231,7 @@ func (ccp *CosmosChainProcessor) Run(ctx context.Context, initialBlockHistory ui status, err := ccp.nodeStatusWithRetry(ctx) if err != nil { ccp.log.Error( - "Failed to query latest height after max attempts", + "Query latest height after max attempts", zap.Uint("attempts", latestHeightQueryRetries), zap.Error(err), ) @@ -269,7 +269,7 @@ func (ccp *CosmosChainProcessor) Run(ctx context.Context, initialBlockHistory ui return err } - ccp.log.Debug("Entering main query loop") + ccp.log.Debug("Entering main query loop.") ticker := time.NewTicker(persistence.minQueryLoopDuration) defer ticker.Stop() @@ -293,7 +293,7 @@ func (ccp *CosmosChainProcessor) initializeConnectionState(ctx context.Context) defer cancel() connections, err := ccp.chainProvider.QueryConnections(ctx) if err != nil { - return fmt.Errorf("error querying connections: %w", err) + return fmt.Errorf("querying connections: %w", err) } for _, c := range connections { ccp.connectionClients[c.Id] = c.ClientId @@ -314,7 +314,7 @@ func (ccp *CosmosChainProcessor) initializeChannelState(ctx context.Context) err channels, err := ccp.chainProvider.QueryChannels(ctx) if err != nil { - return fmt.Errorf("error querying channels: %w", err) + return fmt.Errorf("querying channels: %w", err) } for _, ch := range channels { @@ -351,7 +351,7 @@ func (ccp *CosmosChainProcessor) queryCycle( if err != nil { // don't want to cause CosmosChainProcessor to quit here, can retry again next cycle. ccp.log.Error( - "Failed to query node status after max attempts", + "Query node status after max attempts.", zap.Uint("attempts", latestHeightQueryRetries), zap.Error(err), ) @@ -360,11 +360,6 @@ func (ccp *CosmosChainProcessor) queryCycle( persistence.latestHeight = status.SyncInfo.LatestBlockHeight - // This debug log is very noisy, but is helpful when debugging new chains. 
- // ccp.log.Debug("Queried latest height", - // zap.Int64("latest_height", persistence.latestHeight), - // ) - if ccp.metrics != nil { ccp.CollectMetrics(ctx, persistence) } @@ -376,9 +371,9 @@ func (ccp *CosmosChainProcessor) queryCycle( if (persistence.latestHeight - persistence.latestQueriedBlock) < int64(defaultInSyncNumBlocksThreshold) { ccp.inSync = true firstTimeInSync = true - ccp.log.Info("Chain is in sync", zap.Bool("first time", firstTimeInSync)) + ccp.log.Info("Chain in sync.", zap.Bool("first time", firstTimeInSync)) } else { - ccp.log.Info("Chain is not yet in sync", + ccp.log.Info("Chain not in sync.", zap.Int64("latest_queried_block", persistence.latestQueriedBlock), zap.Int64("latest_height", persistence.latestHeight), ) @@ -404,8 +399,6 @@ func (ccp *CosmosChainProcessor) queryCycle( firstHeightToQuery++ } - startTime := time.Now() - for i := firstHeightToQuery; i <= persistence.latestHeight; i++ { var ( eg errgroup.Group @@ -441,14 +434,14 @@ func (ccp *CosmosChainProcessor) queryCycle( if err := eg.Wait(); err != nil { ccp.log.Debug( - "Error querying block data", + "querying block data", zap.Int64("height", i), zap.Error(err), ) persistence.retriesAtLatestQueriedBlock++ if persistence.retriesAtLatestQueriedBlock >= blockMaxRetries { - ccp.log.Warn("Reached max retries querying for block, skipping", zap.Int64("height", i)) + ccp.log.Error("Reached max retries querying for block, skipping", zap.Int64("height", i)) // skip this block. now depends on flush to pickup anything missed in the block. persistence.latestQueriedBlock = i persistence.retriesAtLatestQueriedBlock = 0 @@ -457,13 +450,6 @@ func (ccp *CosmosChainProcessor) queryCycle( break } - ccp.log.Debug( - "Queried block", - zap.Int64("height", i), - zap.Int64("latest", persistence.latestHeight), - zap.Int64("delta", persistence.latestHeight-i), - ) - persistence.retriesAtLatestQueriedBlock = 0 latestHeader = ibcHeader.(provider.TendermintIBCHeader) @@ -493,12 +479,11 @@ func (ccp *CosmosChainProcessor) queryCycle( messages := chains.IbcMessagesFromEvents(ccp.log, tx.Events, chainID, heightUint64) for _, m := range messages { - if stuckPacket != nil && ccp.chainProvider.ChainId() == stuckPacket.ChainID && int64(stuckPacket.StartHeight) <= i && i <= int64(stuckPacket.EndHeight) { - switch t := m.Info.(type) { - case *chains.PacketInfo: - ccp.log.Info("found stuck packet message", zap.Any("seq", t.Sequence), zap.Any("height", t.Height)) + switch t := m.Info.(type) { + case *chains.PacketInfo: + if stuckPacket != nil && ccp.chainProvider.ChainId() == stuckPacket.ChainID && int64(stuckPacket.StartHeight) <= i && i <= int64(stuckPacket.EndHeight) { + ccp.log.Info("Found stuck packet message.", zap.Any("seq", t.Sequence), zap.Any("height", t.Height)) } - ccp.log.Debug("found stuck message (all data)", zap.Any("msg", m)) } ccp.handleMessage(ctx, m, ibcMessagesCache) } @@ -514,12 +499,7 @@ func (ccp *CosmosChainProcessor) queryCycle( i = persistence.latestHeight newLatestQueriedBlock = afterUnstuck - ccp.log.Info("Parsed stuck packet height, skipping to current", zap.Any("new latest queried block", newLatestQueriedBlock)) - } - - if i%100 == 0 { - elapsed := time.Since(startTime) - ccp.log.Info("Processed block", zap.Int64("height", i), zap.Duration("elapsed", elapsed), zap.Int64("latest", persistence.latestHeight)) + ccp.log.Info("Parsed stuck packet height, skipping to current.", zap.Any("new latest queried block", newLatestQueriedBlock)) } } @@ -541,7 +521,7 @@ func (ccp *CosmosChainProcessor) queryCycle( clientID := 
pp.RelevantClientID(chainID) clientState, err := ccp.clientState(ctx, clientID) if err != nil { - ccp.log.Error("Error fetching client state", + ccp.log.Error("Fetching client state.", zap.String("client_id", clientID), zap.Error(err), ) @@ -585,7 +565,7 @@ func (ccp *CosmosChainProcessor) CurrentRelayerBalance(ctx context.Context) { gp, err := sdk.ParseDecCoins(ccp.chainProvider.PCfg.GasPrices) if err != nil { ccp.log.Error( - "Failed to parse gas prices", + "Parse gas prices.", zap.Error(err), ) } @@ -596,14 +576,14 @@ func (ccp *CosmosChainProcessor) CurrentRelayerBalance(ctx context.Context) { relayerWalletBalances, err := ccp.chainProvider.QueryBalance(ctx, ccp.chainProvider.Key()) if err != nil { ccp.log.Error( - "Failed to query relayer balance", + "Query relayer balance.", zap.Error(err), ) } address, err := ccp.chainProvider.Address() if err != nil { ccp.log.Error( - "Failed to get relayer bech32 wallet addresss", + "Get relayer bech32 wallet address.", zap.Error(err), ) } diff --git a/relayer/chains/cosmos/feegrant.go b/relayer/chains/cosmos/feegrant.go index 0d1ef1f4e..c7378a0fb 100644 --- a/relayer/chains/cosmos/feegrant.go +++ b/relayer/chains/cosmos/feegrant.go @@ -113,15 +113,15 @@ func (cc *CosmosProvider) GetGranteeValidBasicGrants(granteeKey string) ([]*feeg // True if the grant has not expired and all coins have positive balances, false otherwise // Note: technically, any single coin with a positive balance makes the grant usable func isValidGrant(a *feegrant.BasicAllowance) bool { - //grant expired due to time limit + // grant expired due to time limit if a.Expiration != nil && time.Now().After(*a.Expiration) { return false } - //feegrant without a spending limit specified allows unlimited fees to be spent + // feegrant without a spending limit specified allows unlimited fees to be spent valid := true - //spending limit is specified, check if there are funds remaining on every coin + // spending limit is specified, check if there are funds remaining on every coin if a.SpendLimit != nil { for _, coin := range a.SpendLimit { if coin.Amount.LTE(sdkmath.ZeroInt()) { @@ -272,7 +272,7 @@ func (cc *CosmosProvider) EnsureBasicGrants(ctx context.Context, memo string, ga } else { granterAcc, err = cc.GetKeyAddressForKey(granterKey) if err != nil { - cc.log.Error("Unknown key", zap.String("name", granterKey)) + cc.log.Error("Unknown key.", zap.String("name", granterKey)) return nil, err } @@ -301,7 +301,7 @@ func (cc *CosmosProvider) EnsureBasicGrants(ctx context.Context, memo string, ga granteeAcc, err := cc.GetKeyAddressForKey(grantee) if err != nil { - cc.log.Error("Unknown grantee", zap.String("key_name", grantee)) + cc.log.Error("Unknown grantee.", zap.String("key_name", grantee)) return nil, err } @@ -319,7 +319,7 @@ func (cc *CosmosProvider) EnsureBasicGrants(ctx context.Context, memo string, ga if !hasGrant && !cc.PCfg.FeeGrants.IsExternalGranter { grantsNeeded++ - cc.log.Info("Creating feegrant", zap.String("granter", granterAddr), zap.String("grantee", granteeAddr)) + cc.log.Info("Creating feegrant.", zap.String("granter", granterAddr), zap.String("grantee", granteeAddr)) grantMsg, err := cc.getMsgGrantBasicAllowance(granterAcc, granteeAcc) if err != nil { @@ -327,7 +327,7 @@ func (cc *CosmosProvider) EnsureBasicGrants(ctx context.Context, memo string, ga } msgs = append(msgs, grantMsg) } else if !hasGrant { - cc.log.Warn("Missing feegrant", zap.String("external_granter", granterAddr), zap.String("grantee", granteeAddr)) + cc.log.Error("Missing feegrant.", 
zap.String("external_granter", granterAddr), zap.String("grantee", granteeAddr)) } } @@ -346,11 +346,11 @@ func (cc *CosmosProvider) EnsureBasicGrants(ctx context.Context, memo string, ga if err != nil { return nil, err } else if txResp != nil && txResp.TxResponse != nil && txResp.TxResponse.Code != 0 { - cc.log.Warn("Feegrant TX failed", zap.String("tx_hash", txResp.TxResponse.TxHash), zap.Uint32("code", txResp.TxResponse.Code)) + cc.log.Error("Feegrant TX.", zap.String("tx_hash", txResp.TxResponse.TxHash), zap.Uint32("code", txResp.TxResponse.Code)) return nil, fmt.Errorf("could not configure feegrant for granter %s", granterKey) } - cc.log.Info("Feegrant succeeded", zap.Int("new_grants", grantsNeeded), zap.Int("existing_grants", numGrantees-grantsNeeded), zap.String("tx_hash", txResp.TxResponse.TxHash)) + cc.log.Info("Feegrant succeeded.", zap.Int("new_grants", grantsNeeded), zap.Int("existing_grants", numGrantees-grantsNeeded), zap.String("tx_hash", txResp.TxResponse.TxHash)) return txResp.TxResponse, err } @@ -376,15 +376,14 @@ func (cc *CosmosProvider) GrantAllGranteesBasicAllowance(ctx context.Context, ga } granterAddr, err := cc.GetKeyAddressForKey(granterKey) if err != nil { - cc.log.Error("Unknown granter", zap.String("key_name", granterKey)) + cc.log.Error("Unknown granter.", zap.String("key_name", granterKey)) return err } for _, grantee := range cc.PCfg.FeeGrants.ManagedGrantees { granteeAddr, err := cc.GetKeyAddressForKey(grantee) - if err != nil { - cc.log.Error("Unknown grantee", zap.String("key_name", grantee)) + cc.log.Error("Unknown grantee.", zap.String("key_name", grantee)) return err } @@ -414,15 +413,14 @@ func (cc *CosmosProvider) GrantAllGranteesBasicAllowanceWithExpiration(ctx conte granterAddr, err := cc.GetKeyAddressForKey(granterKey) if err != nil { - cc.log.Error("Unknown granter", zap.String("key_name", granterKey)) + cc.log.Error("Unknown granter.", zap.String("key_name", granterKey)) return err } for _, grantee := range cc.PCfg.FeeGrants.ManagedGrantees { granteeAddr, err := cc.GetKeyAddressForKey(grantee) - if err != nil { - cc.log.Error("Unknown grantee", zap.String("key_name", grantee)) + cc.log.Error("Unknown grantee.", zap.String("key_name", grantee)) return err } diff --git a/relayer/chains/cosmos/keys_test.go b/relayer/chains/cosmos/keys_test.go index 210dc38e3..f1c2edaba 100644 --- a/relayer/chains/cosmos/keys_test.go +++ b/relayer/chains/cosmos/keys_test.go @@ -22,11 +22,11 @@ func testProviderWithKeystore(t *testing.T, accountPrefix string, extraCodecs [] } p, err := cfg.NewProvider(zap.NewNop(), homePath, true, "test_chain") if err != nil { - t.Fatalf("Error creating provider: %v", err) + t.Fatalf("creating provider: %v", err) } err = p.CreateKeystore(homePath) if err != nil { - t.Fatalf("Error creating keystore: %v", err) + t.Fatalf("creating keystore: %v", err) } return p } diff --git a/relayer/chains/cosmos/log.go b/relayer/chains/cosmos/log.go index ccf29816d..b56169b5f 100644 --- a/relayer/chains/cosmos/log.go +++ b/relayer/chains/cosmos/log.go @@ -65,7 +65,7 @@ func (cc *CosmosProvider) LogFailedTx(res *provider.RelayerTxResponse, err error // Make a copy since we may continue to the warning errorFields := append(fields, zap.Error(err)) cc.log.Error( - "Failed sending cosmos transaction", + "Sending cosmos transaction.", errorFields..., ) @@ -79,8 +79,8 @@ func (cc *CosmosProvider) LogFailedTx(res *provider.RelayerTxResponse, err error fields = append(fields, zap.NamedError("sdk_error", sdkErr)) } fields = append(fields, 
zap.Object("response", res)) - cc.log.Warn( - "Sent transaction but received failure response", + cc.log.Error( + "Sent transaction but got non-success code.", fields..., ) } @@ -112,12 +112,12 @@ func (cc *CosmosProvider) LogSuccessTx(res *sdk.TxResponse, msgs []provider.Rela } } else { cc.log.Debug( - "Failed to convert message to Tx type", + "convert message to Tx type", zap.Stringer("type", reflect.TypeOf(m)), ) } } else { - cc.log.Debug("Failed to unpack response Tx into sdk.Msg", zap.Error(err)) + cc.log.Debug("unpack response Tx into sdk.Msg", zap.Error(err)) } // Include the height, msgType, and tx_hash @@ -129,7 +129,7 @@ func (cc *CosmosProvider) LogSuccessTx(res *sdk.TxResponse, msgs []provider.Rela // Log the successful transaction with fields cc.log.Info( - "Successful transaction", + "Successful transaction.", fields..., ) } @@ -181,7 +181,7 @@ func getFeePayer(log *zap.Logger, cdc *codec.ProtoCodec, tx *typestx.Tx) string default: signers, _, err := cdc.GetMsgV1Signers(firstMsg) if err != nil { - log.Info("Could not get signers for msg when attempting to get the fee payer", zap.Error(err)) + log.Error("Could not get signers for msg when attempting to get the fee payer.", zap.Error(err)) return "" } diff --git a/relayer/chains/cosmos/message_handlers.go b/relayer/chains/cosmos/message_handlers.go index db563e361..31363d713 100644 --- a/relayer/chains/cosmos/message_handlers.go +++ b/relayer/chains/cosmos/message_handlers.go @@ -10,7 +10,6 @@ import ( "github.com/cosmos/relayer/v2/relayer/processor" "github.com/cosmos/relayer/v2/relayer/provider" "go.uber.org/zap" - "go.uber.org/zap/zapcore" ) func (ccp *CosmosChainProcessor) handleMessage(ctx context.Context, m chains.IbcMessage, c processor.IBCMessagesCache) { @@ -31,7 +30,7 @@ func (ccp *CosmosChainProcessor) handlePacketMessage(eventType string, pi provider.PacketInfo, c processor.IBCMessagesCache) { k, err := processor.PacketInfoChannelKey(eventType, pi) if err != nil { - ccp.log.Error("Unexpected error handling packet message", + ccp.log.Error("Unexpected error handling packet message.", zap.String("event_type", eventType), zap.Uint64("sequence", pi.Sequence), zap.Inline(k), @@ -45,7 +44,7 @@ func (ccp *CosmosChainProcessor) handlePacketMessage(eventType string, pi provid } if !c.PacketFlow.ShouldRetainSequence(ccp.pathProcessors, k, ccp.chainProvider.ChainId(), eventType, pi.Sequence) { - ccp.log.Debug("Not retaining packet message", + ccp.log.Debug("Not retaining packet message.", zap.String("event_type", eventType), zap.Uint64("sequence", pi.Sequence), zap.Inline(k), @@ -53,14 +52,8 @@ func (ccp *CosmosChainProcessor) handlePacketMessage(eventType string, pi provid return } - ccp.log.Debug("Retaining packet message", - zap.String("event_type", eventType), - zap.Uint64("sequence", pi.Sequence), - zap.Uint64("height", pi.Height), - zap.Inline(k), - ) - c.PacketFlow.Retain(k, eventType, pi) + ccp.logPacketMessage(eventType, pi) } @@ -88,7 +81,12 @@ func (ccp *CosmosChainProcessor) handleChannelMessage(eventType string, ci provi ccp.channelStateCache.SetOpen(channelKey, false, ci.Order) case chantypes.EventTypeChannelOpenAck, chantypes.EventTypeChannelOpenConfirm: ccp.channelStateCache.SetOpen(channelKey, true, ci.Order) - ccp.logChannelOpenMessage(eventType, ci) + fields := []zap.Field{ + zap.String("channel_id", ci.ChannelID), + zap.String("connection_id", ci.ConnID), + zap.String("port_id", ci.PortID), + } + ccp.log.Info("Created new channel.",
fields...) case chantypes.EventTypeChannelClosed, chantypes.EventTypeChannelCloseConfirm: for k := range ccp.channelStateCache { if k.PortID == ci.PortID && k.ChannelID == ci.ChannelID { @@ -103,7 +101,13 @@ func (ccp *CosmosChainProcessor) handleChannelMessage(eventType string, ci provi ibcMessagesCache.ChannelHandshake.Retain(channelKey, eventType, ci) - ccp.logChannelMessage(eventType, ci) + ccp.log.With(zap.String("event_type", eventType)).Debug("Retained channel message.", []zap.Field{ + zap.String("channel_id", ci.ChannelID), + zap.String("port_id", ci.PortID), + zap.String("counterparty_channel_id", ci.CounterpartyChannelID), + zap.String("counterparty_port_id", ci.CounterpartyPortID), + zap.String("connection_id", ci.ConnID), + }...) } func (ccp *CosmosChainProcessor) handleConnectionMessage(eventType string, ci provider.ConnectionInfo, ibcMessagesCache processor.IBCMessagesCache) { @@ -126,17 +130,22 @@ func (ccp *CosmosChainProcessor) handleConnectionMessage(eventType string, ci pr } else { // Clear out MsgInitKeys once we have the counterparty connection ID delete(ccp.connectionStateCache, connectionKey.MsgInitKey()) - open := (eventType == conntypes.EventTypeConnectionOpenAck || eventType == conntypes.EventTypeConnectionOpenConfirm) + open := eventType == conntypes.EventTypeConnectionOpenAck || eventType == conntypes.EventTypeConnectionOpenConfirm ccp.connectionStateCache[connectionKey] = open } ibcMessagesCache.ConnectionHandshake.Retain(connectionKey, eventType, ci) - ccp.logConnectionMessage(eventType, ci) + ccp.log.With(zap.String("event_type", eventType)).Debug("Retained connection message", []zap.Field{ + zap.String("client_id", ci.ClientID), + zap.String("connection_id", ci.ConnID), + zap.String("counterparty_client_id", ci.CounterpartyClientID), + zap.String("counterparty_connection_id", ci.CounterpartyConnID), + }...) } func (ccp *CosmosChainProcessor) handleClientMessage(ctx context.Context, eventType string, ci chains.ClientInfo) { ccp.latestClientState.update(ctx, ci, ccp) - ccp.logObservedIBCMessage(eventType, zap.String("client_id", ci.ClientID)) + ccp.log.With(zap.String("event_type", eventType)).Debug("Observed client message.", []zap.Field{zap.String("client_id", ci.ClientID)}...) } func (ccp *CosmosChainProcessor) handleClientICQMessage( @@ -145,17 +154,17 @@ func (ccp *CosmosChainProcessor) handleClientICQMessage( c processor.IBCMessagesCache, ) { c.ClientICQ.Retain(processor.ClientICQType(eventType), ci) - ccp.logClientICQMessage(eventType, ci) -} - -func (ccp *CosmosChainProcessor) logObservedIBCMessage(m string, fields ...zap.Field) { - ccp.log.With(zap.String("event_type", m)).Debug("Observed IBC message", fields...) + ccp.log.With(zap.String("event_type", eventType)).Debug("Retained client ICQ message", []zap.Field{ + zap.String("type", ci.Type), + zap.String("query_id", string(ci.QueryID)), + zap.String("request", hex.EncodeToString(ci.Request)), + zap.String("chain_id", ci.Chain), + zap.String("connection_id", ci.Connection), + zap.Uint64("height", ci.Height), + }...) 
} func (ccp *CosmosChainProcessor) logPacketMessage(message string, pi provider.PacketInfo) { - if !ccp.log.Core().Enabled(zapcore.DebugLevel) { - return - } fields := []zap.Field{ zap.Uint64("sequence", pi.Sequence), zap.String("src_channel", pi.SourceChannel), @@ -172,44 +181,5 @@ func (ccp *CosmosChainProcessor) logPacketMessage(message string, pi provider.Pa if pi.TimeoutTimestamp > 0 { fields = append(fields, zap.Uint64("timeout_timestamp", pi.TimeoutTimestamp)) } - ccp.logObservedIBCMessage(message, fields...) -} - -func (ccp *CosmosChainProcessor) logChannelMessage(message string, ci provider.ChannelInfo) { - ccp.logObservedIBCMessage(message, - zap.String("channel_id", ci.ChannelID), - zap.String("port_id", ci.PortID), - zap.String("counterparty_channel_id", ci.CounterpartyChannelID), - zap.String("counterparty_port_id", ci.CounterpartyPortID), - zap.String("connection_id", ci.ConnID), - ) -} - -func (ccp *CosmosChainProcessor) logChannelOpenMessage(message string, ci provider.ChannelInfo) { - fields := []zap.Field{ - zap.String("channel_id", ci.ChannelID), - zap.String("connection_id", ci.ConnID), - zap.String("port_id", ci.PortID), - } - ccp.log.Info("Successfully created new channel", fields...) -} - -func (ccp *CosmosChainProcessor) logConnectionMessage(message string, ci provider.ConnectionInfo) { - ccp.logObservedIBCMessage(message, - zap.String("client_id", ci.ClientID), - zap.String("connection_id", ci.ConnID), - zap.String("counterparty_client_id", ci.CounterpartyClientID), - zap.String("counterparty_connection_id", ci.CounterpartyConnID), - ) -} - -func (ccp *CosmosChainProcessor) logClientICQMessage(icqType string, ci provider.ClientICQInfo) { - ccp.logObservedIBCMessage(icqType, - zap.String("type", ci.Type), - zap.String("query_id", string(ci.QueryID)), - zap.String("request", hex.EncodeToString(ci.Request)), - zap.String("chain_id", ci.Chain), - zap.String("connection_id", ci.Connection), - zap.Uint64("height", ci.Height), - ) + ccp.log.With(zap.String("event_type", message)).Debug("Retained packet message.", fields...) 
} diff --git a/relayer/chains/cosmos/query.go b/relayer/chains/cosmos/query.go index 15b15044e..f1333328c 100644 --- a/relayer/chains/cosmos/query.go +++ b/relayer/chains/cosmos/query.go @@ -324,7 +324,7 @@ func (cc *CosmosProvider) queryParamsSubspaceTime(ctx context.Context, subspace res, err := queryClient.Params(ctx, ¶ms) if err != nil { - return 0, fmt.Errorf("failed to make %s params request: %w", subspace, err) + return 0, fmt.Errorf("make %s params request: %w", subspace, err) } if res.Param.Value == "" { @@ -333,7 +333,7 @@ func (cc *CosmosProvider) queryParamsSubspaceTime(ctx context.Context, subspace unbondingValue, err := strconv.ParseUint(strings.ReplaceAll(res.Param.Value, `"`, ""), 10, 64) if err != nil { - return 0, fmt.Errorf("failed to parse %s from %s param: %w", key, subspace, err) + return 0, fmt.Errorf("parse %s from %s param: %w", key, subspace, err) } return time.Duration(unbondingValue), nil @@ -362,7 +362,7 @@ func (cc *CosmosProvider) QueryUnbondingPeriod(ctx context.Context) (time.Durati } return 0, - fmt.Errorf("failed to query unbonding period from ccvconsumer, staking & fallback : %w: %s : %s", consumerErr, stakingParamsErr.Error(), err.Error()) + fmt.Errorf("query unbonding period from ccvconsumer, staking & fallback : %w: %s : %s", consumerErr, stakingParamsErr.Error(), err.Error()) } // QueryTendermintProof performs an ABCI query with the given key and returns @@ -1200,7 +1200,7 @@ func (cc *CosmosProvider) QueryLatestHeight(ctx context.Context) (int64, error) func (cc *CosmosProvider) QueryStatus(ctx context.Context) (*coretypes.ResultStatus, error) { status, err := cc.RPCClient.Status(ctx) if err != nil { - return nil, fmt.Errorf("failed to query node status: %w", err) + return nil, fmt.Errorf("query node status: %w", err) } return status, nil } diff --git a/relayer/chains/cosmos/tx.go b/relayer/chains/cosmos/tx.go index 5f223fea3..0a5af93f3 100644 --- a/relayer/chains/cosmos/tx.go +++ b/relayer/chains/cosmos/tx.go @@ -127,8 +127,8 @@ func (cc *CosmosProvider) SendMessages(ctx context.Context, msgs []provider.Rela if err := retry.Do(func() error { return cc.SendMessagesToMempool(ctx, msgs, memo, ctx, []func(*provider.RelayerTxResponse, error){callback}) }, retry.Context(ctx), rtyAtt, rtyDel, rtyErr, retry.OnRetry(func(n uint, err error) { - cc.log.Info( - "Error building or broadcasting transaction", + cc.log.Debug( + "Retrying building or broadcasting transaction.", zap.String("chain_id", cc.PCfg.ChainID), zap.Uint("attempt", n+1), zap.Uint("max_attempts", rtyAttNum), @@ -168,12 +168,12 @@ func (cc *CosmosProvider) SendMessagesToMempool( for _, msg := range msgs { types = append(types, msg.Type()) } - cc.log.Debug("Sending messages to mempool", zap.Any("types", types), zap.Any("chain", cc.PCfg.ChainID)) + cc.log.Debug("Sending messages to mempool.", zap.Any("types", types), zap.Any("chain", cc.PCfg.ChainID)) } txSignerKey, feegranterKeyOrAddr, err := cc.buildSignerConfig(msgs) if err != nil { - return err + return fmt.Errorf("build signer config: %w", err) } sequenceGuard := ensureSequenceGuard(cc, txSignerKey) @@ -187,7 +187,7 @@ func (cc *CosmosProvider) SendMessagesToMempool( cc.handleAccountSequenceMismatchError(sequenceGuard, err) } - return err + return fmt.Errorf("build messages: %w", err) } if err := cc.broadcastTx(ctx, txBytes, msgs, fees, asyncCtx, defaultBroadcastWaitTimeout, asyncCallbacks); err != nil { @@ -195,7 +195,7 @@ func (cc *CosmosProvider) SendMessagesToMempool( cc.handleAccountSequenceMismatchError(sequenceGuard, err) } - return 
err + return fmt.Errorf("broadcast tx: %w", err) } cc.log.Debug("Transaction successfully sent to mempool", zap.String("chain", cc.PCfg.ChainID)) @@ -348,7 +348,7 @@ func (cc *CosmosProvider) SendMsgsWith(ctx context.Context, msgs []sdk.Msg, memo func (cc *CosmosProvider) sdkError(codespace string, code uint32) error { // ABCIError will return an error other than "unknown" if syncRes.Code is a registered error in syncRes.Codespace // This catches all of the sdk errors https://github.com/cosmos/cosmos-sdk/blob/f10f5e5974d2ecbf9efc05bc0bfe1c99fdeed4b6/types/errors/errors.go - err := errors.Unwrap(sdkerrors.ABCIError(codespace, code, "error broadcasting transaction")) + err := errors.Unwrap(sdkerrors.ABCIError(codespace, code, "broadcasting transaction")) if err.Error() != errUnknown { return err } @@ -375,7 +375,7 @@ func (cc *CosmosProvider) broadcastTx( if isErr && res == nil { // There are some cases where BroadcastTxSync will return an error but the associated // ResultBroadcastTx will be nil. - return err + return fmt.Errorf("broadcast tx sync: res is nil but got an err: %w", err) } rlyResp := &provider.RelayerTxResponse{ TxHash: res.Hash.String(), @@ -386,7 +386,7 @@ if isFailed { err = cc.sdkError(res.Codespace, res.Code) if err == nil { - err = fmt.Errorf("transaction failed to execute: codespace: %s, code: %d, log: %s", res.Codespace, res.Code, res.Log) + err = fmt.Errorf("broadcast tx sync failure: codespace: %s, code: %d, log: %s", res.Codespace, res.Code, res.Log) } } cc.LogFailedTx(rlyResp, err, msgs) @@ -394,12 +394,16 @@ } address, err := cc.Address() if err != nil { - return fmt.Errorf("failed to get relayer bech32 wallet address: %w", err) + return fmt.Errorf("get relayer bech32 wallet address: %w", err) } cc.UpdateFeesSpent(cc.ChainId(), cc.Key(), address, fees) - // TODO: maybe we need to check if the node has tx indexing enabled? - // if not, we need to find a new way to block until inclusion in a block + /* + TODO(dym): look at this TODO from original authors: + """TODO: maybe we need to check if the node has tx indexing enabled? + if not, we need to find a new way to block until inclusion in a block""" + Relevant for us?
+ */ go cc.waitForTx(asyncCtx, res.Hash, msgs, asyncTimeout, asyncCallbacks) @@ -417,7 +421,7 @@ func (cc *CosmosProvider) waitForTx( ) { res, err := cc.waitForBlockInclusion(ctx, txHash, waitTimeout) if err != nil { - cc.log.Error("Failed to wait for block inclusion", zap.Error(err)) + cc.log.Error("Wait for block inclusion.", zap.Error(err)) if len(callbacks) > 0 { for _, cb := range callbacks { // Call each callback in order since waitForTx is already invoked asyncronously @@ -836,7 +840,7 @@ func (cc *CosmosProvider) PacketCommitment( key := host.PacketCommitmentKey(msgTransfer.SourcePort, msgTransfer.SourceChannel, msgTransfer.Sequence) commitment, proof, proofHeight, err := cc.QueryTendermintProof(ctx, int64(height), key) if err != nil { - return provider.PacketProof{}, fmt.Errorf("error querying comet proof for packet commitment: %w", err) + return provider.PacketProof{}, fmt.Errorf("querying comet proof for packet commitment: %w", err) } // check if packet commitment exists if len(commitment) == 0 { @@ -877,7 +881,7 @@ func (cc *CosmosProvider) PacketAcknowledgement( key := host.PacketAcknowledgementKey(msgRecvPacket.DestPort, msgRecvPacket.DestChannel, msgRecvPacket.Sequence) ack, proof, proofHeight, err := cc.QueryTendermintProof(ctx, int64(height), key) if err != nil { - return provider.PacketProof{}, fmt.Errorf("error querying comet proof for packet acknowledgement: %w", err) + return provider.PacketProof{}, fmt.Errorf("querying comet proof for packet acknowledgement: %w", err) } if len(ack) == 0 { return provider.PacketProof{}, chantypes.ErrInvalidAcknowledgement @@ -917,7 +921,7 @@ func (cc *CosmosProvider) PacketReceipt( key := host.PacketReceiptKey(msgTransfer.DestPort, msgTransfer.DestChannel, msgTransfer.Sequence) _, proof, proofHeight, err := cc.QueryTendermintProof(ctx, int64(height), key) if err != nil { - return provider.PacketProof{}, fmt.Errorf("error querying comet proof for packet receipt: %w", err) + return provider.PacketProof{}, fmt.Errorf("querying comet proof for packet receipt: %w", err) } return provider.PacketProof{ @@ -937,7 +941,7 @@ func (cc *CosmosProvider) NextSeqRecv( key := host.NextSequenceRecvKey(msgTransfer.DestPort, msgTransfer.DestChannel) _, proof, proofHeight, err := cc.QueryTendermintProof(ctx, int64(height), key) if err != nil { - return provider.PacketProof{}, fmt.Errorf("error querying comet proof for next sequence receive: %w", err) + return provider.PacketProof{}, fmt.Errorf("querying comet proof for next sequence receive: %w", err) } return provider.PacketProof{ @@ -1292,14 +1296,14 @@ func (cc *CosmosProvider) MsgUpdateClientHeader(latestHeader provider.IBCHeader, trustedValidatorsProto, err := trustedCosmosHeader.ValidatorSet.ToProto() if err != nil { - return nil, fmt.Errorf("error converting trusted validators to proto object: %w", err) + return nil, fmt.Errorf("converting trusted validators to proto object: %w", err) } signedHeaderProto := latestCosmosHeader.SignedHeader.ToProto() validatorSetProto, err := latestCosmosHeader.ValidatorSet.ToProto() if err != nil { - return nil, fmt.Errorf("error converting validator set to proto object: %w", err) + return nil, fmt.Errorf("converting validator set to proto object: %w", err) } return &tmclient.Header{ @@ -1321,7 +1325,7 @@ func (cc *CosmosProvider) QueryICQWithProof(ctx context.Context, path string, re res, err := cc.QueryABCI(ctx, req) if err != nil { - return provider.ICQProof{}, fmt.Errorf("failed to execute interchain query: %w", err) + return provider.ICQProof{}, 
fmt.Errorf("execute interchain query: %w", err) } return provider.ICQProof{ @@ -1509,14 +1513,14 @@ func (cc *CosmosProvider) InjectTrustedFields(ctx context.Context, header ibcexp return err }, retry.Context(ctx), rtyAtt, rtyDel, rtyErr); err != nil { return nil, fmt.Errorf( - "failed to get trusted header, please ensure header at the height %d has not been pruned by the connected node: %w", + "get trusted header, please ensure header at the height %d has not been pruned by the connected node: %w", h.TrustedHeight.RevisionHeight, err, ) } tvProto, err := trustedValidators.ToProto() if err != nil { - return nil, fmt.Errorf("failed to convert trusted validators to proto: %w", err) + return nil, fmt.Errorf("convert trusted validators to proto: %w", err) } // inject TrustedValidators into header @@ -1540,7 +1544,7 @@ func (cc *CosmosProvider) queryTMClientState(ctx context.Context, srch int64, sr clientState, ok := clientStateExported.(*tmclient.ClientState) if !ok { return &tmclient.ClientState{}, - fmt.Errorf("error when casting exported clientstate to tendermint type, got(%T)", clientStateExported) + fmt.Errorf("when casting exported clientstate to tendermint type, got(%T)", clientStateExported) } return clientState, nil @@ -1562,7 +1566,7 @@ func (cc *CosmosProvider) queryLocalhostClientState(ctx context.Context, srch in clientState, ok := clientStateExported.(*localhost.ClientState) if !ok { return &localhost.ClientState{}, - fmt.Errorf("error when casting exported clientstate to localhost client type, got(%T)", clientStateExported) + fmt.Errorf("when casting exported clientstate to localhost client type, got(%T)", clientStateExported) } return clientState, nil diff --git a/relayer/chains/mock/mock_chain_processor.go b/relayer/chains/mock/mock_chain_processor.go index f0e4b8173..881f08d2d 100644 --- a/relayer/chains/mock/mock_chain_processor.go +++ b/relayer/chains/mock/mock_chain_processor.go @@ -89,7 +89,7 @@ func (mcp *MockChainProcessor) Run(ctx context.Context, initialBlockHistory uint persistence.latestQueriedBlock = latestQueriedBlock } - mcp.log.Info("entering main query loop", zap.String("chain_id", mcp.chainID)) + mcp.log.Debug("Entering main query loop.", zap.String("chain_id", mcp.chainID)) ticker := time.NewTicker(minQueryLoopDuration) defer ticker.Stop() @@ -114,9 +114,9 @@ func (mcp *MockChainProcessor) queryCycle(ctx context.Context, persistence *quer if !mcp.inSync { if (persistence.latestHeight - persistence.latestQueriedBlock) < inSyncNumBlocksThreshold { mcp.inSync = true - mcp.log.Info("chain is in sync", zap.String("chain_id", mcp.chainID)) + mcp.log.Info("Chain in sync.", zap.String("chain_id", mcp.chainID)) } else { - mcp.log.Warn("chain is not yet in sync", + mcp.log.Error("Chain not in sync.", zap.String("chain_id", mcp.chainID), zap.Int64("latest_queried_block", persistence.latestQueriedBlock), zap.Int64("latest_height", persistence.latestHeight), @@ -175,7 +175,6 @@ func (mcp *MockChainProcessor) queryCycle(ctx context.Context, persistence *quer // now pass foundMessages to the path processors for _, pp := range mcp.pathProcessors { - mcp.log.Info("sending messages to path processor", zap.String("chain_id", mcp.chainID)) pp.HandleNewData(mcp.chainID, processor.ChainProcessorCacheData{ LatestBlock: provider.LatestBlock{ Height: uint64(i), @@ -185,6 +184,7 @@ func (mcp *MockChainProcessor) queryCycle(ctx context.Context, persistence *quer InSync: mcp.inSync, ChannelStateCache: channelStateCache, }) + mcp.log.Info("Sent messages to path processor.", 
zap.String("chain_id", mcp.chainID)) } persistence.latestQueriedBlock = i } diff --git a/relayer/chains/mock/mock_chain_processor_test.go b/relayer/chains/mock/mock_chain_processor_test.go index 5b370ac80..f846d3bfc 100644 --- a/relayer/chains/mock/mock_chain_processor_test.go +++ b/relayer/chains/mock/mock_chain_processor_test.go @@ -63,7 +63,7 @@ func TestMockChainAndPathProcessors(t *testing.T) { flushInterval := 6 * time.Hour pathProcessor := processor.NewPathProcessor(log, pathEnd1, pathEnd2, metrics, "", - clientUpdateThresholdTime, flushInterval, relayer.DefaultMaxMsgLength, 0, 1) + clientUpdateThresholdTime, flushInterval, relayer.DefaultMaxMsgLength, 0, 1, nil) eventProcessor := processor.NewEventProcessor(). WithChainProcessors( @@ -75,7 +75,7 @@ func TestMockChainAndPathProcessors(t *testing.T) { Build() err := eventProcessor.Run(ctx) - require.NoError(t, err, "error running event processor") + require.NoError(t, err, "running event processor") pathEnd1LeftoverMsgTransfer := pathProcessor.PathEnd1Messages(mockChannelKey1, chantypes.EventTypeSendPacket) pathEnd1LeftoverMsgRecvPacket := pathProcessor.PathEnd1Messages(mockChannelKey1, chantypes.EventTypeRecvPacket) diff --git a/relayer/chains/parsing.go b/relayer/chains/parsing.go index d51944067..d29e7c6e6 100644 --- a/relayer/chains/parsing.go +++ b/relayer/chains/parsing.go @@ -155,7 +155,7 @@ func (res *ClientInfo) parseClientAttribute(log *zap.Logger, attr sdk.Attribute) case clienttypes.AttributeKeyConsensusHeight: revisionSplit := strings.Split(attr.Value, "-") if len(revisionSplit) != 2 { - log.Error("Error parsing client consensus height", + log.Error("Parsing client consensus height.", zap.String("client_id", res.ClientID), zap.String("value", attr.Value), ) @@ -164,7 +164,7 @@ func (res *ClientInfo) parseClientAttribute(log *zap.Logger, attr sdk.Attribute) revisionNumberString := revisionSplit[0] revisionNumber, err := strconv.ParseUint(revisionNumberString, 10, 64) if err != nil { - log.Error("Error parsing client consensus height revision number", + log.Error("Parsing client consensus height revision number.", zap.Error(err), ) return @@ -172,7 +172,7 @@ func (res *ClientInfo) parseClientAttribute(log *zap.Logger, attr sdk.Attribute) revisionHeightString := revisionSplit[1] revisionHeight, err := strconv.ParseUint(revisionHeightString, 10, 64) if err != nil { - log.Error("Error parsing client consensus height revision height", + log.Error("Parsing client consensus height revision height.", zap.Error(err), ) return @@ -184,7 +184,7 @@ func (res *ClientInfo) parseClientAttribute(log *zap.Logger, attr sdk.Attribute) case clienttypes.AttributeKeyHeader: data, err := hex.DecodeString(attr.Value) if err != nil { - log.Error("Error parsing client header", + log.Error("Parsing client header.", zap.String("header", attr.Value), zap.Error(err), ) @@ -219,7 +219,7 @@ func (res *PacketInfo) parsePacketAttribute(log *zap.Logger, attr sdk.Attribute) case chantypes.AttributeKeySequence: res.Sequence, err = strconv.ParseUint(attr.Value, 10, 64) if err != nil { - log.Error("Error parsing packet sequence", + log.Error("Parsing packet sequence.", zap.String("value", attr.Value), zap.Error(err), ) @@ -228,7 +228,7 @@ func (res *PacketInfo) parsePacketAttribute(log *zap.Logger, attr sdk.Attribute) case chantypes.AttributeKeyTimeoutTimestamp: res.TimeoutTimestamp, err = strconv.ParseUint(attr.Value, 10, 64) if err != nil { - log.Error("Error parsing packet timestamp", + log.Error("Parsing packet timestamp.", zap.Uint64("sequence", 
res.Sequence), zap.String("value", attr.Value), zap.Error(err), @@ -241,7 +241,7 @@ func (res *PacketInfo) parsePacketAttribute(log *zap.Logger, attr sdk.Attribute) case chantypes.AttributeKeyDataHex: data, err := hex.DecodeString(attr.Value) if err != nil { - log.Error("Error parsing packet data", + log.Error("Parsing packet data.", zap.Uint64("sequence", res.Sequence), zap.Error(err), ) @@ -254,7 +254,7 @@ func (res *PacketInfo) parsePacketAttribute(log *zap.Logger, attr sdk.Attribute) case chantypes.AttributeKeyAckHex: data, err := hex.DecodeString(attr.Value) if err != nil { - log.Error("Error parsing packet ack", + log.Error("Parsing packet ack.", zap.Uint64("sequence", res.Sequence), zap.String("value", attr.Value), zap.Error(err), @@ -265,7 +265,7 @@ func (res *PacketInfo) parsePacketAttribute(log *zap.Logger, attr sdk.Attribute) case chantypes.AttributeKeyTimeoutHeight: timeoutSplit := strings.Split(attr.Value, "-") if len(timeoutSplit) != 2 { - log.Error("Error parsing packet height timeout", + log.Error("Parsing packet height timeout.", zap.Uint64("sequence", res.Sequence), zap.String("value", attr.Value), ) @@ -273,7 +273,7 @@ func (res *PacketInfo) parsePacketAttribute(log *zap.Logger, attr sdk.Attribute) } revisionNumber, err := strconv.ParseUint(timeoutSplit[0], 10, 64) if err != nil { - log.Error("Error parsing packet timeout height revision number", + log.Error("Parsing packet timeout height revision number.", zap.Uint64("sequence", res.Sequence), zap.String("value", timeoutSplit[0]), zap.Error(err), @@ -282,7 +282,7 @@ func (res *PacketInfo) parsePacketAttribute(log *zap.Logger, attr sdk.Attribute) } revisionHeight, err := strconv.ParseUint(timeoutSplit[1], 10, 64) if err != nil { - log.Error("Error parsing packet timeout height revision height", + log.Error("Parsing packet timeout height revision height.", zap.Uint64("sequence", res.Sequence), zap.String("value", timeoutSplit[1]), zap.Error(err), @@ -398,7 +398,7 @@ func (res *ClientICQInfo) MarshalLogObject(enc zapcore.ObjectEncoder) error { func (res *ClientICQInfo) ParseAttrs(log *zap.Logger, attrs []sdk.Attribute) { for _, attr := range attrs { if err := res.parseAttribute(attr); err != nil { - panic(fmt.Errorf("failed to parse attributes from client ICQ message: %w", err)) + panic(fmt.Errorf("parse attributes from client ICQ message: %w", err)) } } } diff --git a/relayer/chains/penumbra/log.go b/relayer/chains/penumbra/log.go index 32d249f87..49fbb63a7 100644 --- a/relayer/chains/penumbra/log.go +++ b/relayer/chains/penumbra/log.go @@ -57,7 +57,7 @@ func (cc *PenumbraProvider) LogFailedTx(res *provider.RelayerTxResponse, err err // Make a copy since we may continue to the warning errorFields := append(fields, zap.Error(err)) cc.log.Error( - "Failed sending cosmos transaction", + "Sending cosmos transaction.", errorFields..., ) @@ -68,7 +68,7 @@ func (cc *PenumbraProvider) LogFailedTx(res *provider.RelayerTxResponse, err err if res.Code != 0 && res.Data != "" { fields = append(fields, zap.Object("response", res)) - cc.log.Warn( + cc.log.Error( "Sent transaction but received failure response", fields..., ) @@ -102,12 +102,12 @@ func (cc *PenumbraProvider) LogSuccessTx(res *sdk.TxResponse, msgs []provider.Re } } else { cc.log.Debug( - "Failed to convert message to Tx type", + "convert message to Tx type", zap.Stringer("type", reflect.TypeOf(m)), ) } } else { - cc.log.Debug("Failed to unpack response Tx into sdk.Msg", zap.Error(err)) + cc.log.Debug("unpack response Tx into sdk.Msg", zap.Error(err)) } // Include the 
height, msgType, and tx_hash @@ -119,7 +119,7 @@ func (cc *PenumbraProvider) LogSuccessTx(res *sdk.TxResponse, msgs []provider.Re // Log the successful transaction with fields cc.log.Info( - "Successful transaction", + "Successful transaction.", fields..., ) } @@ -170,7 +170,7 @@ func getFeePayer(log *zap.Logger, cdc *codec.ProtoCodec, tx *typestx.Tx) string default: signers, _, err := cdc.GetMsgV1Signers(firstMsg) if err != nil { - log.Info("Could not get signers for msg when attempting to get the fee payer", zap.Error(err)) + log.Info("Could not get signers for msg when attempting to get the fee payer.", zap.Error(err)) return "" } diff --git a/relayer/chains/penumbra/message_handlers.go b/relayer/chains/penumbra/message_handlers.go index 1cdbe1867..ebbb4455a 100644 --- a/relayer/chains/penumbra/message_handlers.go +++ b/relayer/chains/penumbra/message_handlers.go @@ -26,7 +26,7 @@ func (pcp *PenumbraChainProcessor) handleMessage(m chains.IbcMessage, c processo func (pcp *PenumbraChainProcessor) handlePacketMessage(action string, pi provider.PacketInfo, c processor.IBCMessagesCache) { channelKey, err := processor.PacketInfoChannelKey(action, pi) if err != nil { - pcp.log.Error("Unexpected error handling packet message", + pcp.log.Error("Unexpected error handling packet message.", zap.String("action", action), zap.Uint64("sequence", pi.Sequence), zap.Any("channel", channelKey), @@ -36,7 +36,7 @@ func (pcp *PenumbraChainProcessor) handlePacketMessage(action string, pi provide } if !c.PacketFlow.ShouldRetainSequence(pcp.pathProcessors, channelKey, pcp.chainProvider.ChainId(), action, pi.Sequence) { - pcp.log.Warn("Not retaining packet message", + pcp.log.Error("Not retaining packet message.", zap.String("action", action), zap.Uint64("sequence", pi.Sequence), zap.Any("channel", channelKey), diff --git a/relayer/chains/penumbra/penumbra_chain_processor.go b/relayer/chains/penumbra/penumbra_chain_processor.go index 2944a4570..cf972e270 100644 --- a/relayer/chains/penumbra/penumbra_chain_processor.go +++ b/relayer/chains/penumbra/penumbra_chain_processor.go @@ -116,8 +116,8 @@ func (pcp *PenumbraChainProcessor) latestHeightWithRetry(ctx context.Context) (l latestHeight, err = pcp.chainProvider.QueryLatestHeight(latestHeightQueryCtx) return err }, retry.Context(ctx), retry.Attempts(latestHeightQueryRetries), retry.Delay(latestHeightQueryRetryDelay), retry.LastErrorOnly(true), retry.OnRetry(func(n uint, err error) { - pcp.log.Info( - "Failed to query latest height", + pcp.log.Debug( + "Retrying query latest height.", zap.Uint("attempt", n+1), zap.Uint("max_attempts", latestHeightQueryRetries), zap.Error(err), @@ -169,7 +169,7 @@ func (pcp *PenumbraChainProcessor) Run(ctx context.Context, initialBlockHistory latestHeight, err := pcp.latestHeightWithRetry(ctx) if err != nil { pcp.log.Error( - "Failed to query latest height after max attempts", + "Query latest height after max attempts.", zap.Uint("attempts", latestHeightQueryRetries), zap.Error(err), ) @@ -225,7 +225,7 @@ func (pcp *PenumbraChainProcessor) initializeConnectionState(ctx context.Context defer cancel() connections, err := pcp.chainProvider.QueryConnections(ctx) if err != nil { - return fmt.Errorf("error querying connections: %w", err) + return fmt.Errorf("querying connections: %w", err) } for _, c := range connections { pcp.connectionClients[c.Id] = c.ClientId @@ -245,7 +245,7 @@ func (pcp *PenumbraChainProcessor) initializeChannelState(ctx context.Context) e defer cancel() channels, err := pcp.chainProvider.QueryChannels(ctx) if err 
!= nil { - return fmt.Errorf("error querying channels: %w", err) + return fmt.Errorf("querying channels: %w", err) } for _, ch := range channels { if len(ch.ConnectionHops) != 1 { @@ -281,11 +281,10 @@ type ResultBlockResults struct { func (pcp *PenumbraChainProcessor) queryCycle(ctx context.Context, persistence *queryCyclePersistence) error { var err error persistence.latestHeight, err = pcp.latestHeightWithRetry(ctx) - // don't want to cause CosmosChainProcessor to quit here, can retry again next cycle. if err != nil { pcp.log.Error( - "Failed to query latest height after max attempts", + "Query latest height after max attempts.", zap.Uint("attempts", latestHeightQueryRetries), zap.Error(err), ) @@ -303,9 +302,9 @@ func (pcp *PenumbraChainProcessor) queryCycle(ctx context.Context, persistence * if (persistence.latestHeight - persistence.latestQueriedBlock) < inSyncNumBlocksThreshold { pcp.inSync = true firstTimeInSync = true - pcp.log.Info("Chain is in sync") + pcp.log.Info("Chain in sync.") } else { - pcp.log.Info("Chain is not yet in sync", + pcp.log.Info("Chain not in sync.", zap.Int64("latest_queried_block", persistence.latestQueriedBlock), zap.Int64("latest_height", persistence.latestHeight), ) @@ -350,7 +349,7 @@ func (pcp *PenumbraChainProcessor) queryCycle(ctx context.Context, persistence * }) if err := eg.Wait(); err != nil { - pcp.log.Warn("Error querying block data", zap.Error(err)) + pcp.log.Error("Querying block data.", zap.Error(err)) break } @@ -404,7 +403,7 @@ func (pcp *PenumbraChainProcessor) queryCycle(ctx context.Context, persistence * clientID := pp.RelevantClientID(chainID) clientState, err := pcp.clientState(ctx, clientID) if err != nil { - pcp.log.Error("Error fetching client state", + pcp.log.Error("Fetching client state.", zap.String("client_id", clientID), zap.Error(err), ) diff --git a/relayer/chains/penumbra/query.go b/relayer/chains/penumbra/query.go index 9aa86c5e3..d47a3fd51 100644 --- a/relayer/chains/penumbra/query.go +++ b/relayer/chains/penumbra/query.go @@ -535,7 +535,6 @@ func (cc *PenumbraProvider) GenerateConnHandshakeProof(ctx context.Context, heig func (cc *PenumbraProvider) QueryChannel(ctx context.Context, height int64, channelid, portid string) (chanRes *chantypes.QueryChannelResponse, err error) { res, err := cc.queryChannelABCI(ctx, height, portid, channelid) if err != nil && strings.Contains(err.Error(), "not found") { - return &chantypes.QueryChannelResponse{ Channel: &chantypes.Channel{ State: chantypes.UNINITIALIZED, @@ -1002,12 +1001,12 @@ func (cc *PenumbraProvider) QueryRecvPacket( func (cc *PenumbraProvider) QueryStatus(ctx context.Context) (*coretypes.ResultStatus, error) { status, err := cc.RPCClient.Status(ctx) if err != nil { - return nil, fmt.Errorf("failed to query node status: %w", err) + return nil, fmt.Errorf("query node status: %w", err) } return status, nil } func (cc *PenumbraProvider) QueryICQWithProof(ctx context.Context, msgType string, request []byte, height uint64) (provider.ICQProof, error) { - //TODO implement me + // TODO implement me panic("implement me") } diff --git a/relayer/chains/penumbra/tx.go b/relayer/chains/penumbra/tx.go index 4475c89a7..558225a73 100644 --- a/relayer/chains/penumbra/tx.go +++ b/relayer/chains/penumbra/tx.go @@ -55,9 +55,7 @@ var ( errUnknown = "unknown" ) -var ( - defaultDelayPeriod = uint64(0) -) +var defaultDelayPeriod = uint64(0) // Strings for parsing events var ( @@ -301,7 +299,6 @@ func parseEventsFromABCIResponse(resp abci.ExecTxResult) []provider.RelayerEvent } func (cc 
*PenumbraProvider) sendMessagesInner(ctx context.Context, msgs []provider.RelayerMessage, _memo string) (*coretypes.ResultBroadcastTx, error) { - // TODO: fee estimation, fee payments // NOTE: we do not actually need to sign this tx currently, since there // are no fees required on the testnet. future versions of penumbra @@ -383,8 +380,8 @@ func (cc *PenumbraProvider) SendMessages(ctx context.Context, msgs []provider.Re events = append(events, parseEventsFromABCIResponse(res.TxResult)...) return nil }, retry.Context(ctx), rtyAtt, rtyDel, rtyErr, retry.OnRetry(func(n uint, err error) { - cc.log.Info( - "Error building or broadcasting transaction", + cc.log.Debug( + "Retrying building or broadcasting transaction.", zap.String("chain_id", cc.PCfg.ChainID), zap.Uint("attempt", n+1), zap.Uint("max_attempts", rtyAttNum), @@ -912,9 +909,11 @@ func (cc *PenumbraProvider) MsgUpgradeClient(srcClientId string, consRes *client return nil, err } - msgUpgradeClient := &clienttypes.MsgUpgradeClient{ClientId: srcClientId, ClientState: clientRes.ClientState, + msgUpgradeClient := &clienttypes.MsgUpgradeClient{ + ClientId: srcClientId, ClientState: clientRes.ClientState, ConsensusState: consRes.ConsensusState, ProofUpgradeClient: consRes.GetProof(), - ProofUpgradeConsensusState: consRes.ConsensusState.Value, Signer: acc} + ProofUpgradeConsensusState: consRes.ConsensusState.Value, Signer: acc, + } return cosmos.NewCosmosMessage(msgUpgradeClient, func(signer string) { msgUpgradeClient.Signer = signer @@ -1216,7 +1215,7 @@ func (cc *PenumbraProvider) PacketCommitment(ctx context.Context, msgTransfer pr key := host.PacketCommitmentKey(msgTransfer.SourcePort, msgTransfer.SourceChannel, msgTransfer.Sequence) _, proof, proofHeight, err := cc.QueryTendermintProof(ctx, int64(height), key) if err != nil { - return provider.PacketProof{}, fmt.Errorf("error querying tendermint proof for packet commitment: %w", err) + return provider.PacketProof{}, fmt.Errorf("querying tendermint proof for packet commitment: %w", err) } return provider.PacketProof{ Proof: proof, @@ -1245,7 +1244,7 @@ func (cc *PenumbraProvider) PacketAcknowledgement(ctx context.Context, msgRecvPa key := host.PacketAcknowledgementKey(msgRecvPacket.DestPort, msgRecvPacket.DestChannel, msgRecvPacket.Sequence) _, proof, proofHeight, err := cc.QueryTendermintProof(ctx, int64(height), key) if err != nil { - return provider.PacketProof{}, fmt.Errorf("error querying tendermint proof for packet acknowledgement: %w", err) + return provider.PacketProof{}, fmt.Errorf("querying tendermint proof for packet acknowledgement: %w", err) } return provider.PacketProof{ Proof: proof, @@ -1275,7 +1274,7 @@ func (cc *PenumbraProvider) PacketReceipt(ctx context.Context, msgTransfer provi key := host.PacketReceiptKey(msgTransfer.DestPort, msgTransfer.DestChannel, msgTransfer.Sequence) _, proof, proofHeight, err := cc.QueryTendermintProof(ctx, int64(height), key) if err != nil { - return provider.PacketProof{}, fmt.Errorf("error querying tendermint proof for packet receipt: %w", err) + return provider.PacketProof{}, fmt.Errorf("querying tendermint proof for packet receipt: %w", err) } return provider.PacketProof{ @@ -1445,7 +1444,7 @@ func (cc *PenumbraProvider) NextSeqRecv( key := host.NextSequenceRecvKey(msgTransfer.DestPort, msgTransfer.DestChannel) _, proof, proofHeight, err := cc.QueryTendermintProof(ctx, int64(height), key) if err != nil { - return provider.PacketProof{}, fmt.Errorf("error querying tendermint proof for next sequence receive: %w", err) + return 
provider.PacketProof{}, fmt.Errorf("querying tendermint proof for next sequence receive: %w", err) } return provider.PacketProof{ @@ -1642,14 +1641,14 @@ func (cc *PenumbraProvider) MsgUpdateClientHeader( trustedValidatorsProto, err := trustedCosmosHeader.ValidatorSet.ToProto() if err != nil { - return nil, fmt.Errorf("error converting trusted validators to proto object: %w", err) + return nil, fmt.Errorf("converting trusted validators to proto object: %w", err) } signedHeaderProto := latestCosmosHeader.SignedHeader.ToProto() validatorSetProto, err := latestCosmosHeader.ValidatorSet.ToProto() if err != nil { - return nil, fmt.Errorf("error converting validator set to proto object: %w", err) + return nil, fmt.Errorf("converting validator set to proto object: %w", err) } return &tmclient.Header{ @@ -1763,13 +1762,17 @@ func (cc *PenumbraProvider) AcknowledgementFromSequence(ctx context.Context, dst } func rcvPacketQuery(channelID string, seq int) []string { - return []string{fmt.Sprintf("%s.packet_src_channel='%s'", spTag, channelID), - fmt.Sprintf("%s.packet_sequence='%d'", spTag, seq)} + return []string{ + fmt.Sprintf("%s.packet_src_channel='%s'", spTag, channelID), + fmt.Sprintf("%s.packet_sequence='%d'", spTag, seq), + } } func ackPacketQuery(channelID string, seq int) []string { - return []string{fmt.Sprintf("%s.packet_dst_channel='%s'", waTag, channelID), - fmt.Sprintf("%s.packet_sequence='%d'", waTag, seq)} + return []string{ + fmt.Sprintf("%s.packet_dst_channel='%s'", waTag, channelID), + fmt.Sprintf("%s.packet_sequence='%d'", waTag, seq), + } } // acknowledgementsFromResultTx looks through the events in a *ctypes.ResultTx and returns @@ -1786,7 +1789,6 @@ EventLoop: } for attributeKey, attributeValue := range event.Attributes { - switch attributeKey { case srcChanTag: if attributeValue != srcChanId { @@ -1811,7 +1813,7 @@ EventLoop: case toHeightTag: timeout, err := clienttypes.ParseHeight(attributeValue) if err != nil { - cc.log.Warn("error parsing height timeout", + cc.log.Error("Parsing height timeout.", zap.String("chain_id", cc.ChainId()), zap.Uint64("sequence", rp.seq), zap.Error(err), @@ -1822,7 +1824,7 @@ EventLoop: case toTSTag: timeout, err := strconv.ParseUint(attributeValue, 10, 64) if err != nil { - cc.log.Warn("error parsing timestamp timeout", + cc.log.Error("Parsing timestamp timeout.", zap.String("chain_id", cc.ChainId()), zap.Uint64("sequence", rp.seq), zap.Error(err), @@ -1833,7 +1835,7 @@ EventLoop: case seqTag: seq, err := strconv.ParseUint(attributeValue, 10, 64) if err != nil { - cc.log.Warn("error parsing packet sequence", + cc.log.Error("Parsing packet sequence.", zap.String("chain_id", cc.ChainId()), zap.Error(err), ) @@ -1958,7 +1960,7 @@ func (cc *PenumbraProvider) InjectTrustedFields(ctx context.Context, header ibce return err }, retry.Context(ctx), rtyAtt, rtyDel, rtyErr); err != nil { return nil, fmt.Errorf( - "failed to get trusted header, please ensure header at the height %d has not been pruned by the connected node: %w", + "get trusted header, please ensure header at the height %d has not been pruned by the connected node: %w", h.TrustedHeight.RevisionHeight, err, ) } @@ -1990,7 +1992,7 @@ func castClientStateToTMType(cs *codectypes.Any) (*tmclient.ClientState, error) clientState, ok := clientStateExported.(*tmclient.ClientState) if !ok { return &tmclient.ClientState{}, - fmt.Errorf("error when casting exported clientstate to tendermint type") + fmt.Errorf("when casting exported clientstate to tendermint type") } return clientState, nil @@ -2132,7 
+2134,7 @@ func isQueryStoreWithProof(path string) bool { func (cc *PenumbraProvider) sdkError(codespace string, code uint32) error { // ABCIError will return an error other than "unknown" if syncRes.Code is a registered error in syncRes.Codespace // This catches all of the sdk errors https://github.com/cosmos/cosmos-sdk/blob/f10f5e5974d2ecbf9efc05bc0bfe1c99fdeed4b6/types/errors/errors.go - err := errors.Unwrap(sdkerrors.ABCIError(codespace, code, "error broadcasting transaction")) + err := errors.Unwrap(sdkerrors.ABCIError(codespace, code, "broadcasting transaction")) if err.Error() != errUnknown { return err } @@ -2196,7 +2198,7 @@ func (cc *PenumbraProvider) waitForTx( ) { res, err := cc.waitForBlockInclusion(ctx, txHash, waitTimeout) if err != nil { - cc.log.Error("Failed to wait for block inclusion", zap.Error(err)) + cc.log.Error("Wait for block inclusion.", zap.Error(err)) if callback != nil { callback(nil, err) } @@ -2278,7 +2280,7 @@ func (cc *PenumbraProvider) mkTxResult(resTx *coretypes.ResultTx) (*sdk.TxRespon } func (cc *PenumbraProvider) MsgSubmitQueryResponse(chainID string, queryID provider.ClientICQQueryID, proof provider.ICQProof) (provider.RelayerMessage, error) { - //TODO implement me + // TODO implement me panic("implement me") } @@ -2290,6 +2292,6 @@ func (cc *PenumbraProvider) SendMessagesToMempool(ctx context.Context, msgs []pr // MsgRegisterCounterpartyPayee creates an sdk.Msg to broadcast the counterparty address func (cc *PenumbraProvider) MsgRegisterCounterpartyPayee(portID, channelID, relayerAddr, counterpartyPayee string) (provider.RelayerMessage, error) { - //TODO implement me + // TODO implement me panic("implement me") } diff --git a/relayer/channel.go b/relayer/channel.go index 6a9c9e62d..8fdcf0232 100644 --- a/relayer/channel.go +++ b/relayer/channel.go @@ -62,9 +62,10 @@ func (c *Chain) CreateOpenChannels( DefaultMaxMsgLength, 0, 0, + nil, ) - c.log.Info("Starting event processor for channel handshake", + c.log.Info("Starting event processor for channel handshake.", zap.String("src_chain_id", c.PathEnd.ChainID), zap.String("src_port_id", srcPortID), zap.String("dst_chain_id", dst.PathEnd.ChainID), @@ -137,12 +138,13 @@ func (c *Chain) CloseChannel( DefaultMaxMsgLength, 0, 0, + nil, )). WithInitialBlockHistory(0). WithMessageLifecycle(&processor.FlushLifecycle{}). Build() - c.log.Info("Starting event processor for flush before channel close", + c.log.Info("Starting event processor for flush before channel close.", zap.String("src_chain_id", c.PathEnd.ChainID), zap.String("src_port_id", srcPortID), zap.String("dst_chain_id", dst.PathEnd.ChainID), @@ -155,7 +157,7 @@ func (c *Chain) CloseChannel( ctx, cancel := context.WithTimeout(ctx, processorTimeout) defer cancel() - c.log.Info("Starting event processor for channel close", + c.log.Info("Starting event processor for channel close.", zap.String("src_chain_id", c.PathEnd.ChainID), zap.String("src_port_id", srcPortID), zap.String("dst_chain_id", dst.PathEnd.ChainID), @@ -177,6 +179,7 @@ func (c *Chain) CloseChannel( DefaultMaxMsgLength, 0, 0, + nil, )). WithInitialBlockHistory(0). 
WithMessageLifecycle(&processor.ChannelCloseLifecycle{ diff --git a/relayer/client.go b/relayer/client.go index 0430e07d1..8e6fc5ed9 100644 --- a/relayer/client.go +++ b/relayer/client.go @@ -32,7 +32,7 @@ func (c *Chain) CreateClients(ctx context.Context, var err error srch, dsth, err = QueryLatestHeights(ctx, c, dst) if srch == 0 || dsth == 0 || err != nil { - return fmt.Errorf("failed to query latest heights: %w", err) + return fmt.Errorf("query latest heights: %w", err) } return nil }, retry.Context(ctx), RtyAtt, RtyDel, RtyErr); err != nil { @@ -49,8 +49,8 @@ func (c *Chain) CreateClients(ctx context.Context, } return nil }, retry.Context(ctx), RtyAtt, RtyDel, RtyErr, retry.OnRetry(func(n uint, err error) { - c.log.Info( - "Failed to get light signed headers", + c.log.Debug( + "Retrying get light signed headers.", zap.String("src_chain_id", c.ChainID()), zap.Int64("src_height", srch), zap.String("dst_chain_id", dst.ChainID()), @@ -79,7 +79,7 @@ func (c *Chain) CreateClients(ctx context.Context, overrideUnbondingPeriod, maxClockDrift, customClientTrustingPeriodPercentage, memo) if err != nil { - return fmt.Errorf("failed to create client on src chain{%s}: %w", c.ChainID(), err) + return fmt.Errorf("create client on src chain{%s}: %w", c.ChainID(), err) } return nil }) @@ -94,7 +94,7 @@ func (c *Chain) CreateClients(ctx context.Context, overrideUnbondingPeriod, maxClockDrift, customClientTrustingPeriodPercentage, memo) if err != nil { - return fmt.Errorf("failed to create client on dst chain{%s}: %w", dst.ChainID(), err) + return fmt.Errorf("create client on dst chain{%s}: %w", dst.ChainID(), err) } return nil }) @@ -105,7 +105,7 @@ func (c *Chain) CreateClients(ctx context.Context, } c.log.Info( - "Clients created", + "Created clients.", zap.String("src_client_id", c.PathEnd.ClientID), zap.String("src_chain_id", c.ChainID()), zap.String("dst_client_id", dst.PathEnd.ClientID), @@ -150,7 +150,7 @@ func CreateClient( var err error tp, err = dst.GetTrustingPeriod(ctx, overrideUnbondingPeriod, customClientTrustingPeriodPercentage) if err != nil { - return fmt.Errorf("failed to get trusting period for chain{%s}: %w", dst.ChainID(), err) + return fmt.Errorf("get trusting period for chain{%s}: %w", dst.ChainID(), err) } if tp == 0 { return retry.Unrecoverable(fmt.Errorf("chain %s reported invalid zero trusting period", dst.ChainID())) @@ -176,7 +176,7 @@ func CreateClient( var err error ubdPeriod, err = dst.ChainProvider.QueryUnbondingPeriod(ctx) if err != nil { - return fmt.Errorf("failed to query unbonding period for chain{%s}: %w", dst.ChainID(), err) + return fmt.Errorf("query unbonding period for chain{%s}: %w", dst.ChainID(), err) } return nil }, retry.Context(ctx), RtyAtt, RtyDel, RtyErr); err != nil { @@ -188,7 +188,7 @@ func CreateClient( // So we build a new client state from dst and attempt to use this for creating the light client on src. clientState, err := dst.ChainProvider.NewClientState(dst.ChainID(), dstUpdateHeader, tp, ubdPeriod, maxClockDrift, allowUpdateAfterExpiry, allowUpdateAfterMisbehaviour) if err != nil { - return "", fmt.Errorf("failed to create new client state for chain{%s}: %w", dst.ChainID(), err) + return "", fmt.Errorf("create new client state for chain{%s}: %w", dst.ChainID(), err) } var clientID string @@ -199,7 +199,7 @@ func CreateClient( // proposed new client state from dst. 
clientID, err = findMatchingClient(ctx, src, dst, clientState) if err != nil { - return "", fmt.Errorf("failed to find a matching client for the new client state: %w", err) + return "", fmt.Errorf("find a matching client for the new client state: %w", err) } } @@ -226,7 +226,7 @@ func CreateClient( // we need to sign with the address from src. createMsg, err := src.ChainProvider.MsgCreateClient(clientState, dstUpdateHeader.ConsensusState()) if err != nil { - return "", fmt.Errorf("failed to compose CreateClient msg for chain{%s} tracking the state of chain{%s}: %w", + return "", fmt.Errorf("compose CreateClient msg for chain{%s} tracking the state of chain{%s}: %w", src.ChainID(), dst.ChainID(), err) } @@ -241,7 +241,7 @@ func CreateClient( res, success, err = src.ChainProvider.SendMessages(ctx, msgs, memo) if err != nil { src.LogFailedTx(res, err, msgs) - return fmt.Errorf("failed to send messages on chain{%s}: %w", src.ChainID(), err) + return fmt.Errorf("send messages on chain{%s}: %w", src.ChainID(), err) } if !success { @@ -263,7 +263,7 @@ func CreateClient( src.PathEnd.ClientID = clientID src.log.Info( - "Client Created", + "Created client.", zap.String("src_chain_id", src.ChainID()), zap.String("src_client_id", src.PathEnd.ClientID), zap.String("dst_chain_id", dst.ChainID()), @@ -286,8 +286,8 @@ func MsgUpdateClient( dstClientState, err = dst.ChainProvider.QueryClientState(ctx, dsth, dst.ClientID()) return err }, retry.Context(ctx), RtyAtt, RtyDel, RtyErr, retry.OnRetry(func(n uint, err error) { - dst.log.Info( - "Failed to query client state when updating clients", + dst.log.Debug( + "Retrying query client state when updating clients.", zap.String("client_id", dst.ClientID()), zap.Uint("attempt", n+1), zap.Uint("max_attempts", RtyAttNum), @@ -306,8 +306,8 @@ func MsgUpdateClient( srcHeader, err = src.ChainProvider.QueryIBCHeader(egCtx, srch) return err }, retry.Context(egCtx), RtyAtt, RtyDel, RtyErr, retry.OnRetry(func(n uint, err error) { - src.log.Info( - "Failed to query IBC header when building update client message", + src.log.Debug( + "Retrying query IBC header when building update client message.", zap.String("client_id", dst.ClientID()), zap.Uint("attempt", n+1), zap.Uint("max_attempts", RtyAttNum), @@ -321,8 +321,8 @@ func MsgUpdateClient( dstTrustedHeader, err = src.ChainProvider.QueryIBCHeader(egCtx, int64(dstClientState.GetLatestHeight().GetRevisionHeight())+1) return err }, retry.Context(egCtx), RtyAtt, RtyDel, RtyErr, retry.OnRetry(func(n uint, err error) { - src.log.Info( - "Failed to query IBC header when building update client message", + src.log.Debug( + "Retrying query IBC header when building update client message.", zap.String("client_id", dst.ClientID()), zap.Uint("attempt", n+1), zap.Uint("max_attempts", RtyAttNum), @@ -341,8 +341,8 @@ func MsgUpdateClient( updateHeader, err = src.ChainProvider.MsgUpdateClientHeader(srcHeader, dstClientState.GetLatestHeight().(clienttypes.Height), dstTrustedHeader) return err }, retry.Context(ctx), RtyAtt, RtyDel, RtyErr, retry.OnRetry(func(n uint, err error) { - src.log.Info( - "Failed to build update client header", + src.log.Debug( + "Retrying build update client header.", zap.String("client_id", dst.ClientID()), zap.Uint("attempt", n+1), zap.Uint("max_attempts", RtyAttNum), @@ -394,7 +394,7 @@ func UpdateClients( if err := result.Error(); err != nil { if result.PartiallySent() { src.log.Info( - "Partial success when updating clients", + "Partial success when updating clients.", zap.String("src_chain_id", src.ChainID()), 
zap.String("dst_chain_id", dst.ChainID()), zap.Object("send_result", result), @@ -404,7 +404,7 @@ func UpdateClients( } src.log.Info( - "Clients updated", + "Updated clients.", zap.String("src_chain_id", src.ChainID()), zap.String("src_client", src.PathEnd.ClientID), @@ -507,8 +507,8 @@ func findMatchingClient(ctx context.Context, src, dst *Chain, newClientState ibc } return nil }, retry.Context(ctx), RtyAtt, RtyDel, RtyErr, retry.OnRetry(func(n uint, err error) { - src.log.Info( - "Failed to query clients", + src.log.Debug( + "Retrying query clients.", zap.String("chain_id", src.ChainID()), zap.Uint("attempt", n+1), zap.Uint("max_attempts", RtyAttNum), diff --git a/relayer/codecs/ethermint/eip712.go b/relayer/codecs/ethermint/eip712.go index 9a5c17145..1dd6844e4 100644 --- a/relayer/codecs/ethermint/eip712.go +++ b/relayer/codecs/ethermint/eip712.go @@ -88,7 +88,7 @@ func decodeAminoSignDoc(signDocBytes []byte) (apitypes.TypedData, error) { for i, jsonMsg := range aminoDoc.Msgs { var m sdk.Msg if err := aminoCodec.UnmarshalJSON(jsonMsg, &m); err != nil { - return apitypes.TypedData{}, fmt.Errorf("failed to unmarshal sign doc message: %w", err) + return apitypes.TypedData{}, fmt.Errorf("unmarshal sign doc message: %w", err) } msgs[i] = m } diff --git a/relayer/codecs/ethermint/encoding.go b/relayer/codecs/ethermint/encoding.go index 2eb93a23a..21ba5f5ea 100644 --- a/relayer/codecs/ethermint/encoding.go +++ b/relayer/codecs/ethermint/encoding.go @@ -36,7 +36,7 @@ func WrapTxToTypedData( txData := make(map[string]interface{}) if err := json.Unmarshal(data, &txData); err != nil { - return apitypes.TypedData{}, errorsmod.Wrap(legacyerrors.ErrJSONUnmarshal, "failed to JSON unmarshal data") + return apitypes.TypedData{}, errorsmod.Wrap(legacyerrors.ErrJSONUnmarshal, "JSON unmarshal data") } domain := apitypes.TypedDataDomain{ @@ -354,7 +354,7 @@ func unpackAny(cdc codectypes.AnyUnpacker, field reflect.Value) (reflect.Type, r } if err := cdc.UnpackAny(any, &anyWrapper.Value); err != nil { - return nil, reflect.Value{}, errorsmod.Wrap(err, "failed to unpack Any in msg struct") + return nil, reflect.Value{}, errorsmod.Wrap(err, "unpack Any in msg struct") } fieldType := reflect.TypeOf(anyWrapper) @@ -465,7 +465,7 @@ func typToEth(typ reflect.Type) string { func doRecover(err *error) { if r := recover(); r != nil { if e, ok := r.(error); ok { - e = errorsmod.Wrap(e, "panicked with error") + e = errorsmod.Wrap(e, "panicked") *err = e return } diff --git a/relayer/connection.go b/relayer/connection.go index cb7aa3875..59c5cd7f4 100644 --- a/relayer/connection.go +++ b/relayer/connection.go @@ -43,6 +43,7 @@ func (c *Chain) CreateOpenConnections( DefaultMaxMsgLength, 0, 0, + nil, ) var connectionSrc, connectionDst string diff --git a/relayer/ics24.go b/relayer/ics24.go index 55da5ec6e..ec7f5e738 100644 --- a/relayer/ics24.go +++ b/relayer/ics24.go @@ -65,5 +65,5 @@ func (c *Chain) ErrPathNotSet() error { // ErrCantSetPath returns an error if the path doesn't set properly func (c *Chain) ErrCantSetPath(err error) error { - return fmt.Errorf("path on chain %s failed to set: %w", c.ChainID(), err) + return fmt.Errorf("path on chain %s set: %w", c.ChainID(), err) } diff --git a/relayer/log-chain.go b/relayer/log-chain.go index d0379d999..d52ad49d1 100644 --- a/relayer/log-chain.go +++ b/relayer/log-chain.go @@ -28,7 +28,6 @@ func logFailedTx(log *zap.Logger, chainID string, res *provider.RelayerTxRespons if err != nil { fields = append(fields, zap.Error(err)) } - log.Info("Failed sending transaction", 
fields...) if res != nil && res.Code != 0 && res.Data != "" { msgTypes := make([]string, len(msgs)) @@ -37,7 +36,7 @@ func logFailedTx(log *zap.Logger, chainID string, res *provider.RelayerTxRespons } log.Info( - "Sent transaction that resulted in error", + "Sent transaction that resulted in non success code.", zap.String("chain_id", chainID), zap.Int64("height", res.Height), zap.Strings("msg_types", msgTypes), @@ -47,7 +46,7 @@ func logFailedTx(log *zap.Logger, chainID string, res *provider.RelayerTxRespons } if res != nil { - log.Debug("Transaction response", zap.Object("resp", res)) + log.Debug("Transaction response.", zap.Object("resp", res)) } } @@ -73,7 +72,7 @@ func (c *Chain) errQueryUnrelayedPacketAcks() error { func (c *Chain) LogRetryGetIBCUpdateHeader(n uint, err error) { c.log.Info( - "Failed to get IBC update headers", + "Retrying get IBC update headers.", zap.String("chain_id", c.ChainID()), zap.Uint("attempt", n+1), zap.Uint("max_attempts", RtyAttNum), diff --git a/relayer/naive-strategy.go b/relayer/naive-strategy.go index 72ee89a20..1e1ac9069 100644 --- a/relayer/naive-strategy.go +++ b/relayer/naive-strategy.go @@ -22,7 +22,7 @@ func UnrelayedSequences(ctx context.Context, src, dst *Chain, srcChannel *chanty srch, dsth, err := QueryLatestHeights(ctx, src, dst) if err != nil { - src.log.Error("Error querying latest heights", zap.Error(err)) + src.log.Error("Querying latest heights.", zap.Error(err)) return rs } @@ -46,8 +46,8 @@ func UnrelayedSequences(ctx context.Context, src, dst *Chain, srcChannel *chanty return nil } }, retry.Context(ctx), RtyAtt, RtyDel, RtyErr, retry.OnRetry(func(n uint, err error) { - src.log.Info( - "Failed to query packet commitments", + src.log.Debug( + "Retrying query packet commitments.", zap.String("channel_id", srcChannel.ChannelId), zap.String("port_id", srcChannel.PortId), zap.Uint("attempt", n+1), @@ -56,7 +56,7 @@ func UnrelayedSequences(ctx context.Context, src, dst *Chain, srcChannel *chanty ) })); err != nil { src.log.Error( - "Failed to query packet commitments after max retries", + "Query packet commitments after max retries.", zap.String("channel_id", srcChannel.ChannelId), zap.String("port_id", srcChannel.PortId), zap.Uint("attempts", RtyAttNum), @@ -87,8 +87,8 @@ func UnrelayedSequences(ctx context.Context, src, dst *Chain, srcChannel *chanty return nil } }, retry.Context(ctx), RtyAtt, RtyDel, RtyErr, retry.OnRetry(func(n uint, err error) { - dst.log.Info( - "Failed to query packet commitments", + dst.log.Debug( + "Retrying query packet commitments.", zap.String("channel_id", srcChannel.Counterparty.ChannelId), zap.String("port_id", srcChannel.Counterparty.PortId), zap.Uint("attempt", n+1), @@ -97,7 +97,7 @@ func UnrelayedSequences(ctx context.Context, src, dst *Chain, srcChannel *chanty ) })); err != nil { dst.log.Error( - "Failed to query packet commitments after max retries", + "Query packet commitments after max retries.", zap.String("channel_id", srcChannel.Counterparty.ChannelId), zap.String("port_id", srcChannel.Counterparty.PortId), zap.Uint("attempts", RtyAttNum), @@ -113,9 +113,7 @@ func UnrelayedSequences(ctx context.Context, src, dst *Chain, srcChannel *chanty wg.Wait() - var ( - srcUnreceivedPackets, dstUnreceivedPackets []uint64 - ) + var srcUnreceivedPackets, dstUnreceivedPackets []uint64 if len(srcPacketSeq) > 0 { wg.Add(1) @@ -127,8 +125,8 @@ func UnrelayedSequences(ctx context.Context, src, dst *Chain, srcChannel *chanty srcUnreceivedPackets, err = dst.ChainProvider.QueryUnreceivedPackets(ctx, uint64(dsth),
srcChannel.Counterparty.ChannelId, srcChannel.Counterparty.PortId, srcPacketSeq) return err }, retry.Context(ctx), RtyAtt, RtyDel, RtyErr, retry.OnRetry(func(n uint, err error) { - dst.log.Info( - "Failed to query unreceived packets", + dst.log.Debug( + "Retrying query unreceived packets.", zap.String("channel_id", srcChannel.Counterparty.ChannelId), zap.String("port_id", srcChannel.Counterparty.PortId), zap.Uint("attempt", n+1), @@ -137,7 +135,7 @@ func UnrelayedSequences(ctx context.Context, src, dst *Chain, srcChannel *chanty ) })); err != nil { dst.log.Error( - "Failed to query unreceived packets after max retries", + "Query unreceived packets after max retries.", zap.String("channel_id", srcChannel.Counterparty.ChannelId), zap.String("port_id", srcChannel.Counterparty.PortId), zap.Uint("attempts", RtyAttNum), @@ -157,8 +155,8 @@ func UnrelayedSequences(ctx context.Context, src, dst *Chain, srcChannel *chanty dstUnreceivedPackets, err = src.ChainProvider.QueryUnreceivedPackets(ctx, uint64(srch), srcChannel.ChannelId, srcChannel.PortId, dstPacketSeq) return err }, retry.Context(ctx), RtyAtt, RtyDel, RtyErr, retry.OnRetry(func(n uint, err error) { - src.log.Info( - "Failed to query unreceived packets", + src.log.Debug( + "Retrying query unreceived packets.", zap.String("channel_id", srcChannel.ChannelId), zap.String("port_id", srcChannel.PortId), zap.Uint("attempt", n+1), @@ -167,7 +165,7 @@ func UnrelayedSequences(ctx context.Context, src, dst *Chain, srcChannel *chanty ) })); err != nil { src.log.Error( - "Failed to query unreceived packets after max retries", + "Query unreceived packets after max retries.", zap.String("channel_id", srcChannel.ChannelId), zap.String("port_id", srcChannel.PortId), zap.Uint("attempts", RtyAttNum), @@ -195,7 +193,7 @@ func UnrelayedSequences(ctx context.Context, src, dst *Chain, srcChannel *chanty nextSeqResp, err := dst.ChainProvider.QueryNextSeqRecv(ctx, dsth, srcChannel.Counterparty.ChannelId, srcChannel.Counterparty.PortId) if err != nil { dst.log.Error( - "Failed to query next packet receive sequence", + "Query next packet receive sequence.", zap.String("channel_id", srcChannel.Counterparty.ChannelId), zap.String("port_id", srcChannel.Counterparty.PortId), zap.Error(err), @@ -219,7 +217,7 @@ func UnrelayedSequences(ctx context.Context, src, dst *Chain, srcChannel *chanty nextSeqResp, err := src.ChainProvider.QueryNextSeqRecv(ctx, srch, srcChannel.ChannelId, srcChannel.PortId) if err != nil { src.log.Error( - "Failed to query next packet receive sequence", + "Query next packet receive sequence.", zap.String("channel_id", srcChannel.ChannelId), zap.String("port_id", srcChannel.PortId), zap.Error(err), @@ -250,7 +248,7 @@ func UnrelayedAcknowledgements(ctx context.Context, src, dst *Chain, srcChannel srch, dsth, err := QueryLatestHeights(ctx, src, dst) if err != nil { - src.log.Error("Error querying latest heights", zap.Error(err)) + src.log.Error("Querying latest heights.", zap.Error(err)) return rs } @@ -275,7 +273,7 @@ func UnrelayedAcknowledgements(ctx context.Context, src, dst *Chain, srcChannel } }, retry.Context(ctx), RtyAtt, RtyDel, RtyErr); err != nil { src.log.Error( - "Failed to query packet acknowledgement commitments after max attempts", + "Query packet acknowledgement commitments after max attempts.", zap.String("channel_id", srcChannel.ChannelId), zap.String("port_id", srcChannel.PortId), zap.Uint("attempts", RtyAttNum), @@ -306,7 +304,7 @@ func UnrelayedAcknowledgements(ctx context.Context, src, dst *Chain, srcChannel } }, 
retry.Context(ctx), RtyAtt, RtyDel, RtyErr); err != nil { dst.log.Error( - "Failed to query packet acknowledgement commitments after max attempts", + "Query packet acknowledgement commitments after max attempts.", zap.String("channel_id", srcChannel.Counterparty.ChannelId), zap.String("port_id", srcChannel.Counterparty.PortId), zap.Uint("attempts", RtyAttNum), @@ -332,7 +330,7 @@ func UnrelayedAcknowledgements(ctx context.Context, src, dst *Chain, srcChannel return err }, retry.Context(ctx), RtyErr, RtyAtt, RtyDel); err != nil { dst.log.Error( - "Failed to query unreceived acknowledgements after max attempts", + "Query unreceived acknowledgements after max attempts.", zap.String("channel_id", srcChannel.Counterparty.ChannelId), zap.String("port_id", srcChannel.Counterparty.PortId), zap.Uint("attempts", RtyAttNum), @@ -353,7 +351,7 @@ func UnrelayedAcknowledgements(ctx context.Context, src, dst *Chain, srcChannel return err }, retry.Context(ctx), RtyAtt, RtyDel, RtyErr); err != nil { src.log.Error( - "Failed to query unreceived acknowledgements after max attempts", + "Query unreceived acknowledgements after max attempts.", zap.String("channel_id", srcChannel.ChannelId), zap.String("port_id", srcChannel.PortId), zap.Uint("attempts", RtyAttNum), @@ -430,7 +428,7 @@ func RelayAcknowledgements(ctx context.Context, log *zap.Logger, src, dst *Chain if !msgs.Ready() { log.Info( - "No acknowledgements to relay", + "No acknowledgements to relay.", zap.String("src_chain_id", src.ChainID()), zap.String("src_port_id", srcChannel.PortId), zap.String("dst_chain_id", dst.ChainID()), @@ -448,7 +446,7 @@ func RelayAcknowledgements(ctx context.Context, log *zap.Logger, src, dst *Chain if err := result.Error(); err != nil { if result.PartiallySent() { log.Info( - "Partial success when relaying acknowledgements", + "Partial success when relaying acknowledgements.", zap.String("src_chain_id", src.ChainID()), zap.String("src_port_id", srcChannel.PortId), zap.String("dst_chain_id", dst.ChainID()), @@ -505,7 +503,7 @@ func RelayPackets(ctx context.Context, log *zap.Logger, src, dst *Chain, sp Rela if !msgs.Ready() { log.Info( - "No packets to relay", + "No packets to relay.", zap.String("src_chain_id", src.ChainID()), zap.String("src_port_id", srcChannel.PortId), zap.String("dst_chain_id", dst.ChainID()), @@ -523,7 +521,7 @@ func RelayPackets(ctx context.Context, log *zap.Logger, src, dst *Chain, sp Rela if err := result.Error(); err != nil { if result.PartiallySent() { log.Info( - "Partial success when relaying packets", + "Partial success when relaying packets.", zap.String("src_chain_id", src.ChainID()), zap.String("src_port_id", srcChannel.PortId), zap.String("dst_chain_id", dst.ChainID()), @@ -542,7 +540,6 @@ func RelayPackets(ctx context.Context, log *zap.Logger, src, dst *Chain, sp Rela } return nil - } // AddMessagesForSequences constructs RecvMsgs and TimeoutMsgs from sequence numbers on a src chain @@ -567,7 +564,7 @@ func AddMessagesForSequences( ) if err != nil { src.log.Info( - "Failed to relay packet from sequence", + "Relay packet from sequence.", zap.String("src_chain_id", src.ChainID()), zap.String("src_channel_id", srcChanID), zap.String("src_port_id", srcPortID), diff --git a/relayer/packet-tx.go b/relayer/packet-tx.go index 490f18394..aa9b6f38f 100644 --- a/relayer/packet-tx.go +++ b/relayer/packet-tx.go @@ -46,12 +46,12 @@ func (c *Chain) SendTransferMsg( if toTimeOffset > 0 { clientStateRes, err := dst.ChainProvider.QueryClientStateResponse(ctx, dsth, dst.ClientID()) if err != nil { - return 
fmt.Errorf("failed to query the client state response: %w", err) + return fmt.Errorf("query the client state response: %w", err) } clientState, err := clienttypes.UnpackClientState(clientStateRes.ClientState) if err != nil { - return fmt.Errorf("failed to unpack client state: %w", err) + return fmt.Errorf("unpack client state: %w", err) } consensusStateRes, err := dst.ChainProvider.QueryClientConsensusState( @@ -61,12 +61,12 @@ func (c *Chain) SendTransferMsg( clientState.GetLatestHeight(), ) if err != nil { - return fmt.Errorf("failed to query client consensus state: %w", err) + return fmt.Errorf("query client consensus state: %w", err) } consensusState, err = clienttypes.UnpackConsensusState(consensusStateRes.ConsensusState) if err != nil { - return fmt.Errorf("failed to unpack consensus state: %w", err) + return fmt.Errorf("unpack consensus state: %w", err) } // use local clock time as reference time if it is later than the @@ -122,7 +122,7 @@ func (c *Chain) SendTransferMsg( if err := result.Error(); err != nil { if result.PartiallySent() { c.log.Info( - "Partial success when sending transfer", + "Partial success when sending transfer.", zap.String("src_chain_id", c.ChainID()), zap.String("dst_chain_id", dst.ChainID()), zap.Object("send_result", result), @@ -131,7 +131,7 @@ func (c *Chain) SendTransferMsg( return err } else if result.SuccessfullySent() { c.log.Info( - "Successfully sent a transfer", + "Successfully sent a transfer.", zap.String("src_chain_id", c.ChainID()), zap.String("dst_chain_id", dst.ChainID()), zap.Object("send_result", result), diff --git a/relayer/path.go b/relayer/path.go index 57b23a9b4..cf949bcc9 100644 --- a/relayer/path.go +++ b/relayer/path.go @@ -77,7 +77,7 @@ func (p Paths) PathsFromChains(src, dst string) (Paths, error) { } } if len(out) == 0 { - return Paths{}, fmt.Errorf("failed to find path in config between chains %s and %s", src, dst) + return Paths{}, fmt.Errorf("find path in config between chains %s and %s", src, dst) } return out, nil } diff --git a/relayer/processor/message_processor.go b/relayer/processor/message_processor.go index 40dc90b4a..f041efb9d 100644 --- a/relayer/processor/message_processor.go +++ b/relayer/processor/message_processor.go @@ -170,19 +170,6 @@ func (mp *messageProcessor) shouldUpdateClientNow(ctx context.Context, src, dst mp.metrics.SetClientTrustingPeriod(src.info.PathName, dst.info.ChainID, dst.info.ClientID, time.Duration(dst.clientState.TrustingPeriod)) } - mp.log.Debug("should update client now?", - zap.String("path_name", src.info.PathName), - zap.String("chain_id", dst.info.ChainID), - zap.String("client_id", dst.info.ClientID), - zap.Int64("trusting_period", dst.clientState.TrustingPeriod.Milliseconds()), - zap.Int64("time_since_client_update", time.Since(consensusHeightTime).Milliseconds()), - zap.Int64("client_threshold_time", mp.clientUpdateThresholdTime.Milliseconds()), - zap.Bool("enough_blocks_passed", enoughBlocksPassed), - zap.Bool("past_two_thirds_trusting_period", pastTwoThirdsTrustingPeriod), - zap.Bool("past_configured_client_update_threshold", pastConfiguredClientUpdateThreshold), - zap.Bool("should_update_client_now", shouldUpdateClientNow), - ) - return shouldUpdateClientNow, nil } @@ -246,13 +233,12 @@ func (mp *messageProcessor) assembleMessage( mp.trackMessage(msg.tracker(assembled), i) wg.Done() if err != nil { - dst.log.Error(fmt.Sprintf("Error assembling message: %s", msg.msgType()), + dst.log.Error(fmt.Sprintf("Assemble message: %s.", msg.msgType()), zap.Object("msg", msg), zap.Error(err), 
) return } - dst.log.Debug(fmt.Sprintf("Assembled message: %s", msg.msgType()), zap.Object("msg", msg)) } // assembleMsgUpdateClient uses the ChainProvider from both pathEnds to assemble the client update header @@ -279,11 +265,11 @@ func (mp *messageProcessor) assembleMsgUpdateClient(ctx context.Context, src, ds header, err := src.chainProvider.QueryIBCHeader(ctx, int64(clientConsensusHeight.RevisionHeight+1)) if err != nil { - return fmt.Errorf("error getting IBC header at height: %d for chain_id: %s, %w", + return fmt.Errorf("getting IBC header at height: %d for chain_id: %s, %w", clientConsensusHeight.RevisionHeight+1, src.info.ChainID, err) } - mp.log.Debug("Had to query for client trusted IBC header", + mp.log.Debug("Queried for client trusted IBC header.", zap.String("path_name", src.info.PathName), zap.String("chain_id", src.info.ChainID), zap.String("counterparty_chain_id", dst.info.ChainID), @@ -392,7 +378,7 @@ func (mp *messageProcessor) sendClientUpdate( msgs := []provider.RelayerMessage{mp.msgUpdateClient} if err := dst.chainProvider.SendMessagesToMempool(broadcastCtx, msgs, mp.memo, ctx, nil); err != nil { - mp.log.Error("Error sending client update message", + mp.log.Error("Sending client update message.", zap.String("path_name", src.info.PathName), zap.String("src_chain_id", src.info.ChainID), zap.String("dst_chain_id", dst.info.ChainID), @@ -448,7 +434,7 @@ func (mp *messageProcessor) sendBatchMessages( } } - dst.log.Debug("Will relay messages", fields...) + dst.log.Debug("sendBatchMessages: will relay messages", fields...) callback := func(_ *provider.RelayerTxResponse, err error) { for _, t := range batch { @@ -506,10 +492,9 @@ func (mp *messageProcessor) sendBatchMessages( mp.metricParseTxFailureCatagory(err, src) if errors.Is(err, chantypes.ErrRedundantTx) { - mp.log.Debug("Redundant message(s)", errFields...) return } - mp.log.Error("Sending messages from batch", errFields...) + mp.log.Error("Sending messages from batch.", errFields...) return } dst.log.Debug("Message broadcast completed", fields...) @@ -593,14 +578,13 @@ func (mp *messageProcessor) sendSingleMessage( errFields = append(errFields, zap.Object("msg", tracker)) errFields = append(errFields, zap.Error(err)) if errors.Is(err, chantypes.ErrRedundantTx) { - mp.log.Debug(fmt.Sprintf("Redundant %s message", msgType), errFields...) return } - mp.log.Error(fmt.Sprintf("Error broadcasting %s message", msgType), errFields...) + mp.log.Error(fmt.Sprintf("Broadcasting message: %s.", msgType), errFields...) 
return } - dst.log.Debug(fmt.Sprintf("Successfully broadcasted %s message", msgType), zap.Object("msg", tracker)) + dst.log.Debug(fmt.Sprintf("Successfully broadcasted message: %s.", msgType), zap.Object("msg", tracker)) } func (mp *messageProcessor) metricParseTxFailureCatagory(err error, src *pathEndRuntime) { diff --git a/relayer/processor/path_end_runtime.go b/relayer/processor/path_end_runtime.go index 6cad29102..40fb088d7 100644 --- a/relayer/processor/path_end_runtime.go +++ b/relayer/processor/path_end_runtime.go @@ -181,12 +181,12 @@ func (pathEnd *pathEndRuntime) mergeMessageCache( newPc := make(PacketSequenceCache) for seq, p := range pCache { if err := checkMemoLimit(p.Data, memoLimit); err != nil { - pathEnd.log.Warn("Ignoring packet", zap.Error(err)) + pathEnd.log.Error("Ignoring packet.", zap.Error(err)) continue } if err := checkMaxReceiverSize(p.Data, maxReceiverSize); err != nil { - pathEnd.log.Warn("Ignoring packet", zap.Error(err)) + pathEnd.log.Error("Ignoring packet.", zap.Error(err)) continue } @@ -292,7 +292,7 @@ func (pathEnd *pathEndRuntime) shouldTerminate(ibcMessagesCache IBCMessagesCache } channelKey, err := PacketInfoChannelKey(m.Termination.EventType, m.Termination.Info) if err != nil { - pathEnd.log.Error("Unexpected error checking packet message", + pathEnd.log.Error("Unexpected error checking packet message.", zap.String("event_type", m.Termination.EventType), zap.Inline(channelKey), zap.Error(err), @@ -312,7 +312,7 @@ func (pathEnd *pathEndRuntime) shouldTerminate(ibcMessagesCache IBCMessagesCache return false } // stop path processor, condition fulfilled - pathEnd.log.Info("Found termination condition for packet flow") + pathEnd.log.Info("Found termination condition for packet flow.") return true case *ChannelMessageLifecycle: if m.Termination == nil || m.Termination.ChainID != pathEnd.info.ChainID { @@ -328,7 +328,7 @@ func (pathEnd *pathEndRuntime) shouldTerminate(ibcMessagesCache IBCMessagesCache foundCounterpartyChannelID := m.Termination.Info.CounterpartyChannelID == "" foundCounterpartyPortID := m.Termination.Info.CounterpartyPortID == "" for _, ci := range cache { - pathEnd.log.Info("Channel handshake termination candidate", + pathEnd.log.Info("Channel handshake termination candidate.", zap.String("termination_port_id", m.Termination.Info.PortID), zap.String("observed_port_id", ci.PortID), zap.String("termination_counterparty_port_id", m.Termination.Info.CounterpartyPortID), @@ -348,7 +348,7 @@ func (pathEnd *pathEndRuntime) shouldTerminate(ibcMessagesCache IBCMessagesCache } } if foundChannelID && foundPortID && foundCounterpartyChannelID && foundCounterpartyPortID { - pathEnd.log.Info("Found termination condition for channel handshake") + pathEnd.log.Info("Found termination condition for channel handshake.") return true } case *ChannelCloseLifecycle: @@ -360,7 +360,7 @@ func (pathEnd *pathEndRuntime) shouldTerminate(ibcMessagesCache IBCMessagesCache foundChannelID := m.SrcChannelID == "" foundPortID := m.SrcPortID == "" for _, ci := range cache { - pathEnd.log.Info("Channel close termination candidate", + pathEnd.log.Info("Channel close termination candidate.", zap.String("termination_port_id", m.SrcPortID), zap.String("observed_port_id", ci.PortID), zap.String("termination_channel_id", m.SrcChannelID), @@ -383,7 +383,7 @@ func (pathEnd *pathEndRuntime) shouldTerminate(ibcMessagesCache IBCMessagesCache } } if foundChannelID && foundPortID { - pathEnd.log.Info("Found termination condition for channel close") + pathEnd.log.Info("Found termination 
condition for channel close.") return true } case *ConnectionMessageLifecycle: @@ -400,7 +400,7 @@ func (pathEnd *pathEndRuntime) shouldTerminate(ibcMessagesCache IBCMessagesCache foundCounterpartyClientID := m.Termination.Info.CounterpartyClientID == "" foundCounterpartyConnectionID := m.Termination.Info.CounterpartyConnID == "" for _, ci := range cache { - pathEnd.log.Info("Connection handshake termination candidate", + pathEnd.log.Info("Connection handshake termination candidate.", zap.String("termination_client_id", m.Termination.Info.ClientID), zap.String("observed_client_id", ci.ClientID), zap.String("termination_counterparty_client_id", m.Termination.Info.CounterpartyClientID), @@ -420,7 +420,7 @@ func (pathEnd *pathEndRuntime) shouldTerminate(ibcMessagesCache IBCMessagesCache } } if foundClientID && foundConnectionID && foundCounterpartyClientID && foundCounterpartyConnectionID { - pathEnd.log.Info("Found termination condition for connection handshake") + pathEnd.log.Info("Found termination condition for connection handshake.") return true } } @@ -487,8 +487,7 @@ func (pathEnd *pathEndRuntime) mergeCacheData( terminate, err := pathEnd.checkForMisbehaviour(ctx, pathEnd.clientState, counterParty) if err != nil { - pathEnd.log.Error( - "Failed to check for misbehaviour", + pathEnd.log.Error("Check for misbehaviour.", zap.String("client_id", pathEnd.info.ClientID), zap.Error(err), ) @@ -535,7 +534,7 @@ func (pathEnd *pathEndRuntime) shouldSendPacketMessage(message packetIBCMessage, sequence := message.info.Sequence k, err := message.channelKey() if err != nil { - pathEnd.log.Error("Unexpected error checking if should send packet message", + pathEnd.log.Error("Unexpected error checking if should send packet message.", zap.String("event_type", eventType), zap.Uint64("sequence", sequence), zap.Inline(k), @@ -550,7 +549,7 @@ func (pathEnd *pathEndRuntime) shouldSendPacketMessage(message packetIBCMessage, } if message.info.Height >= pathEndForHeight.latestBlock.Height { - pathEnd.log.Debug("Waiting to relay packet message until counterparty height has incremented", + pathEnd.log.Debug("Not relaying packet message until counterparty height has incremented.", zap.String("event_type", eventType), zap.Uint64("sequence", sequence), zap.Uint64("message_height", message.info.Height), @@ -561,7 +560,7 @@ func (pathEnd *pathEndRuntime) shouldSendPacketMessage(message packetIBCMessage, } if !pathEnd.channelStateCache[k].Open { // channel is not open, do not send - pathEnd.log.Warn("Refusing to relay packet message because channel is not open", + pathEnd.log.Error("Refusing to relay packet message because channel is not open.", zap.String("event_type", eventType), zap.Uint64("sequence", sequence), zap.Inline(k), @@ -595,7 +594,7 @@ func (pathEnd *pathEndRuntime) shouldSendPacketMessage(message packetIBCMessage, } if inProgress.retryCount >= maxMessageSendRetries { - pathEnd.log.Error("Giving up on sending packet message after max retries", + pathEnd.log.Error("Giving up on sending packet message after max retries.", zap.String("event_type", eventType), zap.Uint64("sequence", sequence), zap.Inline(k), @@ -681,7 +680,7 @@ func (pathEnd *pathEndRuntime) shouldSendConnectionMessage(message connectionIBC return false } if inProgress.retryCount >= maxMessageSendRetries { - pathEnd.log.Error("Giving up on sending connection message after max retries", + pathEnd.log.Error("Giving up on sending connection message after max retries.", zap.String("event_type", eventType), ) // giving up on sending this connection
handshake message @@ -760,7 +759,7 @@ func (pathEnd *pathEndRuntime) shouldSendChannelMessage(message channelIBCMessag return false } if inProgress.retryCount >= maxMessageSendRetries { - pathEnd.log.Error("Giving up on sending channel message after max retries", + pathEnd.log.Error("Giving up on sending channel message after max retries.", zap.String("event_type", eventType), zap.Int("max_retries", maxMessageSendRetries), ) @@ -851,7 +850,7 @@ func (pathEnd *pathEndRuntime) shouldSendClientICQMessage(message provider.Clien return false } if inProgress.retryCount >= maxMessageSendRetries { - pathEnd.log.Error("Giving up on sending client ICQ message after max retries", + pathEnd.log.Error("Giving up on sending client ICQ message after max retries.", zap.String("query_id", string(queryID)), ) @@ -875,7 +874,7 @@ func (pathEnd *pathEndRuntime) trackProcessingMessage(tracker messageToTrack) ui sequence := t.msg.info.Sequence channelKey, err := t.msg.channelKey() if err != nil { - pathEnd.log.Error("Unexpected error tracking processing packet", + pathEnd.log.Error("Unexpected error tracking processing packet.", zap.Inline(channelKey), zap.String("event_type", eventType), zap.Uint64("sequence", sequence), diff --git a/relayer/processor/path_processor.go b/relayer/processor/path_processor.go index 15de7d0c0..85a3892d5 100644 --- a/relayer/processor/path_processor.go +++ b/relayer/processor/path_processor.go @@ -2,6 +2,7 @@ package processor import ( "context" + "errors" "fmt" "time" @@ -77,7 +78,8 @@ type PathProcessor struct { maxMsgs uint64 memoLimit, maxReceiverSize int - metrics *PrometheusMetrics + metrics *PrometheusMetrics + SkippedPacketsHandlingConfig *SkippedPacketsHandlingConfig } // PathProcessors is a slice of PathProcessor instances @@ -102,22 +104,24 @@ func NewPathProcessor( flushInterval time.Duration, maxMsgs uint64, memoLimit, maxReceiverSize int, + skippedPacketsHandlingCfg *SkippedPacketsHandlingConfig, ) *PathProcessor { isLocalhost := pathEnd1.ClientID == ibcexported.LocalhostClientID pp := &PathProcessor{ - log: log, - pathEnd1: newPathEndRuntime(log, pathEnd1, metrics), - pathEnd2: newPathEndRuntime(log, pathEnd2, metrics), - retryProcess: make(chan struct{}, 2), - memo: memo, - clientUpdateThresholdTime: clientUpdateThresholdTime, - flushInterval: flushInterval, - metrics: metrics, - isLocalhost: isLocalhost, - maxMsgs: maxMsgs, - memoLimit: memoLimit, - maxReceiverSize: maxReceiverSize, + log: log, + pathEnd1: newPathEndRuntime(log, pathEnd1, metrics), + pathEnd2: newPathEndRuntime(log, pathEnd2, metrics), + retryProcess: make(chan struct{}, 2), + memo: memo, + clientUpdateThresholdTime: clientUpdateThresholdTime, + flushInterval: flushInterval, + metrics: metrics, + isLocalhost: isLocalhost, + maxMsgs: maxMsgs, + memoLimit: memoLimit, + maxReceiverSize: maxReceiverSize, + SkippedPacketsHandlingConfig: skippedPacketsHandlingCfg, } if flushInterval == 0 { pp.disablePeriodicFlush() @@ -275,7 +279,7 @@ func (pp *PathProcessor) ProcessBacklogIfReady() { default: // Log that the channel is saturated; // something is wrong if we are retrying this quickly. 
- pp.log.Error("Failed to enqueue path processor retry, retries already scheduled") + pp.log.Error("Enqueue path processor retry, retries already scheduled.") } } @@ -295,10 +299,40 @@ func (pp *PathProcessor) HandleNewData(chainID string, cacheData ChainProcessorC func (pp *PathProcessor) handleFlush(ctx context.Context) { flushTimer := pp.flushInterval - pp.log.Debug("Flushing PathProcessor (handleFlush)") if err := pp.flush(ctx); err != nil { - pp.log.Warn("Flush not complete", zap.Error(err)) flushTimer = flushFailureRetry + var se SkippedError + if errors.As(err, &se) && pp.SkippedPacketsHandlingConfig != nil { + /* + An error will have been returned if there are many outstanding packets on any chain which for which an ack has not yet been produced + This is normal if using rollups with delayed ack middleware, so we make sure to handle this gracefully: + instead of treating this as a failure and retrying after 5 seconds, we may configure to continue without retrying + */ + var counterPartyChain string + if pp.pathEnd1.info.ChainID == pp.SkippedPacketsHandlingConfig.HubChainID { + counterPartyChain = pp.pathEnd2.info.ChainID + } + if pp.pathEnd2.info.ChainID == pp.SkippedPacketsHandlingConfig.HubChainID { + counterPartyChain = pp.pathEnd1.info.ChainID + } + + for chain, chanSkipped := range se.packets { + skippedPacketsOnCounterparty := chain == counterPartyChain + onlySkippedPacketsOnCounterparty := len(se.packets) == 1 + if skippedPacketsOnCounterparty && onlySkippedPacketsOnCounterparty { + for _, skipped := range chanSkipped { + onlySkippedAcks := skipped.Recv == 0 && 0 < skipped.Ack + doNotComplainAboutSkippedAcks := pp.SkippedPacketsHandlingConfig.IgnoreHubAcksWhenFlushing + if onlySkippedAcks && doNotComplainAboutSkippedAcks { + flushTimer = pp.flushInterval // do not retry soon + pp.log.Debug("Flush: skipped some acks, but continuing as normal.") + } + } + } + } + + } + pp.log.Error("Flush. Will try again later.", zap.Error(err), zap.Duration("until next attempt (seconds)", flushTimer)) } pp.flushTimer.Stop() pp.flushTimer = time.NewTimer(flushTimer) @@ -382,7 +416,7 @@ func (pp *PathProcessor) processAvailableSignals(ctx context.Context, cancel fun pp.maxReceiverSize, ) } - // Periodic flush to clear out any old packets + pp.log.Debug("Flushing due to timer firing.") pp.handleFlush(ctx) } return false @@ -407,16 +441,15 @@ func (pp *PathProcessor) Run(ctx context.Context, cancel func()) { } } - pp.log.Debug("path processor run: are the chains in sync? ", zap.Bool("pathEnd1", pp.pathEnd1.inSync), zap.Bool("pathEnd2", pp.pathEnd2.inSync)) if !pp.pathEnd1.inSync || !pp.pathEnd2.inSync { continue } if pp.shouldFlush() && !pp.initialFlushComplete { + pp.log.Debug("Flushing: should flush and initial flush is not yet complete.") pp.handleFlush(ctx) pp.initialFlushComplete = true } else if pp.shouldTerminateForFlushComplete() { - pp.log.Debug("PathProcessor terminating due to flush completion. Blocking until finished. 
CTRL-C!") /* NOTE: it is possible that there are still outstanding broadcasts @@ -430,7 +463,7 @@ func (pp *PathProcessor) Run(ctx context.Context, cancel func()) { // process latest message cache state from both pathEnds if err := pp.processLatestMessages(ctx, cancel); err != nil { - pp.log.Debug("error process latest messages", zap.Error(err)) + pp.log.Debug("ERROR process latest messages", zap.Error(err)) // in case of IBC message send errors, schedule retry after durationErrorRetry if retryTimer != nil { retryTimer.Stop() @@ -469,13 +502,13 @@ func (pp *PathProcessor) handleLocalhostData(cacheData ChainProcessorCacheData) for k, v := range cacheData.IBCMessagesCache.PacketFlow { chan1, err := chantypes.ParseChannelSequence(k.ChannelID) if err != nil { - pp.log.Error("Failed to parse channel ID int from string", zap.Error(err)) + pp.log.Error("Parse channel ID int from string.", zap.Error(err)) continue } chan2, err := chantypes.ParseChannelSequence(k.CounterpartyChannelID) if err != nil { - pp.log.Error("Failed to parse channel ID int from string", zap.Error(err)) + pp.log.Error("Parse channel ID int from string.", zap.Error(err)) continue } @@ -517,7 +550,7 @@ func (pp *PathProcessor) handleLocalhostData(cacheData ChainProcessorCacheData) pathEnd2Cache.IBCMessagesCache.ChannelHandshake[eventType][k] = v default: - pp.log.Error("Invalid IBC channel event type", zap.String("event_type", eventType)) + pp.log.Error("Invalid IBC channel event type.", zap.String("event_type", eventType)) } } } diff --git a/relayer/processor/path_processor_internal.go b/relayer/processor/path_processor_internal.go index 02b5d6503..5f13e8623 100644 --- a/relayer/processor/path_processor_internal.go +++ b/relayer/processor/path_processor_internal.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "sort" + "strings" "sync" "github.com/cosmos/relayer/v2/dymutils/gerr" @@ -59,7 +60,7 @@ func (pp *PathProcessor) getMessagesToSend( dstChan, dstPort := m.info.DestChannel, m.info.DestPort res, err := dst.chainProvider.QueryNextSeqRecv(ctx, 0, dstChan, dstPort) if err != nil { - dst.log.Error("Failed to query next sequence recv", + dst.log.Error("Query next sequence recv", zap.String("channel_id", dstChan), zap.String("port_id", dstPort), zap.Error(err), @@ -71,7 +72,7 @@ func (pp *PathProcessor) getMessagesToSend( srcChan, srcPort := m.info.SourceChannel, m.info.SourcePort res, err := src.chainProvider.QueryNextSeqAck(ctx, 0, srcChan, srcPort) if err != nil { - src.log.Error("Failed to query next sequence ack", + src.log.Error("Query next sequence ack", zap.String("channel_id", srcChan), zap.String("port_id", srcPort), zap.Error(err), @@ -767,7 +768,7 @@ func (pp *PathProcessor) queuePreInitMessages(cancel func()) { eventType, ok := observedEventTypeForDesiredMessage[m.Initial.EventType] if !ok { pp.log.Error( - "Failed to queue initial connection message, event type not handled", + "Queue initial connection message, event type not handled.", zap.String("event_type", m.Initial.EventType), ) cancel() @@ -797,7 +798,7 @@ func (pp *PathProcessor) queuePreInitMessages(cancel func()) { eventType, ok := observedEventTypeForDesiredMessage[m.Initial.EventType] if !ok { pp.log.Error( - "Failed to queue initial connection message, event type not handled", + "Queue initial connection message, event type not handled.", zap.String("event_type", m.Initial.EventType), ) cancel() @@ -828,7 +829,7 @@ func (pp *PathProcessor) queuePreInitMessages(cancel func()) { eventType, ok := observedEventTypeForDesiredMessage[m.Initial.EventType] if !ok 
{ pp.log.Error( - "Failed to queue initial channel message, event type not handled", + "Queue initial channel message, event type not handled.", zap.String("event_type", m.Initial.EventType), ) cancel() @@ -862,7 +863,7 @@ func (pp *PathProcessor) queuePreInitMessages(cancel func()) { break } if counterpartyState, ok := pp.pathEnd2.channelStateCache[k.Counterparty()]; ok && !counterpartyState.Open { - pp.log.Info("Channel already closed on both sides") + pp.log.Info("Channel already closed on both sides.") cancel() return } @@ -888,7 +889,7 @@ func (pp *PathProcessor) queuePreInitMessages(cancel func()) { break } if counterpartyChanState, ok := pp.pathEnd1.channelStateCache[k.Counterparty()]; ok && !counterpartyChanState.Open { - pp.log.Info("Channel already closed on both sides") + pp.log.Info("Channel already closed on both sides.") cancel() return } @@ -1067,7 +1068,7 @@ func (pp *PathProcessor) processLatestMessages(ctx context.Context, cancel func( } if pathEnd1Messages.size() != 0 || pathEnd2Messages.size() != 0 { - pp.log.Debug("Processing some messages", + pp.log.Debug("Processing latest messages", zap.Any("path1ChainID", pp.pathEnd1.info.ChainID), zap.Any("pathEnd1Messages", pathEnd1Messages.debugString()), zap.Any("path2ChainID", pp.pathEnd2.info.ChainID), @@ -1080,14 +1081,14 @@ func (pp *PathProcessor) processLatestMessages(ctx context.Context, cancel func( eg.Go(func() error { mp := newMessageProcessor(pp.log, pp.metrics, pp.memo, pp.clientUpdateThresholdTime, pp.isLocalhost) if err := mp.processMessages(ctx, pathEnd1Messages, pp.pathEnd2, pp.pathEnd1); err != nil { - return fmt.Errorf("process path end 1 messages: %w", err) + return fmt.Errorf("process path end 1 messages: dst: %s: %w", pp.pathEnd1.info.ChainID, err) } return nil }) eg.Go(func() error { mp := newMessageProcessor(pp.log, pp.metrics, pp.memo, pp.clientUpdateThresholdTime, pp.isLocalhost) if err := mp.processMessages(ctx, pathEnd2Messages, pp.pathEnd1, pp.pathEnd2); err != nil { - return fmt.Errorf("process path end 2 messages: %w", err) + return fmt.Errorf("process path end 2 messages: dst: %s: %w", pp.pathEnd2.info.ChainID, err) } return nil }) @@ -1187,8 +1188,6 @@ func queryPacketCommitments( mu sync.Locker, ) func() error { return func() error { - pathEnd.log.Debug("Flushing", zap.String("channel", k.ChannelID), zap.String("port", k.PortID)) - c, err := pathEnd.chainProvider.QueryPacketCommitments(ctx, pathEnd.latestBlock.Height, k.ChannelID, k.PortID) if err != nil { return err @@ -1212,6 +1211,29 @@ type skippedPackets struct { Ack uint64 } +type SkippedPacketsHandlingConfig struct { + HubChainID string + IgnoreHubAcksWhenFlushing bool +} + +type SkippedError struct { + // chain id -> channel -> skipped packets + packets map[string]map[ChannelKey]skippedPackets +} + +func (s SkippedError) Error() string { + sb := strings.Builder{} + for chainID, chainSkipped := range s.packets { + for channelKey, skipped := range chainSkipped { + sb.WriteString(fmt.Sprintf( + "{ %s %s %s recv: %d, number of committed packets for which acks need to be relayed: %d } ", + chainID, channelKey.ChannelID, channelKey.PortID, skipped.Recv, skipped.Ack, + )) + } + } + return sb.String() +} + // queuePendingRecvAndAcks returns the number of packets skipped during a flush (nil if none). 
func (pp *PathProcessor) queuePendingRecvAndAcks( ctx context.Context, @@ -1399,18 +1421,25 @@ SeqLoop: seq := seq - dst.log.Debug("Querying recv packet", - zap.String("channel", k.CounterpartyChannelID), - zap.String("port", k.CounterpartyPortID), - zap.Uint64("sequence", seq), - ) - eg.Go(func() error { recvPacket, err := dst.chainProvider.QueryRecvPacket(ctx, k.CounterpartyChannelID, k.CounterpartyPortID, seq) - if err != nil { - if !errors.Is(err, gerr.ErrNotFound) { - return fmt.Errorf("query recv packet: seq: dst: %s: %d: %w", dst.info.ChainID, seq, err) - } + + if err != nil && !errors.Is(err, gerr.ErrNotFound) { + return fmt.Errorf("query recv packet: seq: dst: %s: %d: %w", dst.info.ChainID, seq, err) + } + ackFound := true + if errors.Is(err, gerr.ErrNotFound) { + ackFound = false + } + dst.log.Debug("Queried to see if ack exists on chain", + zap.String("chain", dst.info.ChainID), + zap.String("channel", k.CounterpartyChannelID), + zap.String("port", k.CounterpartyPortID), + zap.Uint64("sequence", seq), + zap.Bool("ack exists", ackFound), + ) + + if !ackFound { /* It's possible that an acknowledgement event was not yet published on the dst chain */ @@ -1438,16 +1467,10 @@ SeqLoop: if len(unackedAndWillAck) > 0 { dst.log.Debug( - "Will flush MsgAcknowledgement", + "Will flush an Ack.", zap.Object("channel", k), zap.Uint64s("sequences", unackedAndWillAck), ) - } else { - dst.log.Debug( - "No MsgAcknowledgement to flush", - zap.String("channel", k.CounterpartyChannelID), - zap.String("port", k.CounterpartyPortID), - ) } return skipped, nil @@ -1550,19 +1573,7 @@ func (pp *PathProcessor) flush(ctx context.Context) error { pp.pathEnd2.mergeMessageCache(pathEnd2Cache, pp.pathEnd1.info.ChainID, pp.pathEnd1.inSync, pp.memoLimit, pp.maxReceiverSize) if len(skipped) > 0 { - skippedPacketsString := "" - for chainID, chainSkipped := range skipped { - for channelKey, skipped := range chainSkipped { - skippedPacketsString += fmt.Sprintf( - "{ %s %s %s recv: %d, ack: %d } ", - chainID, channelKey.ChannelID, channelKey.PortID, skipped.Recv, skipped.Ack, - ) - } - } - return fmt.Errorf( - "flush was successful, but packets are still pending. 
%s", - skippedPacketsString, - ) + return SkippedError{skipped} } return nil @@ -1622,6 +1633,6 @@ func (pp *PathProcessor) shouldTerminateForFlushComplete() bool { } } } - pp.log.Info("Found termination condition for flush, all caches cleared") + pp.log.Info("Found termination condition for flush, all caches cleared.") return true } diff --git a/relayer/processor/types_internal.go b/relayer/processor/types_internal.go index 7e4cb902d..7fc23b99e 100644 --- a/relayer/processor/types_internal.go +++ b/relayer/processor/types_internal.go @@ -120,7 +120,7 @@ func (msg packetIBCMessage) assemble( var err error proof, err = packetProof(ctx, msg.info, src.latestBlock.Height) if err != nil { - return nil, fmt.Errorf("error querying packet proof: %w", err) + return nil, fmt.Errorf("querying packet proof: %w", err) } return assembleMessage(msg.info, proof) } @@ -210,7 +210,7 @@ func (msg channelIBCMessage) assemble( if chanProof != nil { proof, err = chanProof(ctx, msg.info, src.latestBlock.Height) if err != nil { - return nil, fmt.Errorf("error querying channel proof: %w", err) + return nil, fmt.Errorf("querying channel proof: %w", err) } } return assembleMessage(msg.info, proof) @@ -283,7 +283,7 @@ func (msg connectionIBCMessage) assemble( if connProof != nil { proof, err = connProof(ctx, msg.info, src.latestBlock.Height) if err != nil { - return nil, fmt.Errorf("error querying connection proof: %w", err) + return nil, fmt.Errorf("querying connection proof: %w", err) } } @@ -337,7 +337,7 @@ func (msg clientICQMessage) assemble( proof, err := src.chainProvider.QueryICQWithProof(ctx, msg.info.Type, msg.info.Request, src.latestBlock.Height-1) if err != nil { - return nil, fmt.Errorf("error during interchain query: %w", err) + return nil, fmt.Errorf("during interchain query: %w", err) } return dst.chainProvider.MsgSubmitQueryResponse(msg.info.Chain, msg.info.QueryID, proof) diff --git a/relayer/provider/matcher.go b/relayer/provider/matcher.go index e871c4d86..315786418 100644 --- a/relayer/provider/matcher.go +++ b/relayer/provider/matcher.go @@ -187,7 +187,7 @@ func checkTendermintMisbehaviour( tmHeader, ok := header.(TendermintIBCHeader) if !ok { - return nil, fmt.Errorf("failed to check for misbehaviour, expected %T, got %T", (*TendermintIBCHeader)(nil), header) + return nil, fmt.Errorf("check for misbehaviour, expected %T, got %T", (*TendermintIBCHeader)(nil), header) } trustedHeader, err = tmHeader.TMHeader() diff --git a/relayer/query.go b/relayer/query.go index 0046ed598..41245bab3 100644 --- a/relayer/query.go +++ b/relayer/query.go @@ -54,8 +54,8 @@ func QueryClientStates(ctx context.Context, } return nil }, retry.Context(egCtx), RtyAtt, RtyDel, RtyErr, retry.OnRetry(func(n uint, err error) { - src.log.Info( - "Failed to query client state when updating clients", + src.log.Debug( + "Retrying query client state when updating clients.", zap.String("client_id", src.ClientID()), zap.Uint("attempt", n+1), zap.Uint("max_attempts", RtyAttNum), @@ -73,8 +73,8 @@ func QueryClientStates(ctx context.Context, } return nil }, retry.Context(egCtx), RtyAtt, RtyDel, RtyErr, retry.OnRetry(func(n uint, err error) { - dst.log.Info( - "Failed to query client state when updating clients", + dst.log.Debug( + "Retrying query client state when updating clients.", zap.String("client_id", dst.ClientID()), zap.Uint("attempt", n+1), zap.Uint("max_attempts", RtyAttNum), @@ -109,8 +109,8 @@ func QueryChannel(ctx context.Context, src *Chain, channelID string) (*chantypes srcChannels, err = 
src.ChainProvider.QueryConnectionChannels(ctx, srch, src.ConnectionID()) return err }, retry.Context(ctx), RtyAtt, RtyDel, RtyErr, retry.OnRetry(func(n uint, err error) { - src.log.Info( - "Failed to query connection channels", + src.log.Debug( + "Retrying query connection channels.", zap.String("conn_id", src.ConnectionID()), zap.Uint("attempt", n+1), zap.Uint("max_attempts", RtyAttNum), @@ -152,8 +152,8 @@ func QueryPortChannel(ctx context.Context, src *Chain, portID string) (*chantype srcChannels, err = src.ChainProvider.QueryConnectionChannels(ctx, srch, src.ConnectionID()) return err }, retry.Context(ctx), RtyAtt, RtyDel, RtyErr, retry.OnRetry(func(n uint, err error) { - src.log.Info( - "Failed to query connection channels", + src.log.Debug( + "Retrying query connection channels.", zap.String("conn_id", src.ConnectionID()), zap.Uint("attempt", n+1), zap.Uint("max_attempts", RtyAttNum), @@ -322,7 +322,6 @@ func SPrintClientExpiration(chain *Chain, expiration time.Time, clientInfo Clien chain.ClientID(), chain.ChainID(), status, expirationFormatted, remainingTime.Round(time.Second), clientInfo.LatestHeight.GetRevisionHeight(), clientInfo.TrustingPeriod.String(), clientInfo.UnbondingTime.Round(time.Second)) return legacyOutput - } // Returns clientExpiration data in JSON format. diff --git a/relayer/relaymsgs_test.go b/relayer/relaymsgs_test.go index dab099072..800370d90 100644 --- a/relayer/relaymsgs_test.go +++ b/relayer/relaymsgs_test.go @@ -147,7 +147,7 @@ func TestRelayMsgs_Send_Success(t *testing.T) { func TestRelayMsgs_Send_Errors(t *testing.T) { t.Run("one batch and one error", func(t *testing.T) { - srcErr := fmt.Errorf("source error") + srcErr := fmt.Errorf("source") src := relayer.RelayMsgSender{ ChainID: "src", SendMessages: func(ctx context.Context, msgs []provider.RelayerMessage, memo string) (*provider.RelayerTxResponse, bool, error) { @@ -155,7 +155,7 @@ func TestRelayMsgs_Send_Errors(t *testing.T) { }, } - dstErr := fmt.Errorf("dest error") + dstErr := fmt.Errorf("dest") dst := relayer.RelayMsgSender{ ChainID: "dst", SendMessages: func(ctx context.Context, msgs []provider.RelayerMessage, memo string) (*provider.RelayerTxResponse, bool, error) { @@ -233,7 +233,7 @@ func TestRelayMsgs_Send_Errors(t *testing.T) { }) t.Run("two batches with success then error", func(t *testing.T) { - srcErr := fmt.Errorf("source error") + srcErr := fmt.Errorf("source") var srcCalls int src := relayer.RelayMsgSender{ ChainID: "src", @@ -250,7 +250,7 @@ func TestRelayMsgs_Send_Errors(t *testing.T) { }, } - dstErr := fmt.Errorf("dest error") + dstErr := fmt.Errorf("dest") var dstCalls int dst := relayer.RelayMsgSender{ ChainID: "dst", diff --git a/relayer/strategies.go b/relayer/strategies.go index 274e38a2b..858a2fb1e 100644 --- a/relayer/strategies.go +++ b/relayer/strategies.go @@ -49,6 +49,7 @@ func StartRelayer( initialBlockHistory uint64, metrics *processor.PrometheusMetrics, stuckPacket *processor.StuckPacket, + skippedPacketsHandlingCfg *processor.SkippedPacketsHandlingConfig, ) chan error { // prevent incorrect bech32 address prefixed addresses when calling AccAddress.String() sdk.SetAddrCacheEnabled(false) @@ -110,6 +111,7 @@ func StartRelayer( errorChan, metrics, stuckPacket, + skippedPacketsHandlingCfg, ) return errorChan case ProcessorLegacy: @@ -167,6 +169,7 @@ func relayerStartEventProcessor( errCh chan<- error, metrics *processor.PrometheusMetrics, stuckPacket *processor.StuckPacket, + skippedPacketsHandlingCfg *processor.SkippedPacketsHandlingConfig, ) { defer close(errCh)
@@ -187,6 +190,7 @@ func relayerStartEventProcessor( maxMsgLength, memoLimit, maxReceiverSize, + skippedPacketsHandlingCfg, )) } @@ -219,7 +223,7 @@ func relayerStartLegacy( if errors.Is(err, context.Canceled) { errCh <- err } else { - errCh <- fmt.Errorf("error querying all channels on chain{%s}@connection{%s}: %w", + errCh <- fmt.Errorf("querying all channels on chain{%s}@connection{%s}: %w", src.ChainID(), src.ConnectionID(), err) } return @@ -281,8 +285,8 @@ func relayerStartLegacy( } return nil }, retry.Context(ctx), RtyAtt, RtyDel, RtyErr, retry.OnRetry(func(n uint, err error) { - src.log.Info( - "Failed to query channel for updated state", + src.log.Debug( + "Retrying query channel for updated state.", zap.String("src_chain_id", src.ChainID()), zap.String("src_channel_id", channel.channel.ChannelId), zap.Uint("attempt", n+1), @@ -298,7 +302,7 @@ func relayerStartLegacy( if queryChannelResp.Channel.State != types.OPEN { delete(srcOpenChannels, channel.channel.ChannelId) src.log.Info( - "Channel is no longer in open state", + "Channel is no longer in open state.", zap.String("chain_id", src.ChainID()), zap.String("channel_id", channel.channel.ChannelId), zap.String("channel_state", queryChannelResp.Channel.State.String()), @@ -322,8 +326,8 @@ func queryChannelsOnConnection(ctx context.Context, src *Chain) ([]*types.Identi srcChannels, err = src.ChainProvider.QueryConnectionChannels(ctx, srch, src.ConnectionID()) return err }, retry.Context(ctx), RtyAtt, RtyDel, RtyErr, retry.OnRetry(func(n uint, err error) { - src.log.Info( - "Failed to query connection channels", + src.log.Debug( + "Retrying query connection channels.", zap.String("conn_id", src.ConnectionID()), zap.Uint("attempt", n+1), zap.Uint("max_attempts", RtyAttNum), @@ -429,7 +433,7 @@ func relayUnrelayedPackets(ctx context.Context, log *zap.Logger, src, dst *Chain if len(sp.Src) > 0 { src.log.Info( - "Unrelayed source packets", + "Unrelayed source packets.", zap.String("src_chain_id", src.ChainID()), zap.String("src_channel_id", srcChannel.ChannelId), zap.Uint64s("seqs", sp.Src), @@ -438,7 +442,7 @@ func relayUnrelayedPackets(ctx context.Context, log *zap.Logger, src, dst *Chain if len(sp.Dst) > 0 { src.log.Info( - "Unrelayed destination packets", + "Unrelayed destination packets.", zap.String("dst_chain_id", dst.ChainID()), zap.String("dst_channel_id", srcChannel.Counterparty.ChannelId), zap.Uint64s("seqs", sp.Dst), @@ -449,8 +453,8 @@ func relayUnrelayedPackets(ctx context.Context, log *zap.Logger, src, dst *Chain // If there was a context cancellation or deadline while attempting to relay packets, // log that and indicate failure. if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { - log.Warn( - "Context finished while waiting for RelayPackets to complete", + log.Error( + "Context finished while waiting for RelayPackets to complete.", zap.String("src_chain_id", src.ChainID()), zap.String("src_channel_id", srcChannel.ChannelId), zap.String("dst_chain_id", dst.ChainID()), @@ -462,8 +466,8 @@ func relayUnrelayedPackets(ctx context.Context, log *zap.Logger, src, dst *Chain // If we encounter an error that suggest node configuration issues, log a more insightful error message. 
if strings.Contains(err.Error(), "Internal error: transaction indexing is disabled") { - log.Warn( - "Remote server needs to enable transaction indexing", + log.Error( + "Remote server needs to enable transaction indexing.", zap.String("src_chain_id", src.ChainID()), zap.String("src_channel_id", srcChannel.ChannelId), zap.String("dst_chain_id", dst.ChainID()), @@ -474,8 +478,8 @@ func relayUnrelayedPackets(ctx context.Context, log *zap.Logger, src, dst *Chain } // Otherwise, not a context error, but an application-level error. - log.Warn( - "Relay packets error", + log.Error( + "Relay packets.", zap.String("src_chain_id", src.ChainID()), zap.String("src_channel_id", srcChannel.ChannelId), zap.String("dst_chain_id", dst.ChainID()), @@ -512,7 +516,7 @@ func relayUnrelayedAcks(ctx context.Context, log *zap.Logger, src, dst *Chain, m if len(ap.Src) > 0 { log.Info( - "Unrelayed source acknowledgements", + "Unrelayed source acknowledgements.", zap.String("src_chain_id", src.ChainID()), zap.String("src_channel_id", srcChannel.ChannelId), zap.Uint64s("acks", ap.Src), @@ -521,7 +525,7 @@ func relayUnrelayedAcks(ctx context.Context, log *zap.Logger, src, dst *Chain, m if len(ap.Dst) > 0 { log.Info( - "Unrelayed destination acknowledgements", + "Unrelayed destination acknowledgements.", zap.String("dst_chain_id", dst.ChainID()), zap.String("dst_channel_id", srcChannel.Counterparty.ChannelId), zap.Uint64s("acks", ap.Dst), @@ -532,8 +536,8 @@ func relayUnrelayedAcks(ctx context.Context, log *zap.Logger, src, dst *Chain, m // If there was a context cancellation or deadline while attempting to relay acknowledgements, // log that and indicate failure. if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { - log.Warn( - "Context finished while waiting for RelayAcknowledgements to complete", + log.Error( + "Context finished while waiting for RelayAcknowledgements to complete.", zap.String("src_chain_id", src.ChainID()), zap.String("src_channel_id", srcChannel.ChannelId), zap.String("dst_chain_id", dst.ChainID()), @@ -544,8 +548,8 @@ func relayUnrelayedAcks(ctx context.Context, log *zap.Logger, src, dst *Chain, m } // Otherwise, not a context error, but an application-level error. - log.Warn( - "Relay acknowledgements error", + log.Error( + "Relay acknowledgements.", zap.String("src_chain_id", src.ChainID()), zap.String("src_channel_id", srcChannel.ChannelId), zap.String("dst_chain_id", dst.ChainID()),