diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 91ab74e788..3faba43d7b 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,11 +1,11 @@ # public -/.github @zhangzhw8 @gaohongsong @iSecloud -/docs @zhangzhw8 @gaohongsong @iSecloud -/*.md @zhangzhw8 @gaohongsong @iSecloud -/helm-charts @zhangzhw8 @gaohongsong @iSecloud +/.github @zhangzhw8 @iSecloud +/docs @zhangzhw8 @iSecloud +/*.md @zhangzhw8 @iSecloud +/helm-charts @zhangzhw8 @iSecloud # dbm-ui -/dbm-ui @zhangzhw8 @gaohongsong @iSecloud +/dbm-ui @zhangzhw8 @iSecloud /dbm-ui/frontend @hLinx @jinquantianxia # dbm-services common @@ -13,7 +13,7 @@ /dbm-services/common/db-config @seanlook @xfwduke /dbm-services/common/db-resource @ymakedaq @seanlook @xfwduke /dbm-services/common/db-dns @omg-by @xiepaup @lukemakeit -/dbm-services/common/dbha @zyqlzr @xjxia +/dbm-services/common/dbha @xjxia # bigdata /dbm-services/bigdata @zhangrq5 @zvictorino @wangyao963 @@ -23,7 +23,7 @@ # mysql /dbm-services/mysql @seanlook @xfwduke @yksitu @ymakedaq -/dbm-services/mysql/db-partition @fanfanyangyang @xfwduke @seanlook +/dbm-services/mysql/db-partition @fanfanyangyang @xfan0805 /dbm-services/mysql/db-priv @fanfanyangyang @xfwduke @seanlook /dbm-services/mysql/db-remote-service @xfwduke @seanlook /dbm-services/mysql/db-simulation @seanlook @xfwduke @ymakedaq diff --git a/dbm-services/common/dbha/ha-module/bk-dbha/templates/configmap.yaml b/dbm-services/common/dbha/ha-module/bk-dbha/templates/configmap.yaml index 8d620650ff..4cebfe021b 100644 --- a/dbm-services/common/dbha/ha-module/bk-dbha/templates/configmap.yaml +++ b/dbm-services/common/dbha/ha-module/bk-dbha/templates/configmap.yaml @@ -10,6 +10,7 @@ data: active_db_type: [ "tendbha:backend", "tendbha:proxy", + "riak" ] city: "3" campus: "深圳" @@ -57,6 +58,8 @@ data: proxy_pass: "proxy-conn-pass" timeout: 10 redis: + riak: + timeout: 10 dns: bind_conf: host: "bind-api-host" diff --git a/dbm-services/common/dbha/ha-module/ha.yaml 
b/dbm-services/common/dbha/ha-module/ha.yaml index 8c1e77969b..fc7932e612 100644 --- a/dbm-services/common/dbha/ha-module/ha.yaml +++ b/dbm-services/common/dbha/ha-module/ha.yaml @@ -9,6 +9,7 @@ agent_conf: active_db_type: [ "tendbha:backend", "tendbha:proxy", + "riak" ] city: "3" campus: "深圳" @@ -59,6 +60,8 @@ db_conf: timeout: 10 redis: timeout: 10 + riak: + timeout: 10 password_conf: host: "bind-api-host" port: 80 diff --git a/dbm-services/mysql/db-partition/handler/handler.go b/dbm-services/mysql/db-partition/handler/handler.go index 1957ab9d6d..06b553224a 100644 --- a/dbm-services/mysql/db-partition/handler/handler.go +++ b/dbm-services/mysql/db-partition/handler/handler.go @@ -179,6 +179,26 @@ func DisablePartition(r *gin.Context) { return } +// DisablePartitionByCluster 用于集群禁用时停止分区,标志为 offlinewithclu +func DisablePartitionByCluster(r *gin.Context) { + var input service.DisablePartitionInput + if err := r.ShouldBind(&input); err != nil { + err = errno.ErrReadEntity.Add(err.Error()) + slog.Error(err.Error()) + SendResponse(r, err, nil) + return + } + slog.Info(fmt.Sprintf("ids: %v, operator: %s", input.Ids, input.Operator)) + err := input.DisablePartitionConfigByCluster() + if err != nil { + slog.Error(err.Error()) + SendResponse(r, errors.New(fmt.Sprintf("分区禁用失败!%s", err.Error())), nil) + return + } + SendResponse(r, nil, "分区禁用成功!") + return +} + // EnablePartition TODO func EnablePartition(r *gin.Context) { var input service.EnablePartitionInput @@ -199,6 +219,26 @@ func EnablePartition(r *gin.Context) { return } +// EnablePartitionByCluster 集群启用时启用分区 +func EnablePartitionByCluster(r *gin.Context) { + var input service.EnablePartitionInput + if err := r.ShouldBind(&input); err != nil { + err = errno.ErrReadEntity.Add(err.Error()) + slog.Error(err.Error()) + SendResponse(r, err, nil) + return + } + slog.Info(fmt.Sprintf("ids: %v, operator: %s", input.Ids, input.Operator)) + err := input.EnablePartitionByCluster() + if err != nil { + slog.Error(err.Error()) + 
SendResponse(r, errors.New(fmt.Sprintf("分区启用失败!%s", err.Error())), nil) + return + } + SendResponse(r, nil, "分区启用成功!") + return +} + // UpdatePartitionsConfig TODO func UpdatePartitionsConfig(r *gin.Context) { var input service.CreatePartitionsInput diff --git a/dbm-services/mysql/db-partition/main.go b/dbm-services/mysql/db-partition/main.go index 238e77974e..56c50079df 100644 --- a/dbm-services/mysql/db-partition/main.go +++ b/dbm-services/mysql/db-partition/main.go @@ -4,10 +4,6 @@ import ( "net/http" "os" - "dbm-services/common/go-pubpkg/apm/metric" - "dbm-services/common/go-pubpkg/apm/trace" - "dbm-services/mysql/db-partition/monitor" - "github.com/gin-gonic/gin" "github.com/golang-migrate/migrate/v4" flag "github.com/spf13/pflag" @@ -15,6 +11,10 @@ import ( "go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin" "golang.org/x/exp/slog" + "dbm-services/common/go-pubpkg/apm/metric" + "dbm-services/common/go-pubpkg/apm/trace" + "dbm-services/mysql/db-partition/monitor" + "dbm-services/mysql/db-partition/assests" "dbm-services/mysql/db-partition/cron" "dbm-services/mysql/db-partition/model" diff --git a/dbm-services/mysql/db-partition/router/router.go b/dbm-services/mysql/db-partition/router/router.go index 2738acf5e8..3689e3d4b6 100644 --- a/dbm-services/mysql/db-partition/router/router.go +++ b/dbm-services/mysql/db-partition/router/router.go @@ -21,6 +21,8 @@ func RegisterRouter(engine *gin.Engine) { p.POST("/dry_run", handler.DryRun) p.POST("/disable_partition", handler.DisablePartition) p.POST("/enable_partition", handler.EnablePartition) + p.POST("/disable_partition_cluster", handler.DisablePartitionByCluster) + p.POST("/enable_partition_cluster", handler.EnablePartitionByCluster) // 更新分区配置 p.POST("/update_conf", handler.UpdatePartitionsConfig) p.POST("/create_log", handler.CreatePartitionLog) diff --git a/dbm-services/mysql/db-partition/service/check_partition_base_func.go 
b/dbm-services/mysql/db-partition/service/check_partition_base_func.go index f8e98e375e..fedceab695 100644 --- a/dbm-services/mysql/db-partition/service/check_partition_base_func.go +++ b/dbm-services/mysql/db-partition/service/check_partition_base_func.go @@ -56,6 +56,8 @@ func (config *PartitionConfig) GetPartitionDbLikeTbLike(dbtype string, splitCnt } AddString(&addSqls, sql) if tb.Phase == online { + // 启用的分区规则,会执行删除历史分区 + // 禁用的分区规则,会新增分区,但是不会删除历史分区 sql, err = tb.GetDropPartitionSql() if err != nil { slog.Error("msg", "GetDropPartitionSql error", err) @@ -642,7 +644,7 @@ func CreatePartitionTicket(check Checker, objects []PartitionObject, zoneOffset zone, date, scheduler, "", ExecuteAsynchronous, check.ClusterType) } -// NeedPartition TODO +// NeedPartition 获取需要实施的分区规则 func NeedPartition(cronType string, clusterType string, zoneOffset int, cronDate string) ([]*Checker, error) { var configTb, logTb string var all, doNothing []*Checker @@ -657,10 +659,11 @@ func NeedPartition(cronType string, clusterType string, zoneOffset int, cronDate return nil, errors.New("不支持的db类型") } vzone := fmt.Sprintf("%+03d:00", zoneOffset) + // 集群被offline时,其分区规则也被禁用,规则不会被定时任务执行 vsql := fmt.Sprintf( "select id as config_id, bk_biz_id, cluster_id, immute_domain, port, bk_cloud_id,"+ - " '%s' as cluster_type from `%s`.`%s` where time_zone='%s' order by 2,3;", - clusterType, viper.GetString("db.name"), configTb, vzone) + " '%s' as cluster_type from `%s`.`%s` where time_zone='%s' and phase in ('%s','%s') order by 2,3;", + clusterType, viper.GetString("db.name"), configTb, vzone, online, offline) slog.Info(vsql) err := model.DB.Self.Raw(vsql).Scan(&all).Error if err != nil { diff --git a/dbm-services/mysql/db-partition/service/manage_config.go b/dbm-services/mysql/db-partition/service/manage_config.go index 9ece0b2cf5..27d4b672c9 100644 --- a/dbm-services/mysql/db-partition/service/manage_config.go +++ b/dbm-services/mysql/db-partition/service/manage_config.go @@ -489,6 +489,42 @@ func (m 
*DisablePartitionInput) DisablePartitionConfig() error { return nil } +// DisablePartitionConfigByCluster TODO +func (m *DisablePartitionInput) DisablePartitionConfigByCluster() error { + if len(m.ClusterIds) == 0 { + return errno.ConfigIdIsEmpty + } + var tbName string + // 判断是mysql集群还是spider集群 + var logTbName string + switch strings.ToLower(m.ClusterType) { + case Tendbha, Tendbsingle: + tbName = MysqlPartitionConfig + logTbName = MysqlManageLogsTable + case Tendbcluster: + tbName = SpiderPartitionConfig + logTbName = SpiderManageLogsTable + default: + return errors.New("不支持的db类型") + } + var list []string + for _, item := range m.ClusterIds { + list = append(list, strconv.FormatInt(int64(item), 10)) + + } + db := model.DB.Self.Table(tbName) + result := db. + Where(fmt.Sprintf("cluster_id in (%s)", strings.Join(list, ","))). + Update("phase", offlinewithclu) + if result.Error != nil { + return result.Error + } + for _, id := range m.Ids { + CreateManageLog(tbName, logTbName, id, "DisableByCluster", m.Operator) + } + return nil +} + // EnablePartitionConfig TODO func (m *EnablePartitionInput) EnablePartitionConfig() error { if len(m.Ids) == 0 { @@ -525,6 +561,42 @@ func (m *EnablePartitionInput) EnablePartitionConfig() error { return nil } +// EnablePartitionByCluster TODO +func (m *EnablePartitionInput) EnablePartitionByCluster() error { + if len(m.ClusterIds) == 0 { + return errno.ConfigIdIsEmpty + } + var tbName string + // 判断是mysql集群还是spider集群 + var logTbName string + switch strings.ToLower(m.ClusterType) { + case Tendbha, Tendbsingle: + tbName = MysqlPartitionConfig + logTbName = MysqlManageLogsTable + case Tendbcluster: + tbName = SpiderPartitionConfig + logTbName = SpiderManageLogsTable + default: + return errors.New("不支持的db类型") + } + var list []string + for _, item := range m.ClusterIds { + list = append(list, strconv.FormatInt(int64(item), 10)) + + } + db := model.DB.Self.Table(tbName) + result := db. 
+ Where(fmt.Sprintf("cluster_id in (%s)", strings.Join(list, ","))). + Update("phase", online) + if result.Error != nil { + return result.Error + } + for _, id := range m.Ids { + CreateManageLog(tbName, logTbName, id, "EnableByCluster", m.Operator) + } + return nil +} + func (m *CreatePartitionsInput) compareWithSameArray() (warnings []string, err error) { l := len(m.DbLikes) for i := 0; i < l; i++ { diff --git a/dbm-services/mysql/db-partition/service/manage_config_object.go b/dbm-services/mysql/db-partition/service/manage_config_object.go index 3e5100e429..f464df6673 100644 --- a/dbm-services/mysql/db-partition/service/manage_config_object.go +++ b/dbm-services/mysql/db-partition/service/manage_config_object.go @@ -16,6 +16,7 @@ const SpiderPartitionCronLogTable = "spider_partition_cron_log" const online = "online" const offline = "offline" +const offlinewithclu = "offlinewithclu" const extraTime = 15 // MysqlManageLogsTable TODO @@ -99,6 +100,7 @@ type DisablePartitionInput struct { ClusterType string `json:"cluster_type"` Operator string `json:"operator"` Ids []int `json:"ids"` + ClusterIds []int `json:"cluster_ids"` } // EnablePartitionInput TODO @@ -106,6 +108,7 @@ type EnablePartitionInput struct { ClusterType string `json:"cluster_type"` Operator string `json:"operator"` Ids []int `json:"ids"` + ClusterIds []int `json:"cluster_ids"` } // ManageLog 审计分区管理行为 diff --git a/dbm-services/mysql/db-priv/service/accout_rule.go b/dbm-services/mysql/db-priv/service/accout_rule.go index 4efae8bf50..dc22f3c81e 100644 --- a/dbm-services/mysql/db-priv/service/accout_rule.go +++ b/dbm-services/mysql/db-priv/service/accout_rule.go @@ -49,6 +49,10 @@ func (m *BkBizId) QueryAccountRule() ([]*AccountRuleSplitUser, int64, error) { if err != nil { return nil, count, err } + // 没有查到帐号规则 + if len(acountList) == 0 { + return nil, count, nil + } for _, id := range acountList { accountIds = fmt.Sprintf("%d,%s", id.AccountId, accountIds) } diff --git 
a/dbm-services/mysql/db-priv/service/generate_random_string.go b/dbm-services/mysql/db-priv/service/generate_random_string.go index 677891495e..b87f1df08f 100644 --- a/dbm-services/mysql/db-priv/service/generate_random_string.go +++ b/dbm-services/mysql/db-priv/service/generate_random_string.go @@ -14,7 +14,7 @@ import ( const lowercase = "abcdefghijklmnopqrstuvwxyz" const uppercase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" const number = "0123456789" -const symbol = `!#$%&()*+,-./:;<=>?@[]^_{|}~` // 剔除 " ' ` \ +const symbol = `!#%&()*+,-./;<=>?[]^_{|}~` // 剔除@ : $ " ' ` \ // 为密码池添加连续的字母序,数字序,特殊字符序和键盘序 const continuousSymbols = "~!@#$%^&*()_+" diff --git a/dbm-services/mysql/db-tools/mysql-monitor/cmd/init.go b/dbm-services/mysql/db-tools/mysql-monitor/cmd/init.go index ab84eb5fe3..714351ee27 100644 --- a/dbm-services/mysql/db-tools/mysql-monitor/cmd/init.go +++ b/dbm-services/mysql/db-tools/mysql-monitor/cmd/init.go @@ -58,7 +58,12 @@ func initLogger(cfg *config.LogConfig) { } } - ioWriters = append(ioWriters, &lumberjack.Logger{Filename: logFile}) + ioWriters = append(ioWriters, &lumberjack.Logger{ + Filename: logFile, + MaxAge: 2, + //MaxBackups: 2, + Compress: true, + }) } handleOpt := slog.HandlerOptions{AddSource: cfg.Source} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/config2sql.pl b/dbm-services/mysql/db-tools/mysql-monitor/config2sql.pl index eb0fcac23c..0b85686406 100755 --- a/dbm-services/mysql/db-tools/mysql-monitor/config2sql.pl +++ b/dbm-services/mysql/db-tools/mysql-monitor/config2sql.pl @@ -10,6 +10,8 @@ my $item_value = encode_json($item); $item_value =~ s/"enable":"1"/"enable":true/; $item_value =~ s/"enable":"0"/"enable":false/; + $item_value =~ s/"enable":""/"enable":false/; + my $sql = sprintf(q#REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, diff --git a/dbm-services/mysql/db-tools/mysql-monitor/items-config.sql b/dbm-services/mysql/db-tools/mysql-monitor/items-config.sql index ffefc4a8ed..bb7f162e97 100644 --- 
a/dbm-services/mysql/db-tools/mysql-monitor/items-config.sql +++ b/dbm-services/mysql/db-tools/mysql-monitor/items-config.sql @@ -1,31 +1,31 @@ DELETE FROM tb_config_name_def WHERE namespace = 'tendb' AND conf_type = 'mysql_monitor' AND conf_file = 'items-config.yaml'; -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'character-consistency', 'STRING', '{"role":[],"schedule":"0 0 14 * * 1","machine_type":["single","backend","remote","spider"],"name":"character-consistency","enable":true}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'routine-definer', 'STRING', '{"schedule":"0 0 15 * * 1","role":[],"machine_type":["single","backend","remote"],"name":"routine-definer","enable":true}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'view-definer', 'STRING', '{"schedule":"0 0 15 * * 1","role":[],"name":"view-definer","machine_type":["single","backend","remote"],"enable":true}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'trigger-definer', 'STRING', '{"schedule":"0 0 15 * * 
1","role":[],"machine_type":["single","backend","remote"],"name":"trigger-definer","enable":true}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'engine', 'STRING', '{"role":[],"schedule":"0 0 12 * * *","machine_type":["single","backend","remote"],"name":"engine","enable":true}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'ext3-check', 'STRING', '{"role":[],"schedule":"0 0 16 * * 1","machine_type":["single","backend","remote"],"name":"ext3-check","enable":true}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'ibd-statistic', 'STRING', '{"schedule":"0 0 14 * * 1","role":["slave"],"name":"ibd-statistic","machine_type":["single","backend","remote"],"enable":true}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'master-slave-heartbeat', 'STRING', '{"enable":true,"role":["master","repeater","slave"],"schedule":"@every 1m","machine_type":["backend","remote"],"name":"master-slave-heartbeat"}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, 
value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'mysql-config-diff', 'STRING', '{"schedule":"0 5 10 * * *","role":[],"name":"mysql-config-diff","machine_type":["single","backend","remote","spider"],"enable":true}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'mysql-connlog-size', 'STRING', '{"machine_type":["single","backend","remote","spider"],"name":"mysql-connlog-size","schedule":"0 0 12 * * *","role":[],"enable":true}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'mysql-connlog-rotate', 'STRING', '{"enable":true,"schedule":"0 30 23 * * *","role":[],"machine_type":["single","backend","remote","spider"],"name":"mysql-connlog-rotate"}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'mysql-err-notice', 'STRING', '{"enable":true,"role":[],"schedule":"@every 1m","machine_type":["single","backend","remote"],"name":"mysql-err-notice"}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'mysql-err-critical', 'STRING', 
'{"machine_type":["single","backend","remote"],"name":"mysql-err-critical","role":[],"schedule":"@every 1m","enable":true}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'spider-err-notice', 'STRING', '{"schedule":"@every 1m","role":[],"machine_type":["spider"],"name":"spider-err-notice","enable":true}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'spider-err-warn', 'STRING', '{"schedule":"@every 1m","role":[],"machine_type":["spider"],"name":"spider-err-warn","enable":true}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'spider-err-critical', 'STRING', '{"enable":true,"machine_type":["spider"],"name":"spider-err-critical","role":[],"schedule":"@every 1m"}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'mysql-lock', 'STRING', '{"enable":true,"role":[],"schedule":"@every 1m","name":"mysql-lock","machine_type":["single","backend","remote","spider"]}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, 
flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'mysql-inject', 'STRING', '{"enable":true,"schedule":"@every 1m","role":[],"machine_type":["single","backend","spider"],"name":"mysql-inject"}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'proxy-backend', 'STRING', '{"enable":true,"schedule":"@every 1m","role":[],"machine_type":["proxy"],"name":"proxy-backend"}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'proxy-user-list', 'STRING', '{"name":"proxy-user-list","machine_type":["proxy"],"schedule":"@every 1m","role":[],"enable":true}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'rotate-slowlog', 'STRING', '{"enable":true,"schedule":"0 55 23 * * *","role":[],"name":"rotate-slowlog","machine_type":["single","backend","remote","spider"]}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'slave-status', 'STRING', '{"role":["slave","repeater"],"schedule":"@every 1m","name":"slave-status","machine_type":["backend","remote"],"enable":true}', '', 'MAP', 1, 
0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'ctl-replicate', 'STRING', '{"schedule":"@every 1m","role":["spider_master"],"machine_type":["spider"],"name":"ctl-replicate","enable":true}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'spider-remote', 'STRING', '{"role":[],"schedule":"@every 1m","name":"spider-remote","machine_type":["spider"],"enable":true}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'spider-table-schema-consistency', 'STRING', '{"schedule":"0 10 1 * * *","role":["spider_master"],"machine_type":["spider"],"name":"spider-table-schema-consistency","enable":true}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'dbha-heartbeat', 'STRING', '{"enable":true,"schedule":"@every 1m","role":[],"name":"dbha-heartbeat","machine_type":["spider","remote","backend"]}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 
'items-config.yaml', 'unique-ctl-master', 'STRING', '{"schedule":"@every 1m","role":["spider_master"],"machine_type":["spider"],"name":"unique-ctl-master","enable":true}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'scene-snapshot', 'STRING', '{"enable":true,"name":"scene-snapshot","machine_type":["spider","remote","backend","single"],"role":[],"schedule":"@every 1m"}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'mysql-timezone-change', 'STRING', '{"name":"mysql-timezone-change","machine_type":["spider","remote","backend","single"],"role":[],"schedule":"@every 1m","enable":true}', '', 'MAP', 1, 0, 0, 0, 1); -REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'sys-timezone-change', 'STRING', '{"schedule":"@every 1m","role":[],"machine_type":["spider","proxy","remote","backend","single"],"name":"sys-timezone-change","enable":true}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'character-consistency', 'STRING', '{"role":[],"name":"character-consistency","schedule":"0 0 14 * * 1","enable":true,"machine_type":["single","backend","remote","spider"]}', 
'', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'routine-definer', 'STRING', '{"role":[],"schedule":"0 0 15 * * 1","enable":true,"name":"routine-definer","machine_type":["single","backend","remote"]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'view-definer', 'STRING', '{"enable":true,"schedule":"0 0 15 * * 1","name":"view-definer","role":[],"machine_type":["single","backend","remote"]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'trigger-definer', 'STRING', '{"machine_type":["single","backend","remote"],"schedule":"0 0 15 * * 1","name":"trigger-definer","enable":true,"role":[]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'engine', 'STRING', '{"role":[],"enable":true,"schedule":"0 0 12 * * *","name":"engine","machine_type":["single","backend","remote"]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 
'mysql_monitor', 'items-config.yaml', 'ext3-check', 'STRING', '{"role":[],"schedule":"0 0 16 * * 1","name":"ext3-check","enable":true,"machine_type":["single","backend","remote"]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'ibd-statistic', 'STRING', '{"role":["slave"],"schedule":"0 0 14 * * 1","name":"ibd-statistic","enable":true,"machine_type":["single","backend","remote"]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'master-slave-heartbeat', 'STRING', '{"machine_type":["backend","remote"],"name":"master-slave-heartbeat","schedule":"@every 1m","enable":true,"role":["master","repeater","slave"]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'mysql-config-diff', 'STRING', '{"machine_type":["single","backend","remote","spider"],"name":"mysql-config-diff","schedule":"0 5 10 * * *","enable":true,"role":[]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'mysql-connlog-size', 'STRING', '{"role":[],"schedule":"0 0 12 * * 
*","name":"mysql-connlog-size","enable":true,"machine_type":["single","backend","remote","spider"]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'mysql-connlog-rotate', 'STRING', '{"role":[],"schedule":"0 30 23 * * *","name":"mysql-connlog-rotate","enable":true,"machine_type":["single","backend","remote","spider"]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'mysql-err-notice', 'STRING', '{"role":[],"enable":true,"schedule":"@every 1m","name":"mysql-err-notice","machine_type":["single","backend","remote"]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'mysql-err-critical', 'STRING', '{"machine_type":["single","backend","remote"],"name":"mysql-err-critical","schedule":"@every 1m","enable":true,"role":[]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'spider-err-notice', 'STRING', '{"machine_type":["spider"],"name":"spider-err-notice","schedule":"@every 1m","enable":true,"role":[]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, 
value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'spider-err-warn', 'STRING', '{"machine_type":["spider"],"role":[],"name":"spider-err-warn","schedule":"@every 1m","enable":true}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'spider-err-critical', 'STRING', '{"machine_type":["spider"],"role":[],"schedule":"@every 1m","enable":true,"name":"spider-err-critical"}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'mysql-lock', 'STRING', '{"role":[],"name":"mysql-lock","schedule":"@every 1m","enable":true,"machine_type":["single","backend","remote","spider"]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'mysql-inject', 'STRING', '{"role":[],"name":"mysql-inject","schedule":"@every 1m","enable":true,"machine_type":["single","backend","spider"]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'proxy-backend', 'STRING', '{"role":[],"schedule":"@every 1m","name":"proxy-backend","enable":true,"machine_type":["proxy"]}', '', 
'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'proxy-user-list', 'STRING', '{"machine_type":["proxy"],"schedule":"@every 1m","name":"proxy-user-list","enable":true,"role":[]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'rotate-slowlog', 'STRING', '{"machine_type":["single","backend","remote","spider"],"role":[],"schedule":"0 55 23 * * *","enable":true,"name":"rotate-slowlog"}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'slave-status', 'STRING', '{"machine_type":["backend","remote"],"role":["slave","repeater"],"schedule":"@every 1m","name":"slave-status","enable":true}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'ctl-replicate', 'STRING', '{"machine_type":["spider"],"enable":true,"schedule":"@every 1m","name":"ctl-replicate","role":["spider_master"]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 
'items-config.yaml', 'spider-remote', 'STRING', '{"machine_type":["spider"],"role":[],"enable":true,"schedule":"@every 1m","name":"spider-remote"}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'spider-table-schema-consistency', 'STRING', '{"role":["spider_master"],"name":"spider-table-schema-consistency","schedule":"0 10 1 * * *","enable":true,"machine_type":["spider"]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'dbha-heartbeat', 'STRING', '{"schedule":"@every 1m","enable":true,"name":"dbha-heartbeat","role":[],"machine_type":["spider","remote","backend"]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'unique-ctl-master', 'STRING', '{"machine_type":["spider"],"enable":true,"schedule":"@every 1m","name":"unique-ctl-master","role":["spider_master"]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'scene-snapshot', 'STRING', '{"machine_type":["spider","remote","backend","single"],"enable":false,"schedule":"@every 1m","name":"scene-snapshot","role":[]}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( 
namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'mysql-timezone-change', 'STRING', '{"machine_type":["spider","remote","backend","single"],"role":[],"schedule":"@every 1m","name":"mysql-timezone-change","enable":true}', '', 'MAP', 1, 0, 0, 0, 1); +REPLACE INTO tb_config_name_def( namespace, conf_type, conf_file, conf_name, value_type, value_default, value_allowed, value_type_sub, flag_status, flag_disable, flag_locked, flag_encrypt, need_restart) VALUES( 'tendb', 'mysql_monitor', 'items-config.yaml', 'sys-timezone-change', 'STRING', '{"machine_type":["spider","proxy","remote","backend","single"],"schedule":"@every 1m","name":"sys-timezone-change","enable":true,"role":[]}', '', 'MAP', 1, 0, 0, 0, 1); diff --git a/dbm-services/mysql/db-tools/mysql-monitor/items-config.yaml b/dbm-services/mysql/db-tools/mysql-monitor/items-config.yaml index 2f3ce33b46..33a221b8a3 100644 --- a/dbm-services/mysql/db-tools/mysql-monitor/items-config.yaml +++ b/dbm-services/mysql/db-tools/mysql-monitor/items-config.yaml @@ -210,7 +210,7 @@ role: - spider_master - name: scene-snapshot - enable: true + enable: false schedule: '@every 1m' machine_type: - spider diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/engineinnodbstatus.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/engineinnodbstatus.go index 12c52e0bd4..d7f0fc32ae 100644 --- a/dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/engineinnodbstatus.go +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/engineinnodbstatus.go @@ -7,7 +7,7 @@ import ( "github.com/jmoiron/sqlx" "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" - "dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/internal/tarball" + 
"dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/internal/archivescenes" ) type engineInnodbStatus struct { @@ -19,7 +19,7 @@ type engineInnodbStatus struct { var engineInnodbStatusName = "engine-innodb-status" func engineInnodbStatusScene(db *sqlx.DB) error { - err := tarball.DeleteOld(engineInnodbStatusName, sceneBase, 1) + err := archivescenes.DeleteOld(engineInnodbStatusName, sceneBase, 1) if err != nil { return err } @@ -31,7 +31,7 @@ func engineInnodbStatusScene(db *sqlx.DB) error { content := fmt.Sprintf("Type:%s\nName:%s\nStatus:%s", res[0].Type, res[0].Name, res[0].Status) - err = tarball.Write(engineInnodbStatusName, sceneBase, []byte(content)) + err = archivescenes.Write(engineInnodbStatusName, sceneBase, []byte(content)) if err != nil { return err } diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/internal/archivescenes/init.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/internal/archivescenes/init.go new file mode 100644 index 0000000000..458c73b067 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/internal/archivescenes/init.go @@ -0,0 +1,84 @@ +package archivescenes + +import ( + "compress/gzip" + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" + "time" + + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" +) + +func DeleteOld(name string, basePath string, days int) error { + oldFiles, err := findBefore(name, basePath, days) + if err != nil { + return err + } + + for _, oldFile := range oldFiles { + err := os.RemoveAll(oldFile) + if err != nil { + return err + } + } + + return nil +} + +func findBefore(name string, basePath string, days int) (oldFiles []string, err error) { + t := time.Now().Add(time.Hour * time.Duration((1-days)*24)) + d := time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location()) + + err = filepath.Walk(basePath, func(p string, i fs.FileInfo, e error) error { + if e != nil { + 
return e + } + if strings.HasPrefix(i.Name(), name) && i.ModTime().Before(d) { + oldFiles = append(oldFiles, p) + } + return nil + }) + + return +} + +func Write(name string, basePath string, content []byte) error { + now := time.Now() + + archivePath := filepath.Join( + basePath, + fmt.Sprintf("%s.%d.%s", + name, + config.MonitorConfig.Port, + now.Format("20060102"), + ), + ) + + err := os.MkdirAll(archivePath, 0777) + if err != nil { + return err + } + + filePath := filepath.Join(archivePath, fmt.Sprintf("%s.gz", now.Format("20060102150405"))) + file, err := os.OpenFile(filePath, os.O_CREATE|os.O_RDWR, 0777) + if err != nil { + return err + } + defer func() { + _ = file.Close() + }() + + gw := gzip.NewWriter(file) + _, err = gw.Write(content) + if err != nil { + return err + } + defer func() { + _ = gw.Close() + }() + + return nil +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/internal/tarball/init.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/internal/tarball/init.go index 7aacf873e0..0f9930508a 100644 --- a/dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/internal/tarball/init.go +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/internal/tarball/init.go @@ -2,6 +2,8 @@ package tarball import ( "archive/tar" + "bytes" + "compress/gzip" "fmt" "io" "io/fs" @@ -10,6 +12,8 @@ import ( "strings" "time" + "github.com/pingcap/errors" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" ) @@ -47,53 +51,127 @@ func findBefore(name string, basePath string, days int) (oldFiles []string, err } func Write(name string, basePath string, content []byte) error { - diskFile, err := os.OpenFile( + now := time.Now() + + return appendToTarGz( filepath.Join( basePath, - fmt.Sprintf("%s.%d.%s.tar", name, config.MonitorConfig.Port, time.Now().Format("20060102"))), - os.O_CREATE|os.O_RDWR, - 0777) + fmt.Sprintf("%s.%d.%s.tar.gz", + name, 
config.MonitorConfig.Port, now.Format("20060102")), + ), + now.Format("20060102150405"), + content, + now, + ) +} + +/* +在 golang 里面只能用这样别扭的办法来实现 +追加内容到已有的 tar.gz 文件 +原因是 +1. tar 文件末尾有 1k 的空记录, 追加的时候需要做 Seek +2. tar.gz 的操作要嵌套一层 gzip.Writer/Reader, golang 没办法在 Writer/Reader 上做随机访问 +3. 有一个 WriterSeeker interface, 自己实现其实也挺麻烦 +*/ +func readTarGz(filePath string) (content []byte, err error) { + content = []byte{} + + file, err := os.OpenFile(filePath, os.O_CREATE|os.O_RDWR, 0777) if err != nil { - return err + return nil, errors.AddStack(err) } defer func() { - _ = diskFile.Close() + _ = file.Close() }() - st, err := diskFile.Stat() + st, err := file.Stat() if err != nil { - return err + return nil, errors.AddStack(err) } + if st.Size() > 0 { + /* + 如果文件不为空 + 把解压后的内容读入 content + */ + gzReader, err := gzip.NewReader(file) + if err != nil { + return nil, err + } + defer func() { + _ = gzReader.Close() + }() - if st.Size() >= 1024 { - _, err = diskFile.Seek(-1024, io.SeekEnd) + content, err = io.ReadAll(gzReader) if err != nil { - return err + return nil, errors.AddStack(err) + } + + /* + content 的内容是 tar 文件内容 + 如果长度大于 1k, 则丢弃掉末尾的空 tar 记录 + */ + if len(content) >= 1024 { + content = content[:len(content)-1024] } } - tarBall := tar.NewWriter(diskFile) - defer func() { - _ = tarBall.Close() - }() + return content, nil +} - now := time.Now() - err = tarBall.WriteHeader(&tar.Header{ - Name: time.Now().Format("20060102150405"), +func appendToTarGz(tarGzPath string, appendFileName string, appendContent []byte, now time.Time) error { + legacyContent, err := readTarGz(tarGzPath) + if err != nil { + return errors.AddStack(err) + } + + /* + 把新增内容写到内存的 []byte 中 + legacyContent + */ + buf := bytes.NewBuffer(legacyContent) + tw := tar.NewWriter(buf) + + err = tw.WriteHeader(&tar.Header{ + Name: appendFileName, Mode: 0644, - Size: int64(len(content)), + Size: int64(len(appendContent)), ModTime: now, AccessTime: now, ChangeTime: now, }) if err != nil { - return err + return 
errors.AddStack(err) } + _, err = tw.Write(appendContent) + if err != nil { + _ = tw.Close() + return errors.AddStack(err) + } + _ = tw.Flush() + _ = tw.Close() - _, err = tarBall.Write(content) + /* + 以覆盖方式把追加后的全量内容回写到归档文件 + */ + file, err := os.OpenFile(tarGzPath, os.O_TRUNC|os.O_RDWR|os.O_CREATE, 0777) if err != nil { - return err + return errors.AddStack(err) } + defer func() { + _ = file.Close() + }() + + gw, err := gzip.NewWriterLevel(file, gzip.BestCompression) + if err != nil { + return errors.AddStack(err) + } + _, err = gw.Write(buf.Bytes()) + if err != nil { + return errors.AddStack(err) + } + defer func() { + _ = gw.Close() + }() return nil } diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/processlist.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/processlist.go index 7353b2038f..34b26e403a 100644 --- a/dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/processlist.go +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/processlist.go @@ -11,7 +11,7 @@ import ( "github.com/spf13/cast" "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" - "dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/internal/tarball" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/itemscollect/scenesnapshot/internal/archivescenes" ) type mysqlProcess struct { @@ -45,7 +45,7 @@ func queryProcesslist(db *sqlx.DB) (res []*mysqlProcess, err error) { } func processListScene(db *sqlx.DB) error { - err := tarball.DeleteOld(processListName, sceneBase, 1) + err := archivescenes.DeleteOld(processListName, sceneBase, 1) if err != nil { return err } @@ -77,7 +77,7 @@ func processListScene(db *sqlx.DB) error { table.Render() - err = tarball.Write(processListName, sceneBase, b.Bytes()) + err = archivescenes.Write(processListName, sceneBase, b.Bytes()) if err != nil { return err } diff --git 
a/dbm-services/mysql/db-tools/mysql-monitor/pkg/monitoriteminterface/connection_collect.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/monitoriteminterface/connection_collect.go index bba25710c5..1618fc63d4 100644 --- a/dbm-services/mysql/db-tools/mysql-monitor/pkg/monitoriteminterface/connection_collect.go +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/monitoriteminterface/connection_collect.go @@ -159,7 +159,7 @@ func connectDB(ip string, port int, ca *config.ConnectAuth) (*sqlx.DB, error) { ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) defer cancel() - return sqlx.ConnectContext( + db, err := sqlx.ConnectContext( ctx, "mysql", fmt.Sprintf( "%s:%s@tcp(%s:%d)/%s?parseTime=true&loc=%s&timeout=%s", @@ -169,4 +169,21 @@ func connectDB(ip string, port int, ca *config.ConnectAuth) (*sqlx.DB, error) { config.MonitorConfig.InteractTimeout, ), ) + if err != nil { + slog.Warn("first time connect failed", slog.String("error", err.Error())) + slog.Info("retry connect after 3 seconds") + time.Sleep(3 * time.Second) + return sqlx.ConnectContext( + ctx, + "mysql", fmt.Sprintf( + "%s:%s@tcp(%s:%d)/%s?parseTime=true&loc=%s&timeout=%s", + ca.User, ca.Password, ip, port, + "", + time.Local.String(), + config.MonitorConfig.InteractTimeout, + ), + ) + } + + return db, nil } diff --git a/dbm-services/riak/db-tools/dbactuator/pkg/components/riak/deploy_monitor.go b/dbm-services/riak/db-tools/dbactuator/pkg/components/riak/deploy_monitor.go index 1f6206167f..e4507aa3d0 100644 --- a/dbm-services/riak/db-tools/dbactuator/pkg/components/riak/deploy_monitor.go +++ b/dbm-services/riak/db-tools/dbactuator/pkg/components/riak/deploy_monitor.go @@ -191,7 +191,6 @@ func (i *DeployMonitorComp) GenerateCrondConfigYaml() (err error) { logger.Error("generate crond runtime.yaml error: %s", err.Error()) return err } - // todo mysql-crond 模版中jobs_user不是固定值 cmd := fmt.Sprintf(`sed -i "s/jobs_user: mysql/jobs_user: root/g" %s`, 
path.Join(cst.CrondPath, "runtime.yaml")) _, err = osutil.ExecShellCommand(false, cmd) if err != nil { diff --git a/dbm-services/riak/db-tools/dbactuator/pkg/components/riak/restart.go b/dbm-services/riak/db-tools/dbactuator/pkg/components/riak/restart.go index 0246d4d19e..01d16ffc8e 100644 --- a/dbm-services/riak/db-tools/dbactuator/pkg/components/riak/restart.go +++ b/dbm-services/riak/db-tools/dbactuator/pkg/components/riak/restart.go @@ -32,7 +32,15 @@ func (i *RestartComp) Restart() error { if err != nil { logger.Error("execute shell [%s] error: %s", cmd, err.Error()) err = fmt.Errorf("execute shell [%s] error: %s", cmd, err.Error()) - return err + // 当节点已经关闭,restart会失败,尝试start + logger.Info("restart failed, try to start") + errStart := Start() + if errStart != nil { + // 返回restart的报错 + return err + } else { + return nil + } } time.Sleep(time.Minute) logger.Info("restart riak success") diff --git a/dbm-ui/backend/components/mysql_backup/client.py b/dbm-ui/backend/components/mysql_backup/client.py index b35644eb97..c2eacefb14 100644 --- a/dbm-ui/backend/components/mysql_backup/client.py +++ b/dbm-ui/backend/components/mysql_backup/client.py @@ -39,7 +39,7 @@ def __init__(self): method="POST", url="backupapi/client/install", description=_("backup_client下载,同步任务"), - default_timeout=600, + default_timeout=300, max_retry_times=1, ) diff --git a/dbm-ui/backend/components/mysql_partition/client.py b/dbm-ui/backend/components/mysql_partition/client.py index 4740e0dc03..2221ee8076 100644 --- a/dbm-ui/backend/components/mysql_partition/client.py +++ b/dbm-ui/backend/components/mysql_partition/client.py @@ -59,6 +59,16 @@ def __init__(self): url="partition/disable_partition", description=_("禁用分区"), ) + self.enable_partition_cluster = self.generate_data_api( + method="POST", + url="partition/enable_partition_cluster", + description=_("禁用分区"), + ) + self.disable_partition_cluster = self.generate_data_api( + method="POST", + url="partition/disable_partition_cluster", + 
description=_("禁用分区"), + ) self.query_log = self.generate_data_api( method="POST", url="partition/query_log", diff --git a/dbm-ui/backend/configuration/handlers/password.py b/dbm-ui/backend/configuration/handlers/password.py index 81e74f66a3..52ebb4e6a4 100644 --- a/dbm-ui/backend/configuration/handlers/password.py +++ b/dbm-ui/backend/configuration/handlers/password.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ -import base64 from collections import defaultdict from typing import Any, Dict, List @@ -24,6 +23,7 @@ from backend.db_periodic_task.models import DBPeriodicTask from backend.db_services.ipchooser.query.resource import ResourceQueryHelper from backend.flow.consts import MySQLPasswordRole +from backend.utils.string import base64_decode, base64_encode class DBPasswordHandler(object): @@ -40,7 +40,7 @@ def verify_password_strength(cls, password: str, echo: bool = False): name=AsymmetricCipherConfigType.PASSWORD.value, content=password, salted=False ) # 密码需要用base64加密后传输 - b64_plain_password = base64.b64encode(plain_password.encode("utf-8")).decode("utf-8") + b64_plain_password = base64_encode(plain_password) check_result = DBPrivManagerApi.check_password( {"password": b64_plain_password, "security_rule_name": DBM_PASSWORD_SECURITY_NAME} ) @@ -84,7 +84,7 @@ def query_mysql_admin_password( mysql_admin_password_data["results"] = mysql_admin_password_data.pop("items") cloud_info = ResourceQueryHelper.search_cc_cloud(get_cache=True) for data in mysql_admin_password_data["results"]: - data["password"] = base64.b64decode(data["password"]).decode("utf-8") + data["password"] = base64_decode(data["password"]) data["bk_cloud_name"] = cloud_info[str(data["bk_cloud_id"])]["bk_cloud_name"] return mysql_admin_password_data @@ -120,7 +120,7 @@ def modify_mysql_admin_password(cls, operator: str, password: str, 
lock_hour: in modify_password_params = { "username": DBM_MYSQL_ADMIN_USER, "component": DBType.MySQL.value, - "password": base64.b64encode(password.encode("utf-8")).decode("utf-8"), + "password": base64_encode(password), "lock_hour": lock_hour, "operator": operator, "clusters": cluster_infos, @@ -176,4 +176,4 @@ def query_proxy_password(cls): } data = DBPrivManagerApi.get_password(params)["items"][0] # 注意要用base64解密 - return base64.b64decode(data["password"]).decode("utf8") + return base64_decode(data["password"]) diff --git a/dbm-ui/backend/configuration/views/password_policy.py b/dbm-ui/backend/configuration/views/password_policy.py index 2dbe3df7e9..9d7aee0c59 100644 --- a/dbm-ui/backend/configuration/views/password_policy.py +++ b/dbm-ui/backend/configuration/views/password_policy.py @@ -42,7 +42,12 @@ class PasswordPolicyViewSet(viewsets.SystemViewSet): pagination_class = None def _get_custom_permissions(self): - if self.action == "get_password_policy": + if self.action == [ + self.get_password_policy.__name__, + self.verify_password_strength.__name__, + self.get_random_password.__name__, + self.query_random_cycle.__name__, + ]: return [] return [ResourceActionPermission([ActionEnum.PASSWORD_POLICY_SET])] diff --git a/dbm-ui/backend/core/encrypt/aes.py b/dbm-ui/backend/core/encrypt/aes.py index 3191829f0d..486ade1d29 100644 --- a/dbm-ui/backend/core/encrypt/aes.py +++ b/dbm-ui/backend/core/encrypt/aes.py @@ -13,6 +13,7 @@ from Crypto.Cipher import AES from backend.core.encrypt.constants import AES_BLOCK_SIZE, AES_PADDING +from backend.utils.string import base64_encode def pad_it(data): @@ -38,7 +39,7 @@ def encrypt(data: str, aes_key: str) -> str: aes_key = aes_key.encode("utf-8") cipher = AES.new(aes_key, AES.MODE_CBC, aes_key) data = cipher.encrypt(pad_it(data).encode("utf-8")) - return base64.b64encode(data).decode("utf-8") + return base64_encode(data) def decrypt(data: str, aes_key: str) -> str: diff --git a/dbm-ui/backend/db_meta/api/proxy_instance/apis.py 
b/dbm-ui/backend/db_meta/api/proxy_instance/apis.py index baff762986..b0f01dd49b 100644 --- a/dbm-ui/backend/db_meta/api/proxy_instance/apis.py +++ b/dbm-ui/backend/db_meta/api/proxy_instance/apis.py @@ -77,41 +77,3 @@ def update(proxies): proxy_obj.status = new_status proxy_obj.save() - - -# @transaction.atomic -# def decommission(instances: List[Dict]): -# """ -# TODO:没使用到?待删除 -# 1. 仅支持 下架实例不在任何一个集群 -# 必要条件: -# 1. 不属于任何一个集群 ;属于集群的实例,需要走集群内下架接口 -# -# 场景: -# 1. 上架了,但未添加到集群 -# 2. 从集群内清理掉了 ;调用了 delete_proxies() -# """ -# logger.info("user request decmmission instances {}".format(instances)) -# proxy_objs = common.filter_out_instance_obj(instances, ProxyInstance.objects.all()) -# -# _t = common.in_another_cluster(proxy_objs) -# if _t: -# raise Exception(_("proxy {} 在集群里边").format(_t)) -# -# _t = common.not_exists(instances, ProxyInstance.objects.all()) -# if _t: -# raise Exception(_("proxy {} 不存在").format(_t)) -# -# for proxy_obj in proxy_objs: -# logger.info("remove proxy {} ".format(proxy_obj)) -# CcManage(proxy_obj.bk_biz_id).delete_service_instance(bk_instance_ids=[proxy_obj.bk_instance_id]) -# -# # 需要检查, 是否该机器上所有实例都已经清理干净, -# if len(ProxyInstance.objects.filter(machine__ip=proxy_obj.machine.ip).all()) > 0: -# logger.info("ignore storage machine {} , another instance existed.".format(proxy_obj.machine)) -# else: -# logger.info("proxy machine {}".format(proxy_obj.machine)) -# CcManage( -# proxy_obj.bk_biz_id, -# ).recycle_host([proxy_obj.machine.bk_host_id]) -# proxy_obj.machine.delete() diff --git a/dbm-ui/backend/db_meta/models/machine.py b/dbm-ui/backend/db_meta/models/machine.py index 78818f7b3c..a76eebc4d6 100644 --- a/dbm-ui/backend/db_meta/models/machine.py +++ b/dbm-ui/backend/db_meta/models/machine.py @@ -8,7 +8,9 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -import copy +import gzip +import io +import json from dataclasses import asdict from django.db import models @@ -21,6 +23,7 @@ from backend.db_meta.enums import AccessLayer, ClusterType, MachineType from backend.db_meta.exceptions import HostDoseNotExistInCmdbException from backend.db_meta.models import AppCache, BKCity +from backend.utils.string import base64_encode class Machine(AuditedModel): @@ -56,95 +59,81 @@ def __str__(self): return self.ip @property - def dbm_meta(self) -> list: + def dbm_meta(self) -> dict: proxies = self.proxyinstance_set.all() storages = self.storageinstance_set.all() host_labels = [] - def shrink_dbm_meta(dbm_meta): - """数据裁剪""" - - if not dbm_meta: - return [] - - # 剔除实例属性,仅保留集群属性 - first_one = copy.deepcopy(dbm_meta[0]) - for custom_attr in ["instance_role", "instance_port"]: - first_one.pop(custom_attr) - - return { - "version": "v1", - "common": first_one, - "custom": list( - map(lambda x: {"instance_role": x["instance_role"], "instance_port": x["instance_port"]}, dbm_meta) - ), - } - - def remove_duplicates(seq): - unique = set() - for d in seq: - t = tuple(d.items()) - unique.add(t) - - return shrink_dbm_meta([dict(x) for x in unique]) - - if proxies: - for proxy in proxies: - for cluster in proxy.cluster.all(): - tendb_cluster_spider_ext = getattr(proxy, "tendbclusterspiderext", None) - host_labels.append( - asdict( - CommonHostDBMeta( - app=AppCache.get_app_attr(cluster.bk_biz_id, default=cluster.bk_biz_id), - appid=str(cluster.bk_biz_id), - cluster_type=cluster.cluster_type, - cluster_domain=cluster.immute_domain, - db_type=ClusterType.cluster_type_to_db_type(cluster.cluster_type), - # tendbcluster中扩展了proxy的类型,需要特殊处理 - instance_role=tendb_cluster_spider_ext.spider_role - if tendb_cluster_spider_ext - else "proxy", - instance_port=str(proxy.port), - ) + def compress_dbm_meta_content(dbm_meta: dict) -> str: + """ + 压缩 dbm_meta + """ + # 使用gzip压缩 + # python3.6 gzip 不支持 mtime 参数,python3.10 可以直接使用 gzip.compress 压缩 + buf = 
io.BytesIO() + with gzip.GzipFile(fileobj=buf, mode="wb", mtime=0) as f: + f.write(json.dumps(dbm_meta).encode("utf-8")) + compressed_data = buf.getvalue() + + # 将压缩后的字节转换为Base64编码的字符串 + base64_encoded_str = base64_encode(compressed_data) + return base64_encoded_str + + for proxy in proxies: + for cluster in proxy.cluster.all(): + tendb_cluster_spider_ext = getattr(proxy, "tendbclusterspiderext", None) + host_labels.append( + asdict( + CommonHostDBMeta( + app=AppCache.get_app_attr(cluster.bk_biz_id, default=cluster.bk_biz_id), + appid=str(cluster.bk_biz_id), + cluster_type=cluster.cluster_type, + cluster_domain=cluster.immute_domain, + db_type=ClusterType.cluster_type_to_db_type(cluster.cluster_type), + # tendbcluster中扩展了proxy的类型,需要特殊处理 + instance_role=tendb_cluster_spider_ext.spider_role + if tendb_cluster_spider_ext + else "proxy", + instance_port=str(proxy.port), ) ) - - if storages: - for storage in storages: - # influxdb需要单独处理 - if storage.cluster_type == ClusterType.Influxdb.value: - host_labels.append( - asdict( - CommonHostDBMeta( - app=AppCache.get_app_attr(storage.bk_biz_id, default=storage.bk_biz_id), - appid=str(storage.bk_biz_id), - cluster_domain=storage.machine.ip, - cluster_type=storage.cluster_type, - db_type=ClusterType.cluster_type_to_db_type(storage.cluster_type), - instance_role=storage.instance_role, - instance_port=str(storage.port), - ) + ) + + for storage in storages: + # influxdb需要单独处理 + if storage.cluster_type == ClusterType.Influxdb.value: + host_labels.append( + asdict( + CommonHostDBMeta( + app=AppCache.get_app_attr(storage.bk_biz_id, default=storage.bk_biz_id), + appid=str(storage.bk_biz_id), + cluster_domain=storage.machine.ip, + cluster_type=storage.cluster_type, + db_type=ClusterType.cluster_type_to_db_type(storage.cluster_type), + instance_role=storage.instance_role, + instance_port=str(storage.port), ) ) - continue - - for cluster in storage.cluster.all(): - host_labels.append( - asdict( - CommonHostDBMeta( - 
app=AppCache.get_app_attr(cluster.bk_biz_id, default=cluster.bk_biz_id), - appid=str(cluster.bk_biz_id), - cluster_domain=cluster.immute_domain, - cluster_type=cluster.cluster_type, - db_type=ClusterType.cluster_type_to_db_type(cluster.cluster_type), - instance_role=storage.instance_role, - instance_port=str(storage.port), - ) + ) + continue + + for cluster in storage.cluster.all(): + host_labels.append( + asdict( + CommonHostDBMeta( + app=AppCache.get_app_attr(cluster.bk_biz_id, default=cluster.bk_biz_id), + appid=str(cluster.bk_biz_id), + cluster_domain=cluster.immute_domain, + cluster_type=cluster.cluster_type, + db_type=ClusterType.cluster_type_to_db_type(cluster.cluster_type), + instance_role=storage.instance_role, + instance_port=str(storage.port), ) ) + ) - return remove_duplicates(host_labels) + return {"version": "v2", "content": compress_dbm_meta_content({"common": {}, "custom": host_labels})} @classmethod def get_host_info_from_cmdb(cls, bk_host_id: int) -> dict: diff --git a/dbm-ui/backend/db_meta/utils.py b/dbm-ui/backend/db_meta/utils.py index 9d5dbc6baf..4e7a31410c 100644 --- a/dbm-ui/backend/db_meta/utils.py +++ b/dbm-ui/backend/db_meta/utils.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -import base64 import logging import os.path from collections import defaultdict @@ -34,6 +33,7 @@ from backend.db_services.ipchooser.constants import DB_MANAGE_SET from backend.db_services.ipchooser.query import resource from backend.flow.utils.cc_manage import CcManage +from backend.utils.string import base64_encode logger = logging.getLogger("root") @@ -69,7 +69,7 @@ def remove_cluster(cluster_id, job_clean=True, cc_clean=True): JobApi.fast_execute_script( { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, - "script_content": str(base64.b64encode(script_content.encode("utf-8")), "utf-8"), + "script_content": base64_encode(script_content), "task_name": _("清理集群"), "account_alias": "root", "script_language": 1, @@ -141,7 +141,7 @@ def remove_cluster_ips(bk_host_ids, job_clean=True, cc_clean=True): JobApi.fast_execute_script( { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, - "script_content": str(base64.b64encode(script_content.encode("utf-8")), "utf-8"), + "script_content": base64_encode(script_content), "task_name": _("清理集群"), "account_alias": "root", "script_language": 1, diff --git a/dbm-ui/backend/db_periodic_task/local_tasks/check_checksum.py b/dbm-ui/backend/db_periodic_task/local_tasks/check_checksum.py index dc3dc116ed..0050fb0edd 100644 --- a/dbm-ui/backend/db_periodic_task/local_tasks/check_checksum.py +++ b/dbm-ui/backend/db_periodic_task/local_tasks/check_checksum.py @@ -38,8 +38,7 @@ def add_not_consistent_table(self, db, table): self.details[db].append(table) -# @register_periodic_task(run_every=crontab(minute="*/1")) -@register_periodic_task(run_every=crontab(day_of_week="2,3,4,5,6", hour="3", minute="53")) +@register_periodic_task(run_every=crontab(day_of_week="0,3,4,5,6", hour="3", minute="53")) def auto_check_checksum(): """检查每天的校验结果,存入db_report数据库""" # 主库执行校验任务,备库第二天上报校验结果 @@ -105,6 +104,9 @@ def check_cluster_checksum(cluster_id: int, start_time: datetime, end_time: date "start_time": datetime2str(log_start_time), "end_time": datetime2str(end_time), "filter": 
machine_filter, + "start": 0, + "size": 10000, + "sort_list": [["dtEventTimeStamp", "desc"]], } ) @@ -121,7 +123,6 @@ def check_cluster_checksum(cluster_id: int, start_time: datetime, end_time: date checksum = Checksum(slave_ip, slave_port) for hit in resp["hits"]["hits"]: log = json.loads(hit["_source"]["log"]) - print(log) # 过滤出本集群本实例的日志 if log["cluster_id"] == cluster.id: if log["ip"] == slave_ip and log["port"] == slave_port: diff --git a/dbm-ui/backend/db_periodic_task/local_tasks/db_proxy.py b/dbm-ui/backend/db_periodic_task/local_tasks/db_proxy.py index 85c10f6b6a..60f8a5d376 100644 --- a/dbm-ui/backend/db_periodic_task/local_tasks/db_proxy.py +++ b/dbm-ui/backend/db_periodic_task/local_tasks/db_proxy.py @@ -9,7 +9,6 @@ specific language governing permissions and limitations under the License. """ -import base64 import copy import logging from collections import defaultdict @@ -31,6 +30,7 @@ from backend.db_proxy.models import ClusterExtension, DBCloudProxy, DBExtension from backend.db_services.ipchooser.query.resource import ResourceQueryHelper from backend.utils.redis import RedisConn +from backend.utils.string import base64_encode logger = logging.getLogger("celery") @@ -110,7 +110,7 @@ def _job_push_config_file(_cloud_id, _file_list, _nginx_list): "service_url": f"http://{extension.ip}:{extension.port}", } file_name = f"{extension.bk_biz_id}_{extension.db_type}_{extension.cluster_name}_nginx.conf" - file_content = str(base64.b64encode(template.render(conf_payload).encode("utf-8")), "utf-8") + file_content = base64_encode(template.render(conf_payload)) file_list.append({"file_name": file_name, "content": file_content}) # 这里先提前写入access url,至于是否执行成功根据is_flush diff --git a/dbm-ui/backend/db_proxy/views/job_callback/views.py b/dbm-ui/backend/db_proxy/views/job_callback/views.py index d1e5d5f436..fc0924aaef 100644 --- a/dbm-ui/backend/db_proxy/views/job_callback/views.py +++ b/dbm-ui/backend/db_proxy/views/job_callback/views.py @@ -9,7 +9,6 @@ specific 
language governing permissions and limitations under the License. """ -import base64 import json import logging @@ -29,6 +28,7 @@ from backend.flow.consts import SUCCESS_LIST from backend.flow.utils.script_template import fast_execute_script_common_kwargs from backend.utils.redis import RedisConn +from backend.utils.string import base64_encode logger = logging.getLogger("root") @@ -73,7 +73,7 @@ def push_conf_callback(self, request): job_payload = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": "restart_nginx", - "script_content": str(base64.b64encode(restart_nginx_tpl.encode("utf-8")), "utf-8"), + "script_content": base64_encode(restart_nginx_tpl), "script_language": 1, "target_server": { "ip_list": [ diff --git a/dbm-ui/backend/db_proxy/views/jobapi/views.py b/dbm-ui/backend/db_proxy/views/jobapi/views.py index 3e1f8e0be9..5a9e3452d6 100644 --- a/dbm-ui/backend/db_proxy/views/jobapi/views.py +++ b/dbm-ui/backend/db_proxy/views/jobapi/views.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -import base64 from typing import Any, Dict from django.utils.translation import ugettext as _ @@ -31,6 +30,7 @@ TransferFileSerializer, ) from backend.db_proxy.views.views import BaseProxyPassViewSet +from backend.utils.string import base64_encode class JobApiProxyPassViewSet(BaseProxyPassViewSet): @@ -55,7 +55,7 @@ def fast_execute_script(self, request): job_payloads: Dict[str, Any] = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": _("DBM 快速脚本执行"), - "script_content": str(base64.b64encode(validated_data["script_content"].encode("utf-8")), "utf-8"), + "script_content": base64_encode(validated_data["script_content"]), "script_language": validated_data["script_language"], "target_server": {"ip_list": validated_data["ip_list"]}, "timeout": validated_data["timeout"], diff --git a/dbm-ui/backend/db_report/views/dbmon_heartbeat_view.py b/dbm-ui/backend/db_report/views/dbmon_heartbeat_view.py index 780dc4426f..736c70327a 100644 --- a/dbm-ui/backend/db_report/views/dbmon_heartbeat_view.py +++ b/dbm-ui/backend/db_report/views/dbmon_heartbeat_view.py @@ -37,6 +37,7 @@ class DbmonHeatbeartCheckReportBaseViewSet(ReportBaseViewSet): filter_fields = { # 大部分时候不需要覆盖默认的filter "bk_biz_id": ["exact"], "cluster_type": ["exact", "in"], + "cluster": ["exact", "in"], "create_at": ["gte", "lte"], "status": ["exact", "in"], } @@ -65,7 +66,7 @@ class DbmonHeatbeartCheckReportBaseViewSet(ReportBaseViewSet): { "name": "dba", "display_name": _("业务所属dba"), - "format": ReportFieldFormat.STATUS.value, + "format": ReportFieldFormat.TEXT.value, }, { "name": "instance", diff --git a/dbm-ui/backend/db_report/views/meta_check_view.py b/dbm-ui/backend/db_report/views/meta_check_view.py index e8b415a036..8963c91e71 100644 --- a/dbm-ui/backend/db_report/views/meta_check_view.py +++ b/dbm-ui/backend/db_report/views/meta_check_view.py @@ -27,7 +27,7 @@ class MetaCheckReportInstanceBelongSerializer(serializers.ModelSerializer): class Meta: model = MetaCheckReport - fields = ("bk_biz_id", "ip", 
"port", "machine_type", "status", "msg") + fields = ("bk_biz_id", "ip", "port", "machine_type", "status", "msg", "create_at") swagger_schema_fields = {"example": mock_data.META_CHECK_DATA} @@ -72,6 +72,11 @@ class MetaCheckReportInstanceBelongViewSet(ReportBaseViewSet): "display_name": _("详情"), "format": ReportFieldFormat.TEXT.value, }, + { + "name": "create_at", + "display_name": _("创建时间"), + "format": ReportFieldFormat.TEXT.value, + }, ] @common_swagger_auto_schema( diff --git a/dbm-ui/backend/db_report/views/mysqlbackup_check_view.py b/dbm-ui/backend/db_report/views/mysqlbackup_check_view.py index 2223bf0839..bfe432521e 100644 --- a/dbm-ui/backend/db_report/views/mysqlbackup_check_view.py +++ b/dbm-ui/backend/db_report/views/mysqlbackup_check_view.py @@ -27,7 +27,7 @@ class MysqlBackupCheckReportSerializer(serializers.ModelSerializer): class Meta: model = MysqlBackupCheckReport - fields = ("bk_biz_id", "cluster", "cluster_type", "status", "msg") + fields = ("bk_biz_id", "cluster", "cluster_type", "status", "msg", "create_at") swagger_schema_fields = {"example": mock_data.MYSQL_BACKUP_CHECK_DATA} @@ -36,6 +36,7 @@ class MysqlBackupCheckReportBaseViewSet(ReportBaseViewSet): serializer_class = MysqlBackupCheckReportSerializer filter_fields = { # 大部分时候不需要覆盖默认的filter "bk_biz_id": ["exact"], + "cluster": ["exact", "in"], "cluster_type": ["exact", "in"], "create_at": ["gte", "lte"], "status": ["exact", "in"], @@ -67,6 +68,11 @@ class MysqlBackupCheckReportBaseViewSet(ReportBaseViewSet): "display_name": _("详情"), "format": ReportFieldFormat.TEXT.value, }, + { + "name": "create_at", + "display_name": _("时间"), + "format": ReportFieldFormat.TEXT.value, + }, ] @common_swagger_auto_schema( @@ -82,10 +88,10 @@ def list(self, request, *args, **kwargs): class MysqlFullBackupCheckReportViewSet(MysqlBackupCheckReportBaseViewSet): queryset = MysqlBackupCheckReport.objects.filter(subtype=MysqlBackupCheckSubType.FullBackup.value) serializer_class = MysqlBackupCheckReportSerializer - 
report_name = _("全备检查") + report_name = _("MySQL 全备检查") @common_swagger_auto_schema( - operation_summary=_("全备检查报告"), + operation_summary=_("MySQL 全备检查报告"), responses={status.HTTP_200_OK: MysqlBackupCheckReportSerializer()}, tags=[SWAGGER_TAG], ) @@ -99,7 +105,7 @@ class MysqlBinlogBackupCheckReportViewSet(MysqlBackupCheckReportBaseViewSet): report_name = _("集群binlog检查") @common_swagger_auto_schema( - operation_summary=_("binlog检查报告"), + operation_summary=_("MySQL binlog检查报告"), responses={status.HTTP_200_OK: MysqlBackupCheckReportSerializer()}, tags=[SWAGGER_TAG], ) diff --git a/dbm-ui/backend/db_report/views/redis_dbmeta_check_view.py b/dbm-ui/backend/db_report/views/redis_dbmeta_check_view.py index 0d30b0fd2a..73a6d8d842 100644 --- a/dbm-ui/backend/db_report/views/redis_dbmeta_check_view.py +++ b/dbm-ui/backend/db_report/views/redis_dbmeta_check_view.py @@ -26,7 +26,7 @@ class RedisDbmetaCheckReportSerializer(serializers.ModelSerializer): class Meta: model = MetaCheckReport - fields = ("bk_biz_id", "cluster", "cluster_type", "status", "msg") + fields = ("bk_biz_id", "cluster", "cluster_type", "status", "msg", "create_at") swagger_schema_fields = {"example": mock_data.REDIS_META_CHECK_DATA} @@ -35,6 +35,7 @@ class RedisDbmetaCheckReportBaseViewSet(ReportBaseViewSet): serializer_class = RedisDbmetaCheckReportSerializer filter_fields = { # 大部分时候不需要覆盖默认的filter "bk_biz_id": ["exact"], + "cluster": ["exact", "in"], "cluster_type": ["exact", "in"], "create_at": ["gte", "lte"], "status": ["exact", "in"], @@ -66,6 +67,11 @@ class RedisDbmetaCheckReportBaseViewSet(ReportBaseViewSet): "display_name": _("详情"), "format": ReportFieldFormat.TEXT.value, }, + { + "name": "create_at", + "display_name": _("时间"), + "format": ReportFieldFormat.TEXT.value, + }, ] @common_swagger_auto_schema( diff --git a/dbm-ui/backend/db_report/views/redisbackup_check_view.py b/dbm-ui/backend/db_report/views/redisbackup_check_view.py index cad26a28f8..5e49ed398e 100644 --- 
a/dbm-ui/backend/db_report/views/redisbackup_check_view.py +++ b/dbm-ui/backend/db_report/views/redisbackup_check_view.py @@ -26,7 +26,7 @@ class RedisBackupCheckReportSerializer(serializers.ModelSerializer): class Meta: model = RedisBackupCheckReport - fields = ("bk_biz_id", "cluster", "cluster_type", "instance", "status", "msg") + fields = ("bk_biz_id", "cluster", "cluster_type", "instance", "status", "msg", "create_at") swagger_schema_fields = {"example": mock_data.REDIS_BACKUP_CHECK_DATA} @@ -35,6 +35,7 @@ class RedisBackupCheckReportBaseViewSet(ReportBaseViewSet): serializer_class = RedisBackupCheckReportSerializer filter_fields = { # 大部分时候不需要覆盖默认的filter "bk_biz_id": ["exact"], + "cluster": ["exact", "in"], "cluster_type": ["exact", "in"], "create_at": ["gte", "lte"], "status": ["exact", "in"], @@ -71,6 +72,11 @@ class RedisBackupCheckReportBaseViewSet(ReportBaseViewSet): "display_name": _("详情"), "format": ReportFieldFormat.TEXT.value, }, + { + "name": "create_at", + "display_name": _("时间"), + "format": ReportFieldFormat.TEXT.value, + }, ] @common_swagger_auto_schema( @@ -86,10 +92,10 @@ def list(self, request, *args, **kwargs): class RedisFullBackupCheckReportViewSet(RedisBackupCheckReportBaseViewSet): queryset = RedisBackupCheckReport.objects.filter(subtype=RedisBackupCheckSubType.FullBackup.value) serializer_class = RedisBackupCheckReportSerializer - report_name = _("全备检查") + report_name = _("Redis 全备检查") @common_swagger_auto_schema( - operation_summary=_("全备检查报告"), + operation_summary=_("Redis 全备检查报告"), responses={status.HTTP_200_OK: RedisBackupCheckReportSerializer()}, tags=[SWAGGER_TAG], ) @@ -101,10 +107,10 @@ class RedisBinlogBackupCheckReportViewSet(RedisBackupCheckReportBaseViewSet): queryset = RedisBackupCheckReport.objects.filter(subtype=RedisBackupCheckSubType.BinlogBackup.value) serializer_class = RedisBackupCheckReportSerializer - report_name = _("集群binlog检查") + report_name = _("Redis集群binlog检查") @common_swagger_auto_schema( - 
operation_summary=_("binlog检查报告"), + operation_summary=_("Redis binlog检查报告"), responses={status.HTTP_200_OK: RedisBackupCheckReportSerializer()}, tags=[SWAGGER_TAG], ) diff --git a/dbm-ui/backend/db_services/dbbase/instances/handlers.py b/dbm-ui/backend/db_services/dbbase/instances/handlers.py index 576ea79874..a6d58ba8fd 100644 --- a/dbm-ui/backend/db_services/dbbase/instances/handlers.py +++ b/dbm-ui/backend/db_services/dbbase/instances/handlers.py @@ -14,7 +14,6 @@ from django.db.models import F, Q -from backend import env from backend.constants import IP_PORT_DIVIDER from backend.db_meta.models import Machine, ProxyInstance, StorageInstance from backend.db_services.dbbase.dataclass import DBInstance @@ -134,9 +133,7 @@ def check_instances( db_instances.append(db_inst) # 查询补充主机信息 - host_infos = HostHandler.check( - [{"bk_biz_id": env.DBA_APP_BK_BIZ_ID, "scope_type": "biz"}], [], [], bk_host_ids - ) + host_infos = HostHandler.check([{"bk_biz_id": self.bk_biz_id, "scope_type": "biz"}], [], [], bk_host_ids) host_id_info_map = {host_info["host_id"]: host_info for host_info in host_infos} return [ {**host_id_instance_map[str(db_inst)], **{"host_info": host_id_info_map.get(db_inst.bk_host_id, {})}} diff --git a/dbm-ui/backend/db_services/meta_import/serializers.py b/dbm-ui/backend/db_services/meta_import/serializers.py index 785fe917d5..097336e581 100644 --- a/dbm-ui/backend/db_services/meta_import/serializers.py +++ b/dbm-ui/backend/db_services/meta_import/serializers.py @@ -99,9 +99,8 @@ def validate(self, attrs): class TenDBClusterAppendCTLSerializer(serializers.Serializer): - bk_cloud_id = serializers.IntegerField(help_text=_("云区域ID"), default=0) - bk_biz_id = serializers.IntegerField(help_text=_("业务ID")) - cluster_ids = serializers.ListField(child=serializers.IntegerField(), help_text=_("待标准的集群列表")) + bk_biz_id = BizChoiceField(help_text=_("业务")) + file = serializers.FileField(help_text=_("域名列表文件")) use_stream = 
serializers.BooleanField(help_text=_("是否使用mydumper流式备份迁移"), required=False, default=False) drop_before = serializers.BooleanField(help_text=_("导入到tdbctl前,是否先删除"), required=False, default=False) threads = serializers.IntegerField(help_text=_("mydumper 并发"), required=False, default=0) diff --git a/dbm-ui/backend/db_services/meta_import/views.py b/dbm-ui/backend/db_services/meta_import/views.py index e21c3d6850..2ab553d473 100644 --- a/dbm-ui/backend/db_services/meta_import/views.py +++ b/dbm-ui/backend/db_services/meta_import/views.py @@ -36,6 +36,7 @@ from backend.ticket.builders.mysql.mysql_ha_standardize import TenDBHAStandardizeDetailSerializer from backend.ticket.builders.spider.metadata_import import TenDBClusterMetadataImportDetailSerializer from backend.ticket.builders.spider.mysql_spider_standardize import TenDBClusterStandardizeDetailSerializer +from backend.ticket.builders.tendbcluster.append_deploy_ctl import TenDBClusterAppendDeployCTLDetailSerializer from backend.ticket.constants import TicketType from backend.ticket.models import Ticket @@ -95,16 +96,16 @@ def tendbha_standardize(self, request, *args, **kwargs): domain_list.append(line.decode("utf-8").strip().rstrip(".")) cluster_ids = list( - Cluster.objects.filter(immute_domain__in=domain_list, cluster_type=ClusterType.TenDBHA.value).values_list( - "id", flat=True - ) + Cluster.objects.filter( + bk_biz_id=data["bk_biz_id"], immute_domain__in=domain_list, cluster_type=ClusterType.TenDBHA.value + ).values_list("id", flat=True) ) logger.info("domains: {}, ids: {}".format(domain_list, cluster_ids)) exists_domains = list( - Cluster.objects.filter(immute_domain__in=domain_list, cluster_type=ClusterType.TenDBHA.value).values_list( - "immute_domain", flat=True - ) + Cluster.objects.filter( + bk_biz_id=data["bk_biz_id"], immute_domain__in=domain_list, cluster_type=ClusterType.TenDBHA.value + ).values_list("immute_domain", flat=True) ) diff = list(set(domain_list) - set(exists_domains)) if diff: @@ -166,14 
+167,14 @@ def tendbcluster_standardize(self, request, *args, **kwargs): cluster_ids = list( Cluster.objects.filter( - immute_domain__in=domain_list, cluster_type=ClusterType.TenDBCluster.value + bk_biz_id=data["bk_biz_id"], immute_domain__in=domain_list, cluster_type=ClusterType.TenDBCluster.value ).values_list("id", flat=True) ) logger.info("domains: {}, ids: {}".format(domain_list, cluster_ids)) exists_domains = list( Cluster.objects.filter( - immute_domain__in=domain_list, cluster_type=ClusterType.TenDBCluster.value + bk_biz_id=data["bk_biz_id"], immute_domain__in=domain_list, cluster_type=ClusterType.TenDBCluster.value ).values_list("immute_domain", flat=True) ) diff = list(set(domain_list) - set(exists_domains)) @@ -206,7 +207,31 @@ ) def tendbcluster_append_deploy_ctl(self, request, *args, **kwargs): data = self.params_validate(self.get_serializer_class()) + + domain_list = [] + for line in data.pop("file").readlines(): + domain_list.append(line.decode("utf-8").strip().rstrip(".")) + + cluster_ids = list( + Cluster.objects.filter( + bk_biz_id=data["bk_biz_id"], immute_domain__in=domain_list, cluster_type=ClusterType.TenDBCluster.value + ).values_list("id", flat=True) + ) + logger.info("domains: {}, ids: {}".format(domain_list, cluster_ids)) + + exists_domains = list( + Cluster.objects.filter( + bk_biz_id=data["bk_biz_id"], immute_domain__in=domain_list, cluster_type=ClusterType.TenDBCluster.value + ).values_list("immute_domain", flat=True) + ) + diff = list(set(domain_list) - set(exists_domains)) + if diff: + raise serializers.ValidationError(_("cluster {} not found".format(diff))) + + data["cluster_ids"] = cluster_ids + # 创建标准化ticket + TenDBClusterAppendDeployCTLDetailSerializer(data=data).is_valid(raise_exception=True) Ticket.create_ticket( ticket_type=TicketType.TENDBCLUSTER_APPEND_DEPLOY_CTL, creator=request.user.username, diff --git 
a/dbm-ui/backend/db_services/mysql/fixpoint_rollback/handlers.py b/dbm-ui/backend/db_services/mysql/fixpoint_rollback/handlers.py index 69c55845ce..bd087da111 100644 --- a/dbm-ui/backend/db_services/mysql/fixpoint_rollback/handlers.py +++ b/dbm-ui/backend/db_services/mysql/fixpoint_rollback/handlers.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ -import base64 import copy import json import re @@ -33,6 +32,7 @@ from backend.flow.consts import SUCCESS_LIST, DBActuatorActionEnum, DBActuatorTypeEnum, InstanceStatus, JobStatusEnum from backend.flow.engine.bamboo.scene.mysql.common.get_local_backup import get_local_backup_list from backend.flow.utils.script_template import dba_toolkit_actuator_template, fast_execute_script_common_kwargs +from backend.utils.string import base64_encode from backend.utils.time import compare_time, datetime2str, find_nearby_time @@ -79,7 +79,7 @@ def _find_local_backup_script(self, port: int): } jinja_env = Environment() template = jinja_env.from_string(dba_toolkit_actuator_template) - return template.render(render_params).encode("utf-8") + return template.render(render_params) @staticmethod def _batch_make_job_requests(job_func: Callable, job_payloads: List[Dict]): @@ -401,9 +401,7 @@ def execute_backup_log_script(self) -> List[int]: execute_body: Dict[str, Any] = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": _("查询集群{}的备份日志").format(self.cluster.immute_domain), - "script_content": str( - base64.b64encode(self._find_local_backup_script(target_ip_infos[0]["port"])), "utf-8" - ), + "script_content": base64_encode(self._find_local_backup_script(target_ip_infos[0]["port"])), "script_language": 1, "target_server": { "ip_list": [ diff --git a/dbm-ui/backend/db_services/mysql/open_area/handlers.py b/dbm-ui/backend/db_services/mysql/open_area/handlers.py index 
2268d83665..dba8ba232b 100644 --- a/dbm-ui/backend/db_services/mysql/open_area/handlers.py +++ b/dbm-ui/backend/db_services/mysql/open_area/handlers.py @@ -8,6 +8,7 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ +import copy import itertools from collections import defaultdict from typing import Any, Dict, List, Union @@ -15,14 +16,18 @@ from django.utils.translation import ugettext as _ from backend.components import DBPrivManagerApi +from backend.db_meta.enums import ClusterType from backend.db_meta.models import Cluster -from backend.db_services.mysql.open_area.exceptions import TendbOpenAreaBaseException +from backend.db_services.dbpermission.constants import AccountType from backend.db_services.mysql.open_area.models import TendbOpenAreaConfig +from backend.db_services.mysql.remote_service.handlers import RemoteServiceHandler class OpenAreaHandler: """封装开区的一些处理函数""" + ALL_TABLE_FLAG = "*all*" + @classmethod def validate_only_openarea(cls, bk_biz_id, config_name, config_id: int = -1) -> bool: """校验同一业务下的开区模板名称唯一""" @@ -31,30 +36,73 @@ def validate_only_openarea(cls, bk_biz_id, config_name, config_id: int = -1) -> return not is_duplicated @classmethod - def openarea_result_preview( - cls, operator: str, config_id: int, config_data: List[Dict[str, Union[int, str, Dict]]] - ) -> Dict[str, List[Dict[str, Any]]]: - config = TendbOpenAreaConfig.objects.get(id=config_id) - clusters = Cluster.objects.filter(id__in=[info["cluster_id"] for info in config_data]) - cluster_id__cluster = {cluster.id: cluster for cluster in clusters} + def __check_db_list(cls, source_db, real_dbs): + """检查库是否合法""" + if source_db not in real_dbs: + return _("源集群不存在库{},请检查或修改开区模板".format(source_db)) + return "" - # 获取开区执行数据 + @classmethod + def __check_table_list(cls, real_tables, source_db, check_tables): + """检查表是否合法""" + if cls.ALL_TABLE_FLAG 
in check_tables: + check_tables = real_tables[source_db] + + error_msg = "" + if cls.ALL_TABLE_FLAG not in check_tables and check_tables: + not_exist_tables = set(check_tables) - set(real_tables[source_db]) + if not_exist_tables: + error_msg = _("源集群库{}中不存在表{},请检查或修改开区模板".format(source_db, not_exist_tables)) + + return check_tables, error_msg + + @classmethod + def __get_openarea_execute_objects(cls, config, config_data, cluster_id__cluster): + """获取开区执行数据结构体""" + remote_handler = RemoteServiceHandler(bk_biz_id=config.bk_biz_id) openarea_results: List[Dict[str, Any]] = [] + + # 实时查询集群的库表 + real_dbs = remote_handler.show_databases(cluster_ids=[config.source_cluster_id])[0]["databases"] + cluster_db_infos = [{"cluster_id": config.source_cluster_id, "dbs": real_dbs}] + real_tables = remote_handler.show_tables(cluster_db_infos)[0]["table_data"] + + # 获取基础执行结构体 + execute_objects_tpl = [ + { + "source_db": config_rule["source_db"], + "target_db": config_rule["target_db_pattern"], + "schema_tblist": config_rule["schema_tblist"], + "data_tblist": config_rule["data_tblist"], + "priv_data": config_rule["priv_data"], + "authorize_ips": [], + } + for config_rule in config.config_rules + ] + # 校验每个开区执行结构,如果存在不合法的库表,则填充错误信息 + for info in execute_objects_tpl: + # 校验库是否存在 + err_db_msg = cls.__check_db_list(info["source_db"], real_dbs) + # 校验schema_tblist是否合法 + info["schema_tblist"], err_schema_tb_msg = cls.__check_table_list( + real_tables, info["source_db"], info["schema_tblist"] + ) + # 校验data_tblist是否合法 + info["data_tblist"], err_data_tb_msg = cls.__check_table_list( + real_tables, info["source_db"], info["data_tblist"] + ) + err_msg_list = [err_db_msg, err_schema_tb_msg, err_data_tb_msg] + info["error_msg"] = "\n".join([msg for msg in err_msg_list if msg]) + for data in config_data: - try: - execute_objects = [ - { - "source_db": config_rule["source_db"], - "target_db": config_rule["target_db_pattern"].format(**data["vars"]), - "schema_tblist": config_rule["schema_tblist"], - 
"data_tblist": config_rule["data_tblist"], - "priv_data": config_rule["priv_data"], - "authorize_ips": data["authorize_ips"], - } - for config_rule in config.config_rules - ] - except KeyError: - raise TendbOpenAreaBaseException(_("范式渲染缺少变量")) + # 获取开区执行数据 + execute_objects = copy.deepcopy(execute_objects_tpl) + for info in execute_objects: + try: + info["target_db"] = info["target_db"].format(**data["vars"]) + info["authorize_ips"] = data.get("authorize_ips", []) + except KeyError: + info["error_msg"] = info["error_msg"] + "\n" + _("范式{}渲染缺少变量".format(info["target_db"])) openarea_results.append( { @@ -64,9 +112,22 @@ def openarea_result_preview( } ) - # 获取开区授权规则 + return openarea_results + + @classmethod + def __get_openarea_rules_set(cls, config, config_data, operator, cluster_id__cluster): + """获取开区授权数据""" priv_ids = list(itertools.chain(*[rule["priv_data"] for rule in config.config_rules])) - authorize_rules = DBPrivManagerApi.list_account_rules({"bk_biz_id": config.bk_biz_id, "ids": priv_ids}) + # 如果没有授权ID,则直接返回为空 + if not priv_ids: + return [] + + account_type = ( + AccountType.TENDBCLUSTER if config.cluster_type == ClusterType.TenDBCluster else AccountType.MYSQL + ) + authorize_rules = DBPrivManagerApi.list_account_rules( + {"bk_biz_id": config.bk_biz_id, "ids": priv_ids, "cluster_type": account_type} + ) # 根据用户名和db将授权规则分批 user__dbs_rules: Dict[str, List[str]] = defaultdict(list) for rule_data in authorize_rules["items"]: @@ -85,7 +146,24 @@ def openarea_result_preview( "cluster_type": cluster_id__cluster[data["cluster_id"]].cluster_type, } for data in config_data + if data.get("authorize_ips") for user in user__dbs_rules.keys() ] + return authorize_details - return {"config_data": openarea_results, "rules_set": authorize_details} + @classmethod + def openarea_result_preview( + cls, operator: str, config_id: int, config_data: List[Dict[str, Union[int, str, Dict]]] + ) -> Dict[str, List[Dict[str, Any]]]: + config = 
TendbOpenAreaConfig.objects.get(id=config_id) + clusters = Cluster.objects.filter(id__in=[info["cluster_id"] for info in config_data]) + cluster_id__cluster = {cluster.id: cluster for cluster in clusters} + # 获取开区执行数据 + openarea_results: List[Dict[str, Any]] = cls.__get_openarea_execute_objects( + config, config_data, cluster_id__cluster + ) + # 获取开区授权规则 + rules_set: List[Dict[str, Any]] = cls.__get_openarea_rules_set( + config, config_data, operator, cluster_id__cluster + ) + return {"config_data": openarea_results, "rules_set": rules_set} diff --git a/dbm-ui/backend/db_services/mysql/sql_import/constants.py b/dbm-ui/backend/db_services/mysql/sql_import/constants.py index 336c1e515b..f10f87fbdf 100644 --- a/dbm-ui/backend/db_services/mysql/sql_import/constants.py +++ b/dbm-ui/backend/db_services/mysql/sql_import/constants.py @@ -19,6 +19,11 @@ CACHE_SEMANTIC_SKIP_PAUSE_FILED = "{bk_biz_id}_{root_id}_semantic_check_skip_pause" SQL_SEMANTIC_CHECK_DATA_EXPIRE_TIME = 7 * 24 * 60 * 60 +# 最大预览SQL文件大小200MB +MAX_PREVIEW_SQL_FILE_SIZE = 200 * 1024 * 1024 +# 最大上传SQL文件大小1G +MAX_UPLOAD_SQL_FILE_SIZE = 1024 * 1024 * 1024 + class SQLCharset(str, StructuredEnum): """sql语句的字符集类型""" diff --git a/dbm-ui/backend/db_services/mysql/sql_import/handlers.py b/dbm-ui/backend/db_services/mysql/sql_import/handlers.py index 9601b0377c..13a70bba2c 100644 --- a/dbm-ui/backend/db_services/mysql/sql_import/handlers.py +++ b/dbm-ui/backend/db_services/mysql/sql_import/handlers.py @@ -25,6 +25,7 @@ CACHE_SEMANTIC_AUTO_COMMIT_FIELD, CACHE_SEMANTIC_SKIP_PAUSE_FILED, CACHE_SEMANTIC_TASK_FIELD, + MAX_PREVIEW_SQL_FILE_SIZE, SQL_SEMANTIC_CHECK_DATA_EXPIRE_TIME, SQLImportMode, ) @@ -71,7 +72,9 @@ def upload_sql_file( # 如果上传的是sql内容, 则创建一个sql文件 if sql_content: sql_file = tempfile.NamedTemporaryFile(suffix=".sql") - sql_file.write(str.encode(sql_content, encoding="utf-8")) + content_byte = str.encode(sql_content, encoding="utf-8") + sql_file.write(content_byte) + sql_file.size = len(content_byte) 
sql_file.seek(0) sql_file_list = [sql_file] @@ -84,9 +87,13 @@ # 恢复文件指针为文件头,否则会无法读取内容 TODO:如果sql内容过大需要进行内容读取吗? file.seek(0) - sql_file_info.update( - sql_path=sql_path, sql_content=file.read().decode("utf-8"), raw_file_name=file.name - ) + # 超过最大预览限制,则不支持预览 + if file.size > MAX_PREVIEW_SQL_FILE_SIZE: + sql_content = _("当前SQL文件过大,暂不提供内容预览...") + else: + sql_content = file.read().decode("utf-8") + + sql_file_info.update(sql_path=sql_path, sql_content=sql_content, raw_file_name=file.name) sql_file_info_list.append(sql_file_info) @@ -167,6 +174,7 @@ "charset": charset, "path": path, "cluster_ids": cluster_ids, + "execute_sql_files": execute_sql_files, "execute_objects": execute_objects, "highrisk_warnings": highrisk_warnings, "ticket_mode": ticket_mode, diff --git a/dbm-ui/backend/db_services/mysql/sql_import/serializers.py b/dbm-ui/backend/db_services/mysql/sql_import/serializers.py index c665631f1b..1ef656b892 100644 --- a/dbm-ui/backend/db_services/mysql/sql_import/serializers.py +++ b/dbm-ui/backend/db_services/mysql/sql_import/serializers.py @@ -15,6 +15,7 @@ from backend.db_services.mysql.sql_import import mock_data from backend.db_services.mysql.sql_import.constants import ( BKREPO_SQLFILE_PATH, + MAX_UPLOAD_SQL_FILE_SIZE, SQLCharset, SQLExecuteTicketMode, SQLImportMode, @@ -43,6 +44,8 @@ def validate(self, attrs): raise ValidationError(_("不允许语法检查的sql的内容为空!")) for file in attrs.get("sql_files", []): + if file.size > MAX_UPLOAD_SQL_FILE_SIZE: + raise ValidationError(_("请保证单个文件{}不超过1G").format(file.name)) if file.name.rsplit(".")[-1] != "sql": raise ValidationError(_("请保证sql文件[{}]的后缀为.sql").format(file.name)) diff --git a/dbm-ui/backend/dbm_init/medium/medium.lock b/dbm-ui/backend/dbm_init/medium/medium.lock index 9c12e95338..388e09cf8c 100644 --- a/dbm-ui/backend/dbm_init/medium/medium.lock +++ b/dbm-ui/backend/dbm_init/medium/medium.lock @@ -1,68 +1,99 @@ +cloud: +- cloud-dbha: + buildPath: 
/blueking-dbm/dbm-services/common/dbha/ha-module/build/dbha + commitDate: '202403041719' + commitId: f84f9b4c92225b25736315b5feaf8bb02796e40f + name: dbha + version: 1.0.1 +- cloud-dns-bind: + buildPath: /toolkit/bind.tar.gz + name: bind.tar.gz + version: 1.0.0 +- cloud-dns-pullcrond: + buildPath: /blueking-dbm/dbm-services/common/db-dns/dns-reload/build/pull-crond + commitDate: '202311251037' + commitId: a9a2060d90ab1aa65ce90d87fcee9db20bc1309a + name: pull-crond + version: 1.0.1 +- cloud-drs: + buildPath: /blueking-dbm/dbm-services/mysql/db-remote-service/build/db-remote-service + commitDate: '202402201635' + commitId: 9b94ecc380f5c17a665098a06de8362571a6e18c + name: db-remote-service + version: 1.0.1 +- cloud-drs-tmysqlparse: + buildPath: /toolkit/tmysqlparse + name: tmysqlparse + version: 1.0.0 +- cloud-nginx: + buildPath: /toolkit/nginx-portable.tgz + name: nginx-portable.tgz + version: 1.0.0 es: - actuator: buildPath: /blueking-dbm/dbm-services/bigdata/db-tools/dbactuator/build/dbactuator - commitId: 183acccc032756be2ada909b423aaaf14ca5f83e - commitDate: 200001010101 + commitDate: '202401311048' + commitId: ce513137085dbf931362ae85897f78fb6bdb336e name: dbactuator - version: 1.0.2 + version: 1.0.3 hdfs: - actuator: buildPath: /blueking-dbm/dbm-services/bigdata/db-tools/dbactuator/build/dbactuator - commitId: 183acccc032756be2ada909b423aaaf14ca5f83e - commitDate: 200001010101 + commitDate: '202401311048' + commitId: ce513137085dbf931362ae85897f78fb6bdb336e name: dbactuator - version: 1.0.2 + version: 1.0.3 influxdb: - actuator: buildPath: /blueking-dbm/dbm-services/bigdata/db-tools/dbactuator/build/dbactuator - commitId: 183acccc032756be2ada909b423aaaf14ca5f83e - commitDate: 200001010101 + commitDate: '202401311048' + commitId: ce513137085dbf931362ae85897f78fb6bdb336e name: dbactuator - version: 1.0.2 + version: 1.0.3 kafka: - actuator: buildPath: /blueking-dbm/dbm-services/bigdata/db-tools/dbactuator/build/dbactuator - commitId: 
183acccc032756be2ada909b423aaaf14ca5f83e - commitDate: 200001010101 + commitDate: '202401311048' + commitId: ce513137085dbf931362ae85897f78fb6bdb336e name: dbactuator - version: 1.0.2 + version: 1.0.3 mysql: - actuator: buildPath: /blueking-dbm/dbm-services/mysql/db-tools/dbactuator/build/dbactuator - commitId: 166a1ed8856a062de2ced5bdd3bc7d8c315457ca - commitDate: 200001010101 + commitDate: '202402231651' + commitId: 39076645f480dfbb3d5042d47a50ed7145e96d28 name: dbactuator - version: 1.0.3 + version: 1.0.4 - dbbackup: buildPath: /blueking-dbm/dbm-services/mysql/db-tools/mysql-dbbackup/build/dbbackup-go-community.tar.gz - commitId: 166a1ed8856a062de2ced5bdd3bc7d8c315457ca - commitDate: 200001010101 + commitDate: '202402011539' + commitId: c9e70ca67ba6d6bb83e04732c825f484718a8222 name: dbbackup-go-community.tar.gz - version: 1.0.2 + version: 1.0.3 - mysql-checksum: buildPath: /blueking-dbm/dbm-services/mysql/db-tools/mysql-table-checksum/build/mysql-checksum.tar.gz - commitId: 1a7b99bf32dc9d2445e3fe8637cb21faebf1f08c - commitDate: 200001010101 + commitDate: '202401151151' + commitId: b161746db1a253c6f536d610b019eeca87f69d18 name: mysql-checksum.tar.gz - version: 1.0.3 + version: 1.0.4 - mysql-crond: buildPath: /blueking-dbm/dbm-services/mysql/db-tools/mysql-crond/build/mysql-crond.tar.gz - commitId: 378047fc5156920d6d174a7e053d3c75a2e63023 - commitDate: 200001010101 + commitDate: '202402021702' + commitId: e235802e3886165b27d28f4244f883b8000efeb7 name: mysql-crond.tar.gz - version: 1.0.3 + version: 1.0.4 - rotate-binlog: buildPath: /blueking-dbm/dbm-services/mysql/db-tools/mysql-rotatebinlog/build/mysql-rotatebinlog.tar.gz - commitId: 166a1ed8856a062de2ced5bdd3bc7d8c315457ca - commitDate: 200001010101 + commitDate: '202401220953' + commitId: 2cfa9bd81c114e66665724909ea88c5242f9f7bc name: mysql-rotatebinlog.tar.gz - version: 1.0.3 + version: 1.0.4 - mysql-monitor: buildPath: /blueking-dbm/dbm-services/mysql/db-tools/mysql-monitor/build/mysql-monitor.tar.gz - 
commitId: 378047fc5156920d6d174a7e053d3c75a2e63023 - commitDate: 200001010101 + commitDate: '202403111541' + commitId: 9e2b9eb7c0c516a3595b7fb83495fe8343145bf8 name: mysql-monitor.tar.gz - version: 1.0.3 + version: 1.0.4 - dba-toolkit: buildPath: /toolkit/dba-toolkit.tar.gz name: dba-toolkit.tar.gz @@ -70,55 +101,24 @@ mysql: pulsar: - actuator: buildPath: /blueking-dbm/dbm-services/bigdata/db-tools/dbactuator/build/dbactuator - commitId: 183acccc032756be2ada909b423aaaf14ca5f83e - commitDate: 200001010101 + commitDate: '202401311048' + commitId: ce513137085dbf931362ae85897f78fb6bdb336e name: dbactuator - version: 1.0.2 + version: 1.0.3 redis: - actuator: buildPath: /blueking-dbm/dbm-services/redis/db-tools/dbactuator/build/dbactuator_redis - commitId: 255e57334099b76332819d56a093b7553b68998b - commitDate: 200001010101 + commitDate: '202401221438' + commitId: e593ecd9ccac15fe153ca3e4306b68c5fd4abeea name: dbactuator_redis - version: 1.0.3 + version: 1.0.4 - dbmon: buildPath: /blueking-dbm/dbm-services/redis/db-tools/dbmon/build/bk-dbmon-*.tar.gz - commitId: 255e57334099b76332819d56a093b7553b68998b - commitDate: 200001010101 + commitDate: '202401221438' + commitId: e593ecd9ccac15fe153ca3e4306b68c5fd4abeea name: dbmon - version: 1.0.3 + version: 1.0.4 - redis-dts: buildPath: /toolkit/redis_dts.tar.gz name: redis_dts.tar.gz version: 1.0.0 -cloud: -- cloud-dbha: - buildPath: /blueking-dbm/dbm-services/common/dbha/ha-module/build/dbha - commitId: "" - commitDate: 200001010101 - name: dbha - version: 1.0.0 -- cloud-dns-bind: - buildPath: /toolkit/bind.tar.gz - name: bind.tar.gz - version: 1.0.0 -- cloud-dns-pullcrond: - buildPath: /blueking-dbm/dbm-services/common/db-dns/dns-reload/build/pull-crond - commitId: "" - commitDate: 200001010101 - name: pull-crond - version: 1.0.0 -- cloud-drs: - buildPath: /blueking-dbm/dbm-services/mysql/db-remote-service/build/db-remote-service - commitId: "" - commitDate: 200001010101 - name: db-remote-service - version: 1.0.0 -- 
cloud-drs-tmysqlparse: - buildPath: /toolkit/tmysqlparse - name: tmysqlparse - version: 1.0.0 -- cloud-nginx: - buildPath: /toolkit/nginx-portable.tgz - name: nginx-portable.tgz - version: 1.0.0 diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/common/get_file_list.py b/dbm-ui/backend/flow/engine/bamboo/scene/common/get_file_list.py index 9b90be430f..570ac5e91c 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/common/get_file_list.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/common/get_file_list.py @@ -517,8 +517,11 @@ def tdbctl_install_package(self) -> list: tdbctl_pkg = Package.get_latest_package( version=MediumEnum.Latest, pkg_type=MediumEnum.tdbCtl, db_type=DBType.MySQL ) + db_backup_pkg = Package.get_latest_package(version=MediumEnum.Latest, pkg_type=MediumEnum.DbBackup) + return [ f"{env.BKREPO_PROJECT}/{env.BKREPO_BUCKET}/{self.actuator_pkg.path}", + f"{env.BKREPO_PROJECT}/{env.BKREPO_BUCKET}/{db_backup_pkg.path}", f"{env.BKREPO_PROJECT}/{env.BKREPO_BUCKET}/{tdbctl_pkg.path}", ] diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_open_area_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_open_area_flow.py index 366fc816b1..227ad0a26b 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_open_area_flow.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_open_area_flow.py @@ -278,8 +278,11 @@ def mysql_open_area_flow(self): if data_flag: pipeline.add_sub_pipeline(sub_flow=self.open_area_data_flow()) - # 对开区的新集群进行授权 - pipeline.add_act(act_name=_("添加mysql规则授权"), act_component_code=AuthorizeRulesComponent.code, kwargs=self.data) + # 判断是否对开区的集群进行授权 + if self.data.get("rules_set"): + pipeline.add_act( + act_name=_("添加mysql规则授权"), act_component_code=AuthorizeRulesComponent.code, kwargs=self.data + ) pipeline.run_pipeline(is_drop_random_user=True) diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/riak/riak_cluster_apply_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/riak/riak_cluster_apply_flow.py index 
532c9a87d4..f427b0b6f3 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/riak/riak_cluster_apply_flow.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/riak/riak_cluster_apply_flow.py @@ -180,7 +180,7 @@ def deploy_riak_cluster_flow(self): exec_ip=ips[0], bk_cloud_id=bk_cloud_id, run_as_system_user=DBA_ROOT_USER, - get_riak_payload_func=RiakActPayload.get_commit_cluster_change_payload.__name__, + get_riak_payload_func=RiakActPayload.get_init_bucket_type_payload.__name__, ) ), ) diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/riak/riak_cluster_destroy_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/riak/riak_cluster_destroy_flow.py index 9e5b89036a..2cd0a3a7ff 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/riak/riak_cluster_destroy_flow.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/riak/riak_cluster_destroy_flow.py @@ -60,7 +60,6 @@ def riak_cluster_destroy_flow(self): sub_pipeline = SubBuilder(root_id=self.root_id, data=self.data) sub_pipeline.add_act(act_name=_("获取集群中的节点"), act_component_code=GetRiakClusterNodeComponent.code, kwargs={}) - sub_pipeline.add_act( act_name=_("下发actuator"), act_component_code=TransFileComponent.code, diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/riak/riak_cluster_migrate_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/riak/riak_cluster_migrate_flow.py index 20122110d0..323d38f2aa 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/riak/riak_cluster_migrate_flow.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/riak/riak_cluster_migrate_flow.py @@ -17,16 +17,19 @@ from backend.components import DBConfigApi from backend.components.dbconfig.constants import ConfType, FormatType, LevelName, ReqType +from backend.configuration.constants import DBType from backend.flow.consts import DBA_ROOT_USER, DEPENDENCIES_PLUGINS, NameSpaceEnum from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder +from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList from 
backend.flow.plugins.components.collections.common.install_nodeman_plugin import ( InstallNodemanPluginServiceComponent, ) from backend.flow.plugins.components.collections.riak.exec_actuator_script import ExecuteRiakActuatorScriptComponent from backend.flow.plugins.components.collections.riak.get_riak_resource import GetRiakResourceComponent from backend.flow.plugins.components.collections.riak.riak_db_meta import RiakDBMetaComponent +from backend.flow.plugins.components.collections.riak.trans_files import TransFileComponent from backend.flow.utils.common_act_dataclass import InstallNodemanPluginKwargs -from backend.flow.utils.riak.riak_act_dataclass import DBMetaFuncKwargs +from backend.flow.utils.riak.riak_act_dataclass import DBMetaFuncKwargs, DownloadMediaKwargs from backend.flow.utils.riak.riak_act_payload import RiakActPayload from backend.flow.utils.riak.riak_context_dataclass import ApplyManualContext, RiakActKwargs from backend.flow.utils.riak.riak_db_meta import RiakDBMeta @@ -92,6 +95,18 @@ def migrate_riak_cluster_flow(self): ) sub_pipeline.add_parallel_acts(acts_list=acts_list) + sub_pipeline.add_act( + act_name=_("下发actuator以及riak介质"), + act_component_code=TransFileComponent.code, + kwargs=asdict( + DownloadMediaKwargs( + bk_cloud_id=bk_cloud_id, + exec_ip=ips, + file_list=GetFileList(db_type=DBType.Riak).riak_install_package(self.data["db_version"]), + ) + ), + ) + self._get_riak_config(cluster["domain"]) sub_pipeline.add_act( diff --git a/dbm-ui/backend/flow/plugins/components/collections/cloud/exec_service_script.py b/dbm-ui/backend/flow/plugins/components/collections/cloud/exec_service_script.py index e78648666f..4f99c9d5fa 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/cloud/exec_service_script.py +++ b/dbm-ui/backend/flow/plugins/components/collections/cloud/exec_service_script.py @@ -9,7 +9,6 @@ specific language governing permissions and limitations under the License. 
""" -import base64 import logging from typing import List @@ -24,6 +23,7 @@ from backend.flow.plugins.components.collections.common.base_service import BkJobService from backend.flow.utils.cloud.cloud_act_payload import CloudServiceActPayload from backend.flow.utils.es.es_script_template import fast_execute_script_common_kwargs +from backend.utils.string import base64_encode logger = logging.getLogger("json") @@ -93,7 +93,7 @@ def _exec_job_task(self, script_tpl, service_act_payload, target_ip_info, kwargs body = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": f"DBM_{kwargs['node_name']}_{kwargs['node_id']}", - "script_content": str(base64.b64encode(template.render(service_act_payload).encode("utf-8")), "utf-8"), + "script_content": base64_encode(template.render(service_act_payload)), "script_language": 1, "target_server": {"ip_list": target_ip_info}, } diff --git a/dbm-ui/backend/flow/plugins/components/collections/cloud/push_config_file.py b/dbm-ui/backend/flow/plugins/components/collections/cloud/push_config_file.py index de7cfe0f63..1986e23c57 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/cloud/push_config_file.py +++ b/dbm-ui/backend/flow/plugins/components/collections/cloud/push_config_file.py @@ -9,7 +9,6 @@ specific language governing permissions and limitations under the License. 
""" -import base64 import copy import logging @@ -20,6 +19,7 @@ from backend.components import JobApi from backend.core import consts from backend.flow.plugins.components.collections.cloud.exec_service_script import ExecCloudScriptService +from backend.utils.string import base64_encode logger = logging.getLogger("json") @@ -47,7 +47,7 @@ def _exec_job_task(self, script_tpl, service_act_payload, target_ip_info, kwargs payload["file_list"] = [ { "file_name": kwargs["conf_file_name"], - "content": str(base64.b64encode(template.render(service_act_payload).encode("utf-8")), "utf-8"), + "content": base64_encode(template.render(service_act_payload)), } ] payload["target_server"]["ip_list"] = target_ip_info diff --git a/dbm-ui/backend/flow/plugins/components/collections/common/check_resolv_conf.py b/dbm-ui/backend/flow/plugins/components/collections/common/check_resolv_conf.py index b4f098dbd6..756da4e724 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/common/check_resolv_conf.py +++ b/dbm-ui/backend/flow/plugins/components/collections/common/check_resolv_conf.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -import base64 import logging from typing import List @@ -21,6 +20,7 @@ from backend.flow.models import FlowNode from backend.flow.plugins.components.collections.common.base_service import BkJobService from backend.flow.utils.redis.redis_script_template import redis_fast_execute_script_common_kwargs +from backend.utils.string import base64_encode logger = logging.getLogger("json") @@ -53,7 +53,7 @@ def _execute(self, data, parent_data) -> bool: # 这里不能换成业务传进来的bk_biz_id,否则会获取作业状态失败 "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": f"DBM_{node_name}_{node_id}", - "script_content": str(base64.b64encode(shell_command.encode("utf-8")), "utf-8"), + "script_content": base64_encode(shell_command), "script_language": 1, "target_server": {"ip_list": target_ip_info}, } diff --git a/dbm-ui/backend/flow/plugins/components/collections/common/dns_server.py b/dbm-ui/backend/flow/plugins/components/collections/common/dns_server.py index 0ab098f1da..c28183fbd4 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/common/dns_server.py +++ b/dbm-ui/backend/flow/plugins/components/collections/common/dns_server.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -import base64 import logging import random from typing import List @@ -24,6 +23,7 @@ from backend.flow.models import FlowNode from backend.flow.plugins.components.collections.common.base_service import BkJobService from backend.flow.utils.redis.redis_script_template import redis_fast_execute_script_common_kwargs +from backend.utils.string import base64_encode logger = logging.getLogger("flow") @@ -108,7 +108,7 @@ def _execute(self, data, parent_data) -> bool: body = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": f"DBM_{node_name}_{node_id}", - "script_content": str(base64.b64encode(shell_command.encode("utf-8")), "utf-8"), + "script_content": base64_encode(shell_command), "script_language": 1, "target_server": {"ip_list": target_ip_info}, } diff --git a/dbm-ui/backend/flow/plugins/components/collections/es/exec_es_actuator_script.py b/dbm-ui/backend/flow/plugins/components/collections/es/exec_es_actuator_script.py index 9f14e8b510..0e3fd98bc7 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/es/exec_es_actuator_script.py +++ b/dbm-ui/backend/flow/plugins/components/collections/es/exec_es_actuator_script.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -import base64 import json import logging from typing import List @@ -24,6 +23,7 @@ from backend.flow.models import FlowNode from backend.flow.plugins.components.collections.common.base_service import BkJobService from backend.flow.utils.es.es_script_template import actuator_template, fast_execute_script_common_kwargs +from backend.utils.string import base64_encode logger = logging.getLogger("json") @@ -78,9 +78,7 @@ def _execute(self, data, parent_data) -> bool: db_act_template["version_id"] = self._runtime_attrs["version"] db_act_template["uid"] = global_data["uid"] - db_act_template["payload"] = str( - base64.b64encode(json.dumps(db_act_template["payload"]).encode("utf-8")), "utf-8" - ) + db_act_template["payload"] = base64_encode(json.dumps(db_act_template["payload"])) FlowNode.objects.filter(root_id=kwargs["root_id"], node_id=node_id).update(hosts=exec_ips) @@ -91,7 +89,7 @@ def _execute(self, data, parent_data) -> bool: body = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": f"DBM_{node_name}_{node_id}", - "script_content": str(base64.b64encode(template.render(db_act_template).encode("utf-8")), "utf-8"), + "script_content": base64_encode(template.render(db_act_template)), "script_language": 1, "target_server": {"ip_list": target_ip_info}, } diff --git a/dbm-ui/backend/flow/plugins/components/collections/es/rewrite_es_config.py b/dbm-ui/backend/flow/plugins/components/collections/es/rewrite_es_config.py index 281a193d64..3b1a6b4975 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/es/rewrite_es_config.py +++ b/dbm-ui/backend/flow/plugins/components/collections/es/rewrite_es_config.py @@ -9,7 +9,6 @@ specific language governing permissions and limitations under the License. 
""" -import base64 import logging from typing import List @@ -19,6 +18,7 @@ from backend.components.mysql_priv_manager.client import DBPrivManagerApi from backend.flow.consts import MySQLPrivComponent, NameSpaceEnum from backend.flow.plugins.components.collections.common.base_service import BaseService +from backend.utils.string import base64_encode logger = logging.getLogger("flow") @@ -40,7 +40,7 @@ def _execute(self, data, parent_data) -> bool: "bk_cloud_id": global_data["bk_cloud_id"], } ], - "password": base64.b64encode(str(global_data["username"]).encode("utf-8")).decode("utf-8"), + "password": base64_encode(str(global_data["username"])), "username": MySQLPrivComponent.ES_FAKE_USER.value, "component": NameSpaceEnum.Es, "operator": "admin", @@ -55,7 +55,7 @@ def _execute(self, data, parent_data) -> bool: "bk_cloud_id": global_data["bk_cloud_id"], } ], - "password": base64.b64encode(str(global_data["password"]).encode("utf-8")).decode("utf-8"), + "password": base64_encode(str(global_data["password"])), "username": global_data["username"], "component": NameSpaceEnum.Es, "operator": "admin", diff --git a/dbm-ui/backend/flow/plugins/components/collections/hdfs/exec_actuator_script.py b/dbm-ui/backend/flow/plugins/components/collections/hdfs/exec_actuator_script.py index 66143da28c..70ffb7ab1c 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/hdfs/exec_actuator_script.py +++ b/dbm-ui/backend/flow/plugins/components/collections/hdfs/exec_actuator_script.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -import base64 import json import logging from dataclasses import asdict @@ -25,6 +24,7 @@ from backend.flow.models import FlowNode from backend.flow.plugins.components.collections.common.base_service import BkJobService from backend.flow.utils.hdfs.hdfs_script_template import ACTUATOR_TEMPLATE, fast_execute_script_common_kwargs +from backend.utils.string import base64_encode logger = logging.getLogger("json") @@ -85,9 +85,7 @@ def _execute(self, data, parent_data) -> bool: db_act_template["version_id"] = self._runtime_attrs["version"] db_act_template["uid"] = global_data["uid"] - db_act_template["payload"] = str( - base64.b64encode(json.dumps(db_act_template["payload"]).encode("utf-8")), "utf-8" - ) + db_act_template["payload"] = base64_encode(json.dumps(db_act_template["payload"])) FlowNode.objects.filter(root_id=kwargs["root_id"], node_id=node_id).update(hosts=exec_ips) @@ -98,7 +96,7 @@ def _execute(self, data, parent_data) -> bool: body = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": f"DBM_{node_name}_{node_id}", - "script_content": str(base64.b64encode(template.render(db_act_template).encode("utf-8")), "utf-8"), + "script_content": base64_encode(template.render(db_act_template)), "script_language": 1, "target_server": {"ip_list": target_ip_info}, } diff --git a/dbm-ui/backend/flow/plugins/components/collections/hdfs/rewrite_hdfs_config.py b/dbm-ui/backend/flow/plugins/components/collections/hdfs/rewrite_hdfs_config.py index c9bf4a3424..cd56719557 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/hdfs/rewrite_hdfs_config.py +++ b/dbm-ui/backend/flow/plugins/components/collections/hdfs/rewrite_hdfs_config.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -import base64 import logging from typing import List @@ -22,6 +21,7 @@ from backend.flow.consts import ConfigTypeEnum, LevelInfoEnum, NameSpaceEnum from backend.flow.plugins.components.collections.common.base_service import BaseService from backend.ticket.constants import TicketType +from backend.utils.string import base64_encode logger = logging.getLogger("flow") @@ -56,7 +56,7 @@ def _execute(self, data, parent_data) -> bool: "bk_cloud_id": global_data["bk_cloud_id"], } ], - "password": base64.b64encode(str(global_data["password"]).encode("utf-8")).decode("utf-8"), + "password": base64_encode(global_data["password"]), "username": "root", "component": NameSpaceEnum.Hdfs, "operator": "admin", diff --git a/dbm-ui/backend/flow/plugins/components/collections/influxdb/exec_actuator_script.py b/dbm-ui/backend/flow/plugins/components/collections/influxdb/exec_actuator_script.py index 98d616a648..68ec7c61a2 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/influxdb/exec_actuator_script.py +++ b/dbm-ui/backend/flow/plugins/components/collections/influxdb/exec_actuator_script.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -import base64 import json import logging from typing import List @@ -23,6 +22,7 @@ from backend.flow.models import FlowNode from backend.flow.plugins.components.collections.common.base_service import BkJobService from backend.flow.utils.kafka.script_template import ACTUATOR_TEMPLATE, fast_execute_script_common_kwargs +from backend.utils.string import base64_encode logger = logging.getLogger("json") @@ -70,9 +70,7 @@ def _execute(self, data, parent_data) -> bool: db_act_template["version_id"] = self._runtime_attrs["version"] db_act_template["uid"] = global_data["uid"] - db_act_template["payload"] = str( - base64.b64encode(json.dumps(db_act_template["payload"]).encode("utf-8")), "utf-8" - ) + db_act_template["payload"] = base64_encode(json.dumps(db_act_template["payload"])) FlowNode.objects.filter(root_id=kwargs["root_id"], node_id=node_id).update(hosts=exec_ips) @@ -83,7 +81,7 @@ def _execute(self, data, parent_data) -> bool: body = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": f"DBM_{node_name}_{node_id}", - "script_content": str(base64.b64encode(template.render(db_act_template).encode("utf-8")), "utf-8"), + "script_content": base64_encode(template.render(db_act_template)), "script_language": 1, "target_server": {"ip_list": target_ip_info}, } diff --git a/dbm-ui/backend/flow/plugins/components/collections/influxdb/influxdb_config.py b/dbm-ui/backend/flow/plugins/components/collections/influxdb/influxdb_config.py index 786fdccd15..8e76152cec 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/influxdb/influxdb_config.py +++ b/dbm-ui/backend/flow/plugins/components/collections/influxdb/influxdb_config.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -import base64 import logging from typing import List @@ -21,6 +20,7 @@ from backend.db_meta.models import StorageInstance from backend.flow.consts import MySQLPrivComponent, NameSpaceEnum from backend.flow.plugins.components.collections.common.base_service import BaseService +from backend.utils.string import base64_encode logger = logging.getLogger("flow") @@ -50,7 +50,7 @@ def _execute(self, data, parent_data) -> bool: # 把用户名当密码存 query_params = { "instances": [{"ip": str(storage_obj.id), "port": 0, "bk_cloud_id": global_data["bk_cloud_id"]}], - "password": base64.b64encode(str(global_data["username"]).encode("utf-8")).decode("utf-8"), + "password": base64_encode(global_data["username"]), "username": MySQLPrivComponent.INFLUXDB_FAKE_USER.value, "component": NameSpaceEnum.Influxdb, "operator": "admin", @@ -59,7 +59,7 @@ def _execute(self, data, parent_data) -> bool: # 存真实的用户名密码 query_params = { "instances": [{"ip": str(storage_obj.id), "port": 0, "bk_cloud_id": global_data["bk_cloud_id"]}], - "password": base64.b64encode(str(global_data["password"]).encode("utf-8")).decode("utf-8"), + "password": base64_encode(global_data["password"]), "username": global_data["username"], "component": NameSpaceEnum.Influxdb, "operator": "admin", diff --git a/dbm-ui/backend/flow/plugins/components/collections/kafka/exec_actuator_script.py b/dbm-ui/backend/flow/plugins/components/collections/kafka/exec_actuator_script.py index 98d616a648..68ec7c61a2 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/kafka/exec_actuator_script.py +++ b/dbm-ui/backend/flow/plugins/components/collections/kafka/exec_actuator_script.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -import base64 import json import logging from typing import List @@ -23,6 +22,7 @@ from backend.flow.models import FlowNode from backend.flow.plugins.components.collections.common.base_service import BkJobService from backend.flow.utils.kafka.script_template import ACTUATOR_TEMPLATE, fast_execute_script_common_kwargs +from backend.utils.string import base64_encode logger = logging.getLogger("json") @@ -70,9 +70,7 @@ def _execute(self, data, parent_data) -> bool: db_act_template["version_id"] = self._runtime_attrs["version"] db_act_template["uid"] = global_data["uid"] - db_act_template["payload"] = str( - base64.b64encode(json.dumps(db_act_template["payload"]).encode("utf-8")), "utf-8" - ) + db_act_template["payload"] = base64_encode(json.dumps(db_act_template["payload"])) FlowNode.objects.filter(root_id=kwargs["root_id"], node_id=node_id).update(hosts=exec_ips) @@ -83,7 +81,7 @@ def _execute(self, data, parent_data) -> bool: body = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": f"DBM_{node_name}_{node_id}", - "script_content": str(base64.b64encode(template.render(db_act_template).encode("utf-8")), "utf-8"), + "script_content": base64_encode(template.render(db_act_template)), "script_language": 1, "target_server": {"ip_list": target_ip_info}, } diff --git a/dbm-ui/backend/flow/plugins/components/collections/kafka/kafka_config.py b/dbm-ui/backend/flow/plugins/components/collections/kafka/kafka_config.py index 1aef087439..70aa6a878a 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/kafka/kafka_config.py +++ b/dbm-ui/backend/flow/plugins/components/collections/kafka/kafka_config.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -import base64 import logging from typing import List @@ -20,6 +19,7 @@ from backend.components.mysql_priv_manager.client import DBPrivManagerApi from backend.flow.consts import ConfigTypeEnum, LevelInfoEnum, MySQLPrivComponent, NameSpaceEnum from backend.flow.plugins.components.collections.common.base_service import BaseService +from backend.utils.string import base64_encode logger = logging.getLogger("flow") @@ -64,7 +64,7 @@ def _execute(self, data, parent_data) -> bool: # 密码服务,把用户名也当密码存 query_params = { "instances": [{"ip": global_data["domain"], "port": 0, "bk_cloud_id": global_data["bk_cloud_id"]}], - "password": base64.b64encode(str(global_data["username"]).encode("utf-8")).decode("utf-8"), + "password": base64_encode(global_data["username"]), "username": MySQLPrivComponent.KAFKA_FAKE_USER.value, "component": NameSpaceEnum.Kafka, "operator": "admin", @@ -73,7 +73,7 @@ def _execute(self, data, parent_data) -> bool: # 存真实的账号密码 query_params = { "instances": [{"ip": global_data["domain"], "port": 0, "bk_cloud_id": global_data["bk_cloud_id"]}], - "password": base64.b64encode(str(global_data["password"]).encode("utf-8")).decode("utf-8"), + "password": base64_encode(global_data["password"]), "username": global_data["username"], "component": NameSpaceEnum.Kafka, "operator": "admin", diff --git a/dbm-ui/backend/flow/plugins/components/collections/mysql/exec_actuator_script.py b/dbm-ui/backend/flow/plugins/components/collections/mysql/exec_actuator_script.py index 3b4daf2052..7be43b6e01 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/mysql/exec_actuator_script.py +++ b/dbm-ui/backend/flow/plugins/components/collections/mysql/exec_actuator_script.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -import base64 import copy import json import logging @@ -28,6 +27,7 @@ from backend.flow.utils.mysql.get_mysql_sys_user import get_mysql_sys_users from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload from backend.flow.utils.script_template import actuator_template, fast_execute_script_common_kwargs +from backend.utils.string import base64_encode logger = logging.getLogger("json") cpl = re.compile("(?P.+?)") # 非贪婪模式,只匹配第一次出现的自定义tag @@ -109,9 +109,8 @@ def _execute(self, data, parent_data) -> bool: ) # payload参数转换base64格式 - db_act_template["payload"] = str( - base64.b64encode(json.dumps(db_act_template["payload"]).encode("utf-8")), "utf-8" - ) + db_act_template["payload"] = base64_encode(json.dumps(db_act_template["payload"])) + FlowNode.objects.filter(root_id=kwargs["root_id"], node_id=node_id).update(hosts=exec_ips) # 脚本内容 @@ -121,10 +120,10 @@ def _execute(self, data, parent_data) -> bool: body = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": f"DBM_{node_name}_{node_id}", - "script_content": str(base64.b64encode(template.render(db_act_template).encode("utf-8")), "utf-8"), + "script_content": base64_encode(template.render(db_act_template)), "script_language": 1, "target_server": {"ip_list": target_ip_info}, - "script_param": str(base64.b64encode(json.dumps(db_act_template["payload"]).encode("utf-8")), "utf-8"), + "script_param": base64_encode(json.dumps(db_act_template["payload"])), } # self.log_info("[{}] ready start task with body {}".format(node_name, body)) diff --git a/dbm-ui/backend/flow/plugins/components/collections/mysql/mysql_os_init.py b/dbm-ui/backend/flow/plugins/components/collections/mysql/mysql_os_init.py index 15a860559e..02622a0240 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/mysql/mysql_os_init.py +++ b/dbm-ui/backend/flow/plugins/components/collections/mysql/mysql_os_init.py @@ -20,6 +20,7 @@ from backend.flow.consts import DBA_ROOT_USER, DEFAULT_INSTANCE, MySQLPrivComponent, UserName from 
backend.flow.plugins.components.collections.common.base_service import BkJobService from backend.flow.utils.script_template import fast_execute_script_common_kwargs +from backend.utils.string import base64_encode cpl = re.compile("(?P.+?)") @@ -99,7 +100,7 @@ def _execute(self, data, parent_data) -> bool: body = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": "DBM_MySQL_OS_Init", - "script_content": str(base64.b64encode(script_content.encode("utf-8")), "utf-8"), + "script_content": base64_encode(script_content), "script_language": 1, "target_server": {"ip_list": target_ip_info}, } @@ -234,7 +235,7 @@ def _execute(self, data, parent_data) -> bool: body = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": "DBM-Init-Mysql-Os", - "script_content": str(base64.b64encode(script_content.encode("utf-8")), "utf-8"), + "script_content": base64_encode(script_content), "script_language": 1, "target_server": {"ip_list": target_ip_info}, } @@ -287,7 +288,7 @@ def _execute(self, data, parent_data) -> bool: body = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": "DBM-Get-Os-Sys-Param", - "script_content": str(base64.b64encode(script_content.encode("utf-8")), "utf-8"), + "script_content": base64_encode(script_content), "script_language": 1, "target_server": {"ip_list": target_ip_info}, } diff --git a/dbm-ui/backend/flow/plugins/components/collections/pulsar/exec_actuator_script.py b/dbm-ui/backend/flow/plugins/components/collections/pulsar/exec_actuator_script.py index 153c92b1b4..27c9ae52dc 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/pulsar/exec_actuator_script.py +++ b/dbm-ui/backend/flow/plugins/components/collections/pulsar/exec_actuator_script.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -import base64 import json from dataclasses import asdict from typing import List @@ -24,6 +23,7 @@ from backend.flow.models import FlowNode from backend.flow.plugins.components.collections.common.base_service import BkJobService from backend.flow.utils.pulsar.pulsar_script_template import ACTUATOR_TEMPLATE, fast_execute_script_common_kwargs +from backend.utils.string import base64_encode class ExecutePulsarActuatorScriptService(BkJobService): @@ -82,9 +82,7 @@ def _execute(self, data, parent_data) -> bool: db_act_template["version_id"] = self._runtime_attrs["version"] db_act_template["uid"] = global_data["uid"] - db_act_template["payload"] = str( - base64.b64encode(json.dumps(db_act_template["payload"]).encode("utf-8")), "utf-8" - ) + db_act_template["payload"] = base64_encode(json.dumps(db_act_template["payload"])) FlowNode.objects.filter(root_id=kwargs["root_id"], node_id=node_id).update(hosts=exec_ips) @@ -95,7 +93,7 @@ def _execute(self, data, parent_data) -> bool: body = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": f"DBM_{node_name}_{node_id}", - "script_content": str(base64.b64encode(template.render(db_act_template).encode("utf-8")), "utf-8"), + "script_content": base64_encode(template.render(db_act_template)), "script_language": 1, "target_server": {"ip_list": target_ip_info}, } diff --git a/dbm-ui/backend/flow/plugins/components/collections/pulsar/rewrite_pulsar_config.py b/dbm-ui/backend/flow/plugins/components/collections/pulsar/rewrite_pulsar_config.py index b72f5cc9de..c2a121007d 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/pulsar/rewrite_pulsar_config.py +++ b/dbm-ui/backend/flow/plugins/components/collections/pulsar/rewrite_pulsar_config.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -import base64 from typing import List from pipeline.component_framework.component import Component @@ -24,6 +23,7 @@ from backend.flow.utils.pulsar.consts import PulsarConfigEnum from backend.flow.utils.pulsar.pulsar_context_dataclass import PulsarApplyContext from backend.ticket.constants import TicketType +from backend.utils.string import base64_encode class WriteBackPulsarConfigService(BaseService): @@ -134,7 +134,7 @@ def write_auth_to_prv_manager(self, global_data: dict, token: str): # 写入到密码服务,把用户名当密码存 query_params = { "instances": [{"ip": global_data["domain"], "port": 0, "bk_cloud_id": global_data["bk_cloud_id"]}], - "password": base64.b64encode(str(global_data["username"]).encode("utf-8")).decode("utf-8"), + "password": base64_encode(str(global_data["username"])), "username": MySQLPrivComponent.PULSAR_FAKE_USER.value, "component": NameSpaceEnum.Pulsar, "operator": "admin", @@ -143,7 +143,7 @@ def write_auth_to_prv_manager(self, global_data: dict, token: str): # 存储真正的账号密码 query_params = { "instances": [{"ip": global_data["domain"], "port": 0, "bk_cloud_id": global_data["bk_cloud_id"]}], - "password": base64.b64encode(str(global_data["password"]).encode("utf-8")).decode("utf-8"), + "password": base64_encode(str(global_data["password"])), "username": global_data["username"], "component": NameSpaceEnum.Pulsar, "operator": "admin", @@ -152,7 +152,7 @@ def write_auth_to_prv_manager(self, global_data: dict, token: str): # 存储token query_params = { "instances": [{"ip": global_data["domain"], "port": 0, "bk_cloud_id": global_data["bk_cloud_id"]}], - "password": base64.b64encode(str(f"token:{token}").encode("utf-8")).decode("utf-8"), + "password": base64_encode(str(f"token:{token}")), "username": PulsarConfigEnum.ClientAuthenticationParameters, "component": NameSpaceEnum.Pulsar, "operator": "admin", diff --git a/dbm-ui/backend/flow/plugins/components/collections/redis/exec_actuator_script.py 
b/dbm-ui/backend/flow/plugins/components/collections/redis/exec_actuator_script.py index 05ebaa76ab..14dec6d237 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/redis/exec_actuator_script.py +++ b/dbm-ui/backend/flow/plugins/components/collections/redis/exec_actuator_script.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ -import base64 import json import logging import re @@ -29,6 +28,7 @@ redis_actuator_template, redis_fast_execute_script_common_kwargs, ) +from backend.utils.string import base64_encode logger = logging.getLogger("json") cpl = re.compile("(?P.+?)") # 非贪婪模式,只匹配第一次出现的自定义tag @@ -102,9 +102,7 @@ def _execute(self, data, parent_data) -> bool: if getattr(trans_data, "tendis_backup_info"): db_act_template["payload"]["backup_tasks"] = trans_data.tendis_backup_info - db_act_template["payload"] = str( - base64.b64encode(json.dumps(db_act_template["payload"]).encode("utf-8")), "utf-8" - ) + db_act_template["payload"] = base64_encode(json.dumps(db_act_template["payload"])) FlowNode.objects.filter(root_id=kwargs["root_id"], node_id=node_id).update(hosts=exec_ips) @@ -115,7 +113,7 @@ def _execute(self, data, parent_data) -> bool: body = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": f"DBM_{node_name}_{node_id}", - "script_content": str(base64.b64encode(template.render(db_act_template).encode("utf-8")), "utf-8"), + "script_content": base64_encode(template.render(db_act_template)), "script_language": 1, "target_server": {"ip_list": target_ip_info}, "timeout": 86400, diff --git a/dbm-ui/backend/flow/plugins/components/collections/redis/exec_data_structure_actuator_script.py b/dbm-ui/backend/flow/plugins/components/collections/redis/exec_data_structure_actuator_script.py index bd82bf601f..65419bdb74 100644 --- 
a/dbm-ui/backend/flow/plugins/components/collections/redis/exec_data_structure_actuator_script.py +++ b/dbm-ui/backend/flow/plugins/components/collections/redis/exec_data_structure_actuator_script.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ -import base64 import json import logging import re @@ -30,6 +29,7 @@ redis_fast_execute_script_common_kwargs, ) from backend.ticket.constants import TicketType +from backend.utils.string import base64_encode logger = logging.getLogger("json") cpl = re.compile("(?P.+?)") # 非贪婪模式,只匹配第一次出现的自定义tag @@ -108,9 +108,7 @@ def _execute(self, data, parent_data) -> bool: if getattr(trans_data, "tendis_backup_info"): db_act_template["payload"]["backup_tasks"] = trans_data.tendis_backup_info - db_act_template["payload"] = str( - base64.b64encode(json.dumps(db_act_template["payload"]).encode("utf-8")), "utf-8" - ) + db_act_template["payload"] = base64_encode(json.dumps(db_act_template["payload"])) FlowNode.objects.filter(root_id=kwargs["root_id"], node_id=node_id).update(hosts=exec_ips) db_act_template["file_name"] = ( @@ -123,7 +121,7 @@ def _execute(self, data, parent_data) -> bool: body = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": f"DBM_{node_name}_{node_id}", - "script_content": str(base64.b64encode(template.render(db_act_template).encode("utf-8")), "utf-8"), + "script_content": base64_encode(template.render(db_act_template)), "script_language": 1, "target_server": {"ip_list": target_ip_info}, } diff --git a/dbm-ui/backend/flow/plugins/components/collections/redis/exec_shell_script.py b/dbm-ui/backend/flow/plugins/components/collections/redis/exec_shell_script.py index 48031469d4..7b4c91d357 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/redis/exec_shell_script.py +++ b/dbm-ui/backend/flow/plugins/components/collections/redis/exec_shell_script.py 
@@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ -import base64 import logging import traceback from typing import List @@ -25,6 +24,7 @@ from backend.flow.plugins.components.collections.common.base_service import BaseService, BkJobService from backend.flow.utils.redis.redis_context_dataclass import RedisDataStructureContext from backend.flow.utils.redis.redis_script_template import redis_fast_execute_script_common_kwargs +from backend.utils.string import base64_encode logger = logging.getLogger("json") @@ -56,7 +56,7 @@ def _execute(self, data, parent_data) -> bool: body = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": f"DBM_{node_name}_{node_id}", - "script_content": str(base64.b64encode(shell_command.encode("utf-8")), "utf-8"), + "script_content": base64_encode(shell_command), "script_language": 1, "target_server": {"ip_list": target_ip_info}, } @@ -292,7 +292,7 @@ def _execute(self, data, parent_data) -> bool: body = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": f"DBM_{node_name}_{node_id}", - "script_content": str(base64.b64encode(shell_command.encode("utf-8")), "utf-8"), + "script_content": base64_encode(shell_command), "script_language": 1, "target_server": {"ip_list": target_ip_info}, } diff --git a/dbm-ui/backend/flow/plugins/components/collections/redis/psuh_data_structure_json_script.py b/dbm-ui/backend/flow/plugins/components/collections/redis/psuh_data_structure_json_script.py index 53a947e56f..772d8e9d56 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/redis/psuh_data_structure_json_script.py +++ b/dbm-ui/backend/flow/plugins/components/collections/redis/psuh_data_structure_json_script.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. """ -import base64 import copy import json import logging @@ -28,6 +27,7 @@ from backend.flow.plugins.components.collections.common.base_service import BkJobService from backend.flow.utils.redis.redis_script_template import redis_data_structure_payload_template from backend.ticket.constants import TicketType +from backend.utils.string import base64_encode logger = logging.getLogger("json") cpl = re.compile("(?P.+?)") # 非贪婪模式,只匹配第一次出现的自定义tag @@ -103,9 +103,7 @@ def _execute(self, data, parent_data) -> bool: self.log_info(_("[{}] kwargs['payload'] 是不完整,需要将{}内容加到payload中").format(node_name, kwargs["cluster"])) db_act_template["payload"].update(kwargs["cluster"]) - db_act_template["payload"] = str( - base64.b64encode(json.dumps(db_act_template["payload"]).encode("utf-8")), "utf-8" - ) + db_act_template["payload"] = base64_encode(json.dumps(db_act_template["payload"])) FlowNode.objects.filter(root_id=kwargs["root_id"], node_id=node_id).update(hosts=exec_ips) # 脚本内容 diff --git a/dbm-ui/backend/flow/plugins/components/collections/redis/redis_dts.py b/dbm-ui/backend/flow/plugins/components/collections/redis/redis_dts.py index f5d550ac9f..fb0d4ffac0 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/redis/redis_dts.py +++ b/dbm-ui/backend/flow/plugins/components/collections/redis/redis_dts.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -import base64 import datetime import logging import re @@ -61,6 +60,7 @@ from backend.flow.utils.redis.redis_proxy_util import get_twemproxy_cluster_hash_tag from backend.ticket.constants import TicketType from backend.utils.basic import generate_root_id +from backend.utils.string import base64_encode logger = logging.getLogger("flow") @@ -472,12 +472,8 @@ def _execute(self, data, parent_data): job.save() job_id = job.id - src_password_base64 = base64.b64encode( - kwargs["cluster"]["src"]["redis_password"].encode("utf-8") - ).decode("utf-8") - dst_passsword_base64 = base64.b64encode( - kwargs["cluster"]["dst"]["cluster_password"].encode("utf-8") - ).decode("utf-8") + src_password_base64 = base64_encode(kwargs["cluster"]["src"]["redis_password"]) + dst_passsword_base64 = base64_encode(kwargs["cluster"]["dst"]["cluster_password"]) task_white_regex = get_safe_regex_pattern(kwargs["cluster"]["info"]["key_white_regex"]) task_black_regex = get_safe_regex_pattern(kwargs["cluster"]["info"]["key_black_regex"]) diff --git a/dbm-ui/backend/flow/plugins/components/collections/redis/redis_old_backup_records.py b/dbm-ui/backend/flow/plugins/components/collections/redis/redis_old_backup_records.py index c25454a4f7..d98213c198 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/redis/redis_old_backup_records.py +++ b/dbm-ui/backend/flow/plugins/components/collections/redis/redis_old_backup_records.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -import base64 import json import logging from datetime import datetime, timedelta @@ -24,6 +23,7 @@ from backend.flow.models import FlowNode from backend.flow.plugins.components.collections.common.base_service import BkJobService from backend.flow.utils.redis.redis_script_template import redis_fast_execute_script_common_kwargs +from backend.utils.string import base64_encode logger = logging.getLogger("json") @@ -90,7 +90,7 @@ def _execute(self, data, parent_data) -> bool: self.log_info(f"start get last {ndays} days old backup records of {server_ip}") query_result = get_last_n_days_backup_records(ndays, bk_cloud_id, server_ip) - encode_str = str(base64.b64encode(json.dumps(query_result).encode("utf-8")), "utf-8") + encode_str = base64_encode(json.dumps(query_result)) self.log_info(f"success get last {ndays} days old backup records of {server_ip}") @@ -104,7 +104,7 @@ def _execute(self, data, parent_data) -> bool: body = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": f"DBM_{node_name}_{node_id}", - "script_content": str(base64.b64encode(shell_command.encode("utf-8")), "utf-8"), + "script_content": base64_encode(shell_command), "script_language": 1, "target_server": {"ip_list": target_ip_info}, } diff --git a/dbm-ui/backend/flow/plugins/components/collections/riak/exec_actuator_script.py b/dbm-ui/backend/flow/plugins/components/collections/riak/exec_actuator_script.py index e2c06b3ee4..cc2a458154 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/riak/exec_actuator_script.py +++ b/dbm-ui/backend/flow/plugins/components/collections/riak/exec_actuator_script.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -import base64 import copy import json import logging @@ -26,6 +25,7 @@ from backend.flow.plugins.components.collections.common.base_service import BkJobService from backend.flow.utils.riak.riak_act_payload import RiakActPayload from backend.flow.utils.script_template import actuator_template, fast_execute_script_common_kwargs +from backend.utils.string import base64_encode logger = logging.getLogger("json") cpl = re.compile("(?P.+?)") # 非贪婪模式,只匹配第一次出现的自定义tag @@ -99,9 +99,7 @@ def _execute(self, data, parent_data) -> bool: db_act_template["uid"] = global_data["uid"] # payload参数转换base64格式 - db_act_template["payload"] = str( - base64.b64encode(json.dumps(db_act_template["payload"]).encode("utf-8")), "utf-8" - ) + db_act_template["payload"] = base64_encode(json.dumps(db_act_template["payload"])) FlowNode.objects.filter(root_id=kwargs["root_id"], node_id=node_id).update(hosts=exec_ips) # 脚本内容 @@ -111,8 +109,8 @@ def _execute(self, data, parent_data) -> bool: body = { "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": f"DBM_{node_name}_{node_id}", - "script_content": str(base64.b64encode(template.render(db_act_template).encode("utf-8")), "utf-8"), - "script_param": str(base64.b64encode(json.dumps(db_act_template["payload"]).encode("utf-8")), "utf-8"), + "script_content": base64_encode(template.render(db_act_template)), + "script_param": base64_encode(json.dumps(db_act_template["payload"])), "script_language": 1, "target_server": {"ip_list": target_ip_info}, } diff --git a/dbm-ui/backend/flow/plugins/components/collections/spider/import_metadata.py b/dbm-ui/backend/flow/plugins/components/collections/spider/import_metadata.py index 27059ade76..9de2fb8cfe 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/spider/import_metadata.py +++ b/dbm-ui/backend/flow/plugins/components/collections/spider/import_metadata.py @@ -77,8 +77,8 @@ def _create_entries(cluster_json: Dict, cluster_obj: Cluster): class TenDBClusterImportMetadataService(BaseService): - def 
__init__(self): - super().__init__() + def __init__(self, name=None): + super().__init__(name=name) self.bk_biz_id = 0 self.db_module_id = 0 self.spider_spec: Spec = Spec() diff --git a/dbm-ui/backend/flow/plugins/components/collections/sqlserver/exec_actuator_script.py b/dbm-ui/backend/flow/plugins/components/collections/sqlserver/exec_actuator_script.py index 1b586234b6..b4b39144a9 100644 --- a/dbm-ui/backend/flow/plugins/components/collections/sqlserver/exec_actuator_script.py +++ b/dbm-ui/backend/flow/plugins/components/collections/sqlserver/exec_actuator_script.py @@ -8,7 +8,6 @@ specific language governing permissions and limitations under the License. """ -import base64 import json import logging import re @@ -25,6 +24,7 @@ from backend.flow.plugins.components.collections.common.base_service import BkJobService from backend.flow.utils.script_template import sqlserver_actuator_template from backend.flow.utils.sqlserver.sqlserver_act_payload import SqlserverActPayload +from backend.utils.string import base64_encode logger = logging.getLogger("json") cpl = re.compile("(?P.+?)") # 非贪婪模式,只匹配第一次出现的自定义tag @@ -84,9 +84,8 @@ def _execute(self, data, parent_data) -> bool: ) # payload参数转换base64格式 - db_act_template["payload"] = str( - base64.b64encode(json.dumps(db_act_template["payload"]).encode("utf-8")), "utf-8" - ) + db_act_template["payload"] = base64_encode(json.dumps(db_act_template["payload"])) + # 更新节点信息 FlowNode.objects.filter(root_id=root_id, node_id=node_id).update(hosts=exec_ips) @@ -100,10 +99,10 @@ def _execute(self, data, parent_data) -> bool: "is_param_sensitive": 1, "bk_biz_id": env.JOB_BLUEKING_BIZ_ID, "task_name": f"DBM_{node_name}_{node_id}", - "script_content": str(base64.b64encode(template.render(db_act_template).encode("utf-8")), "utf-8"), + "script_content": base64_encode(template.render(db_act_template)), "script_language": 5, "target_server": {"ip_list": exec_ips}, - "script_param": 
str(base64.b64encode(json.dumps(db_act_template["payload"]).encode("utf-8")), "utf-8"), + "script_param": base64_encode(json.dumps(db_act_template["payload"])), } self.log_debug("[{}] ready start task with body {}".format(node_name, body)) diff --git a/dbm-ui/backend/flow/utils/base/payload_handler.py b/dbm-ui/backend/flow/utils/base/payload_handler.py index ffa90aeee8..82b62c32d5 100644 --- a/dbm-ui/backend/flow/utils/base/payload_handler.py +++ b/dbm-ui/backend/flow/utils/base/payload_handler.py @@ -25,6 +25,7 @@ from backend.flow.consts import DEFAULT_INSTANCE, ConfigTypeEnum, LevelInfoEnum, MySQLPrivComponent, UserName from backend.flow.utils.mysql.get_mysql_sys_user import generate_mysql_tmp_user from backend.ticket.constants import TicketType +from backend.utils.string import base64_encode apply_list = [ TicketType.MYSQL_SINGLE_APPLY.value, @@ -297,17 +298,17 @@ def redis_save_cluster_password( if redis_password and (not redis_password.isspace()): query_params["component"] = MySQLPrivComponent.REDIS.value - query_params["password"] = base64.b64encode(redis_password.encode("utf-8")).decode("utf-8") + query_params["password"] = base64_encode(redis_password) DBPrivManagerApi.modify_password(params=query_params) if redis_proxy_password and (not redis_proxy_password.isspace()): query_params["component"] = MySQLPrivComponent.REDIS_PROXY.value - query_params["password"] = base64.b64encode(redis_proxy_password.encode("utf-8")).decode("utf-8") + query_params["password"] = base64_encode(redis_proxy_password) DBPrivManagerApi.modify_password(params=query_params) if redis_proxy_admin_password and (not redis_proxy_admin_password.isspace()): query_params["component"] = MySQLPrivComponent.REDIS_PROXY_ADMIN.value - query_params["password"] = base64.b64encode(redis_proxy_admin_password.encode("utf-8")).decode("utf-8") + query_params["password"] = base64_encode(redis_proxy_admin_password) DBPrivManagerApi.modify_password(params=query_params) return True diff --git 
a/dbm-ui/backend/flow/utils/cloud/cloud_act_payload.py b/dbm-ui/backend/flow/utils/cloud/cloud_act_payload.py index 784d53a553..d79d3324bd 100644 --- a/dbm-ui/backend/flow/utils/cloud/cloud_act_payload.py +++ b/dbm-ui/backend/flow/utils/cloud/cloud_act_payload.py @@ -8,7 +8,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ -import base64 import json import logging @@ -30,6 +29,7 @@ ) from backend.flow.engine.exceptions import ServiceDoesNotApply from backend.flow.utils.base.payload_handler import PayloadHandler +from backend.utils.string import base64_encode logger = logging.getLogger("flow") @@ -199,7 +199,7 @@ def get_redis_dts_server_apply_payload(self): "password": redis_os_acc["os_password"], } paylod_json = json.dumps(paylod_obj) - payload_base64 = str(base64.b64encode(paylod_json.encode("utf-8")), "utf-8") + payload_base64 = base64_encode(paylod_json) return { "bk_dbm_nginx_url": nginx_url, "bk_dbm_cloud_id": self.cloud_id, diff --git a/dbm-ui/backend/flow/utils/mysql/mysql_db_meta.py b/dbm-ui/backend/flow/utils/mysql/mysql_db_meta.py index 7411851d8d..286c1fe1d6 100644 --- a/dbm-ui/backend/flow/utils/mysql/mysql_db_meta.py +++ b/dbm-ui/backend/flow/utils/mysql/mysql_db_meta.py @@ -14,6 +14,7 @@ from django.db.transaction import atomic from backend.components import DBPrivManagerApi +from backend.components.mysql_partition.client import DBPartitionApi from backend.configuration.constants import DBType from backend.db_meta import api from backend.db_meta.api.cluster.tendbha.handler import TenDBHAClusterHandler @@ -466,13 +467,31 @@ def mysql_cluster_offline(self): """ 定义更新cluster集群的为offline 状态 """ - Cluster.objects.filter(id=self.cluster["id"]).update(phase=ClusterPhase.OFFLINE) + cluster = Cluster.objects.get(id=self.cluster["id"]) + cluster.phase = ClusterPhase.OFFLINE + cluster.save() + # 修改分区配置为禁用状态 - 
offlinewithclu + disable_partition_params = { + "cluster_type": cluster.cluster_type, + "operator": self.ticket_data["created_by"], + "cluster_ids": [cluster.id], + } + DBPartitionApi.disable_partition_cluster(params=disable_partition_params) def mysql_cluster_online(self): """ 定义更新cluster集群的为online 状态 """ - Cluster.objects.filter(id=self.cluster["id"]).update(phase=ClusterPhase.ONLINE) + cluster = Cluster.objects.get(id=self.cluster["id"]) + cluster.phase = ClusterPhase.ONLINE + cluster.save() + # 修改分区配置为启用状态 - online + disable_partition_params = { + "cluster_type": cluster.cluster_type, + "operator": self.ticket_data["created_by"], + "cluster_ids": [cluster.id], + } + DBPartitionApi.enable_partition_cluster(params=disable_partition_params) def mysql_migrate_cluster_switch_storage(self): """ diff --git a/dbm-ui/backend/tests/conftest.py b/dbm-ui/backend/tests/conftest.py index 1d6fa53556..dcd586a645 100644 --- a/dbm-ui/backend/tests/conftest.py +++ b/dbm-ui/backend/tests/conftest.py @@ -8,6 +8,7 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" +import ipaddress import os import mock @@ -15,10 +16,12 @@ from django.contrib.auth import get_user_model from django.utils.crypto import get_random_string -from backend.db_meta.enums import ClusterType -from backend.db_meta.models import BKCity, DBModule, LogicalCity +from backend.db_meta import models +from backend.db_meta.enums import AccessLayer, ClusterType, MachineType +from backend.db_meta.models import BKCity, Cluster, DBModule, LogicalCity, Machine from backend.tests.constants import TEST_ADMIN_USERNAME from backend.tests.mock_data import constant +from backend.tests.mock_data.components import cc def mock_bk_user(username): @@ -53,6 +56,46 @@ def create_city(): BKCity.objects.create(logical_city_id=2, bk_idc_city_id=28, bk_idc_city_name="上海") +@pytest.fixture +def machine_fixture(create_city): + bk_city = BKCity.objects.first() + Machine.objects.create( + ip=cc.NORMAL_IP2, + bk_biz_id=constant.BK_BIZ_ID, + machine_type=MachineType.BACKEND.value, + bk_city=bk_city, + bk_cloud_id=1, + bk_host_id=2, + ) + + +@pytest.fixture +def init_proxy_machine(create_city): + bk_city = models.BKCity.objects.first() + machine = models.Machine.objects.create( + ip=cc.NORMAL_IP, + bk_biz_id=constant.BK_BIZ_ID, + machine_type=MachineType.BACKEND.value, + bk_city=bk_city, + access_layer=AccessLayer.PROXY, + ) + return machine + + +@pytest.fixture +def init_storage_machine(create_city): + bk_city = models.BKCity.objects.first() + machine = models.Machine.objects.create( + ip=cc.NORMAL_IP, + bk_biz_id=constant.BK_BIZ_ID, + machine_type=MachineType.BACKEND.value, + bk_city=bk_city, + access_layer=AccessLayer.STORAGE, + bk_host_id=int(ipaddress.IPv4Address(cc.NORMAL_IP)), + ) + return machine + + @pytest.fixture def init_db_module(): DBModule.objects.create( @@ -63,4 +106,15 @@ def init_db_module(): ) +@pytest.fixture +def init_cluster(): + Cluster.objects.create( + bk_biz_id=constant.BK_BIZ_ID, + name=constant.CLUSTER_NAME, + db_module_id=constant.DB_MODULE_ID, + 
immute_domain=constant.CLUSTER_IMMUTE_DOMAIN, + cluster_type=ClusterType.TenDBHA.value, + ) + + mark_global_skip = pytest.mark.skipif(os.environ.get("GLOBAL_SKIP") == "true", reason="disable in landun WIP") diff --git a/dbm-ui/backend/tests/db_meta/api/machine/test_apis.py b/dbm-ui/backend/tests/db_meta/api/machine/test_apis.py index 06c3e1782c..cd61f22a73 100644 --- a/dbm-ui/backend/tests/db_meta/api/machine/test_apis.py +++ b/dbm-ui/backend/tests/db_meta/api/machine/test_apis.py @@ -14,26 +14,13 @@ from backend.db_meta import api from backend.db_meta.enums import MachineType -from backend.db_meta.models import BKCity, Machine +from backend.db_meta.models import Machine from backend.tests.mock_data import constant from backend.tests.mock_data.components import cc pytestmark = pytest.mark.django_db -@pytest.fixture -def machine_fixture(create_city): - bk_city = BKCity.objects.first() - Machine.objects.create( - ip=cc.NORMAL_IP2, - bk_biz_id=constant.BK_BIZ_ID, - machine_type=MachineType.BACKEND.value, - bk_city=bk_city, - bk_cloud_id=1, - bk_host_id=2, - ) - - class TestCreateMachine: @patch("backend.db_meta.api.machine.apis.CCApi", cc.CCApiMock()) def test_create_success(self, create_city): diff --git a/dbm-ui/backend/tests/db_meta/api/proxy_instance/test_apis.py b/dbm-ui/backend/tests/db_meta/api/proxy_instance/test_apis.py index 6dc46be8d9..0453306585 100644 --- a/dbm-ui/backend/tests/db_meta/api/proxy_instance/test_apis.py +++ b/dbm-ui/backend/tests/db_meta/api/proxy_instance/test_apis.py @@ -23,19 +23,6 @@ TEST_INVALID_PROXY_PORT = 99999 -@pytest.fixture -def init_proxy_machine(create_city): - bk_city = models.BKCity.objects.first() - machine = models.Machine.objects.create( - ip=cc.NORMAL_IP, - bk_biz_id=constant.BK_BIZ_ID, - machine_type=MachineType.BACKEND.value, - bk_city=bk_city, - access_layer=AccessLayer.PROXY, - ) - return machine - - class TestProxyInstance: def test_create_success(self, init_proxy_machine): """创建成功""" diff --git 
a/dbm-ui/backend/tests/db_meta/models/__init__.py b/dbm-ui/backend/tests/db_meta/models/__init__.py new file mode 100644 index 0000000000..aa5085c628 --- /dev/null +++ b/dbm-ui/backend/tests/db_meta/models/__init__.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" diff --git a/dbm-ui/backend/tests/db_meta/models/test_machine.py b/dbm-ui/backend/tests/db_meta/models/test_machine.py new file mode 100644 index 0000000000..90834ea394 --- /dev/null +++ b/dbm-ui/backend/tests/db_meta/models/test_machine.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
+""" +import base64 +import gzip +import json +from unittest.mock import patch + +import pytest + +from backend.db_meta import api +from backend.db_meta.models import Cluster, Machine +from backend.tests.db_meta.api.dbha.test_apis import TEST_PROXY_PORT1, TEST_PROXY_PORT2 +from backend.tests.mock_data.components import cc +from backend.tests.mock_data.components.cc import CCApiMock + +pytestmark = pytest.mark.django_db + + +class TestMachine: + @patch("backend.db_meta.models.app.CCApi", CCApiMock()) + def test_dbm_meta(self, init_proxy_machine, init_cluster): + machine = Machine.objects.first() + cluster = Cluster.objects.first() + proxy_objs = api.proxy_instance.create( + [ + { + "ip": cc.NORMAL_IP, + "port": TEST_PROXY_PORT1, + }, + { + "ip": cc.NORMAL_IP, + "port": TEST_PROXY_PORT2, + }, + ] + ) + cluster.proxyinstance_set.add(*proxy_objs) + dbm_meta = machine.dbm_meta + assert dbm_meta == { + "version": "v2", + "content": "H4sIAAAAAAAC/9WOSwoCMRBErzL0WobMgBuvIjLkJwaTdEwiTAi5u90uRI9gr6rrFUV10BgCRjhNfRwm0" + "M9SMdB37iBTIgFANkln+FmFOAq+lW3tKW7zZjBIxx1wlXc7GzVT63egtmQZVxuNuklGRn3c0MrDs+diqT" + "Jqu2X0b5Iy7u2HJMyVyUIbFqDJf7BTwLiMF+mlgLRqAQAA", + } + + # 将Base64字符串解码回压缩的字节数据 + compressed_bytes_from_base64 = base64.b64decode(dbm_meta["content"]) + + # 使用gzip解压缩 + decompressed_bytes = gzip.decompress(compressed_bytes_from_base64) + + # 将解压缩的字节解码回原始字符串 + decompressed_string = decompressed_bytes.decode("utf-8") + dbm_meta_content = json.loads(decompressed_string) + assert dbm_meta_content == { + "common": {}, + "custom": [ + { + "app": "", + "appid": "2005000002", + "cluster_domain": "fake.db.com", + "cluster_type": "tendbha", + "db_type": "mysql", + "instance_role": "proxy", + "instance_port": "10001", + }, + { + "app": "", + "appid": "2005000002", + "cluster_domain": "fake.db.com", + "cluster_type": "tendbha", + "db_type": "mysql", + "instance_role": "proxy", + "instance_port": "10000", + }, + ], + } diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_openarea.py 
b/dbm-ui/backend/ticket/builders/mysql/mysql_openarea.py index c8c744b7de..746e936771 100644 --- a/dbm-ui/backend/ticket/builders/mysql/mysql_openarea.py +++ b/dbm-ui/backend/ticket/builders/mysql/mysql_openarea.py @@ -45,7 +45,9 @@ class AccountRulesSerializer(serializers.Serializer): cluster_id = serializers.IntegerField(help_text=_("源集群ID")) force = serializers.BooleanField(help_text=_("是否强制执行"), required=False, default=False) config_data = serializers.ListSerializer(help_text=_("分区信息"), child=ConfigDataSerializer()) - rules_set = serializers.ListSerializer(help_text=_("授权信息"), child=PrivDataSerializer()) + rules_set = serializers.ListSerializer( + help_text=_("授权信息"), child=PrivDataSerializer(), required=False, allow_null=True, allow_empty=True + ) class MysqlOpenAreaParamBuilder(builders.FlowParamBuilder): diff --git a/dbm-ui/backend/ticket/builders/riak/riak_apply.py b/dbm-ui/backend/ticket/builders/riak/riak_apply.py index bcc88ee04a..78b9c70eff 100644 --- a/dbm-ui/backend/ticket/builders/riak/riak_apply.py +++ b/dbm-ui/backend/ticket/builders/riak/riak_apply.py @@ -80,7 +80,8 @@ class RiakApplyFlowBuilder(BaseRiakTicketFlowBuilder): def patch_ticket_detail(self): details = self.ticket.details db_module_name = DBModule.objects.get(db_module_id=details["db_module_id"]).db_module_name - riak_cluster_name = riak_domain = f"{details['cluster_name']}.{db_module_name}" + riak_cluster_name = f"{details['cluster_name']}-{db_module_name}" + riak_domain = f"riak.{details['cluster_name']}-{db_module_name}.{details['db_app_abbr']}.db" details.update( db_module_name=db_module_name, cluster_name=riak_cluster_name, diff --git a/dbm-ui/backend/utils/string.py b/dbm-ui/backend/utils/string.py index 8a9d3965bd..ae30ee2f15 100644 --- a/dbm-ui/backend/utils/string.py +++ b/dbm-ui/backend/utils/string.py @@ -8,7 +8,7 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. """ - +import base64 import json import re from typing import Any, List, Optional, Tuple, Union @@ -186,3 +186,24 @@ def split_str_to_list(string: str) -> List[str]: for char in [" ", "\n", "\t", "\r", "\f", "\v"]: string = string.replace(char, ",") return string.split(",") + + +def base64_encode(content: Union[str, bytes]) -> str: + """ + 将字符串转为base64编码 + :param content: 待转换字符串 + """ + if isinstance(content, str): + content = content.encode("utf-8") + + return base64.b64encode(content).decode("utf-8") + + +def base64_decode(content: Union[str, bytes]) -> str: + """ + 将base64编码的字符串转为原始字符串 + :param content: 待转换字符串 + """ + if isinstance(content, str): + content = content.encode("utf-8") + return base64.b64decode(content).decode("utf-8") diff --git a/dbm-ui/bin/pytest.sh b/dbm-ui/bin/pytest.sh index a2f95c0c1b..8fa90909b5 100755 --- a/dbm-ui/bin/pytest.sh +++ b/dbm-ui/bin/pytest.sh @@ -4,4 +4,4 @@ cd $SCRIPT_DIR && cd .. 
|| exit 1 source bin/environ.sh -pytest backend/tests +pytest backend/tests --cov diff --git a/dbm-ui/frontend/src/views/mysql/openarea/template-create/components/config-rule/components/RenderData/RenderPrivData/components/PermissionRule.vue b/dbm-ui/frontend/src/components/add-permission-rule-dialog/Index.vue similarity index 88% rename from dbm-ui/frontend/src/views/mysql/openarea/template-create/components/config-rule/components/RenderData/RenderPrivData/components/PermissionRule.vue rename to dbm-ui/frontend/src/components/add-permission-rule-dialog/Index.vue index 4629cf8ee8..602fe06481 100644 --- a/dbm-ui/frontend/src/views/mysql/openarea/template-create/components/config-rule/components/RenderData/RenderPrivData/components/PermissionRule.vue +++ b/dbm-ui/frontend/src/components/add-permission-rule-dialog/Index.vue @@ -13,7 +13,8 @@ :columns="columns" :container-height="600" :data-source="getPermissionRules" - settings /> + :remote-pagination="false" + :settings="settings" /> - - diff --git a/dbm-ui/frontend/src/components/instance-selector/components/RenderManualInput.vue b/dbm-ui/frontend/src/components/instance-selector/components/RenderManualInput.vue deleted file mode 100644 index 64f105282b..0000000000 --- a/dbm-ui/frontend/src/components/instance-selector/components/RenderManualInput.vue +++ /dev/null @@ -1,308 +0,0 @@ - - - - - - diff --git a/dbm-ui/frontend/src/components/instance-selector-new/components/common/PanelTab.vue b/dbm-ui/frontend/src/components/instance-selector/components/common/PanelTab.vue similarity index 100% rename from dbm-ui/frontend/src/components/instance-selector-new/components/common/PanelTab.vue rename to dbm-ui/frontend/src/components/instance-selector/components/common/PanelTab.vue diff --git a/dbm-ui/frontend/src/components/instance-selector-new/components/common/manual-content/Index.vue b/dbm-ui/frontend/src/components/instance-selector/components/common/manual-content/Index.vue similarity index 95% rename from 
dbm-ui/frontend/src/components/instance-selector-new/components/common/manual-content/Index.vue rename to dbm-ui/frontend/src/components/instance-selector/components/common/manual-content/Index.vue index a5eecb3e80..b9502c8782 100644 --- a/dbm-ui/frontend/src/components/instance-selector-new/components/common/manual-content/Index.vue +++ b/dbm-ui/frontend/src/components/instance-selector/components/common/manual-content/Index.vue @@ -92,24 +92,22 @@ - + diff --git a/dbm-ui/frontend/src/components/instance-selector/components/mongo/table/Index.vue b/dbm-ui/frontend/src/components/instance-selector/components/mongo/table/Index.vue new file mode 100644 index 0000000000..549055145c --- /dev/null +++ b/dbm-ui/frontend/src/components/instance-selector/components/mongo/table/Index.vue @@ -0,0 +1,355 @@ + + + + + + diff --git a/dbm-ui/frontend/src/components/instance-selector/components/mongo/table/useTableData.ts b/dbm-ui/frontend/src/components/instance-selector/components/mongo/table/useTableData.ts new file mode 100644 index 0000000000..0ae753b7ef --- /dev/null +++ b/dbm-ui/frontend/src/components/instance-selector/components/mongo/table/useTableData.ts @@ -0,0 +1,98 @@ +/* + * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. + * + * Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. + * + * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at https://opensource.org/licenses/MIT + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for + * the specific language governing permissions and limitations under the License. 
+*/ + +import { type ComponentInternalInstance } from 'vue'; + +import { useGlobalBizs } from '@stores'; + +/** + * 处理集群列表数据 + */ +export function useTableData(clusterId?: Ref) { + const { currentBizId } = useGlobalBizs(); + const currentInstance = getCurrentInstance() as ComponentInternalInstance & { + proxy: { + getTableList: (params: any) => Promise + } + }; + + const isLoading = ref(false); + const tableData = shallowRef([]); + const isAnomalies = ref(false); + const pagination = reactive({ + count: 0, + current: 1, + limit: 10, + limitList: [10, 20, 50, 100], + align: 'right', + layout: ['total', 'limit', 'list'], + }); + const searchValue = ref(''); + + watch(searchValue, () => { + setTimeout(() => { + handleChangePage(1); + }); + }); + + const fetchResources = async () => { + isLoading.value = true; + const params = { + bk_biz_id: currentBizId, + instance_address: searchValue.value, + limit: pagination.limit, + offset: (pagination.current - 1) * pagination.limit, + extra: 1, + }; + if (clusterId?.value && clusterId.value !== currentBizId) { + Object.assign(params, { + cluster_id: clusterId.value, + }); + } + return currentInstance.proxy.getTableList(params) + .then((data) => { + const ret = data; + tableData.value = ret.results; + pagination.count = ret.count; + isAnomalies.value = false; + }) + .catch(() => { + tableData.value = []; + pagination.count = 0; + isAnomalies.value = true; + }) + .finally(() => { + isLoading.value = false; + }); + }; + + const handleChangePage = (value: number) => { + pagination.current = value; + return fetchResources(); + }; + + const handeChangeLimit = (value: number) => { + pagination.limit = value; + return handleChangePage(1); + }; + + return { + isLoading, + data: tableData, + pagination, + searchValue, + fetchResources, + handleChangePage, + handeChangeLimit, + }; +} diff --git a/dbm-ui/frontend/src/components/instance-selector/components/mongo/useTopoData.ts 
b/dbm-ui/frontend/src/components/instance-selector/components/mongo/useTopoData.ts new file mode 100644 index 0000000000..b42bb373ad --- /dev/null +++ b/dbm-ui/frontend/src/components/instance-selector/components/mongo/useTopoData.ts @@ -0,0 +1,96 @@ +/* + * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. + * + * Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. + * + * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at https://opensource.org/licenses/MIT + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for + * the specific language governing permissions and limitations under the License. +*/ +import { type ComponentInternalInstance } from 'vue'; + +import { useGlobalBizs } from '@stores'; + +interface TopoTreeData { + id: number; + name: string; + obj: 'biz' | 'cluster', + count: number, + children: Array; +} + +/** + * 处理集群列表数据 + */ +export function useTopoData>(filterClusterId: ComputedRef) { + const { currentBizId, currentBizInfo } = useGlobalBizs(); + const currentInstance = getCurrentInstance() as ComponentInternalInstance & { + proxy: { + getTopoList: (params: any) => Promise, + countFunc?: (data: T) => number + }, + }; + + const isLoading = ref(false); + const selectClusterId = ref(); + const treeRef = ref(); + + const treeData = shallowRef([]); + + /** + * 获取列表 + */ + const fetchResources = async () => { + isLoading.value = true; + const params = { + offset: 0, + limit: -1, + } as Record; + if (filterClusterId.value) { + params.cluster_ids = filterClusterId.value; + } + return currentInstance.proxy.getTopoList(params).then((data) => { + const countFn = 
currentInstance.proxy?.countFunc; + const formatData = data.map((item: T) => ({ ...item, count: countFn ? countFn(item) : item.remote_db.length })); + const children = formatData.map((item: T) => ({ + id: item.id, + name: item.master_domain, + obj: 'cluster', + count: item.count, + children: [], + })); + treeData.value = filterClusterId.value ? children : [ + { + name: currentBizInfo?.display_name || '--', + id: currentBizId, + obj: 'biz', + count: formatData.reduce((count: number, item: any) => count + item.count, 0), + children, + }, + ]; + setTimeout(() => { + if (data.length > 0) { + const [firstNode] = treeData.value; + selectClusterId.value = firstNode.id; + const [firstRawNode] = treeRef.value.getData().data; + treeRef.value.setOpen(firstRawNode); + treeRef.value.setSelect(firstRawNode); + } + }); + }) + .finally(() => { + isLoading.value = false; + }); + }; + + return { + treeRef, + isLoading, + treeData, + selectClusterId, + fetchResources, + }; +} diff --git a/dbm-ui/frontend/src/components/instance-selector-new/components/tendb-cluster/Index.vue b/dbm-ui/frontend/src/components/instance-selector/components/mysql/Index.vue similarity index 90% rename from dbm-ui/frontend/src/components/instance-selector-new/components/tendb-cluster/Index.vue rename to dbm-ui/frontend/src/components/instance-selector/components/mysql/Index.vue index caf89068e8..1ef9ca5151 100644 --- a/dbm-ui/frontend/src/components/instance-selector-new/components/tendb-cluster/Index.vue +++ b/dbm-ui/frontend/src/components/instance-selector/components/mysql/Index.vue @@ -77,10 +77,13 @@ - diff --git a/dbm-ui/frontend/src/components/instance-selector-new/components/tendb-cluster/table/useTableData.ts b/dbm-ui/frontend/src/components/instance-selector/components/tendb-cluster/table/useTableData.ts similarity index 100% rename from dbm-ui/frontend/src/components/instance-selector-new/components/tendb-cluster/table/useTableData.ts rename to 
dbm-ui/frontend/src/components/instance-selector/components/tendb-cluster/table/useTableData.ts diff --git a/dbm-ui/frontend/src/components/instance-selector-new/components/tendb-cluster/useTopoData.ts b/dbm-ui/frontend/src/components/instance-selector/components/tendb-cluster/useTopoData.ts similarity index 97% rename from dbm-ui/frontend/src/components/instance-selector-new/components/tendb-cluster/useTopoData.ts rename to dbm-ui/frontend/src/components/instance-selector/components/tendb-cluster/useTopoData.ts index fa9e155c84..41ac202e8d 100644 --- a/dbm-ui/frontend/src/components/instance-selector-new/components/tendb-cluster/useTopoData.ts +++ b/dbm-ui/frontend/src/components/instance-selector/components/tendb-cluster/useTopoData.ts @@ -14,7 +14,7 @@ import { type ComponentInternalInstance } from 'vue'; import { useGlobalBizs } from '@stores'; -import { activePanelInjectionKey } from '@components/instance-selector-new/Index.vue'; +import { activePanelInjectionKey } from '../../Index.vue'; interface TopoTreeData { id: number; diff --git a/dbm-ui/frontend/src/locales/zh-cn.json b/dbm-ui/frontend/src/locales/zh-cn.json index c6232855ae..4a8105daae 100644 --- a/dbm-ui/frontend/src/locales/zh-cn.json +++ b/dbm-ui/frontend/src/locales/zh-cn.json @@ -2779,6 +2779,35 @@ "源 DB": "源 DB", "表结构": "表结构", "初始化授权不能为空": "初始化授权不能为空", + "变量名不能为空": "变量名不能为空", + "变量说明不能为空": "变量说明不能为空", + "Bookkeeper_Broker 至少扩容一种类型": "Bookkeeper_Broker 至少扩容一种类型", + "云区域ID": "云区域ID", + "IP端口": "IP端口", + "所有异常主机": "所有异常主机", + "清除异常 IP": "清除异常 IP", + "暂无可复制 IP": "暂无可复制 IP", + "请输入合理的范围值": "请输入合理的范围值", + "暂无可复制异常 IP": "暂无可复制异常 IP", + "ConfigSvr规格": "ConfigSvr规格", + "ShardSvr规格": "ShardSvr规格", + "删除后不可恢复,请谨慎操作!": "删除后不可恢复,请谨慎操作!", + "添加运维节点任务提交成功": "添加运维节点任务提交成功", + "DB 重命名任务提交成功": "DB 重命名任务提交成功", + "DB 实例权限克隆任务提交成功": "DB 实例权限克隆任务提交成功", + "每组主机部署集群": "每组主机部署集群", + "包含特殊字符,除空格外": "包含特殊字符,除空格外", + "数据库读写权限(DML)": "数据库读写权限(DML)", + "扩容数量": "扩容数量", + "回档至指定时间 ": "回档至指定时间 ", + "目标副本集集群": "目标副本集集群", + 
"Config Server资源规格": "Config Server资源规格", + "Mongos资源规格": "Mongos资源规格", + "ShardSvr资源规格": "ShardSvr资源规格", + "临时集群名称": "临时集群名称", + "授权IP": "授权IP", + "主从版": "主从版", + "该集群关联了多个同机集群,将一同勾选": "该集群关联了多个同机集群,将一同勾选", "暂无规则,": "暂无规则,", "变量名不能为空": "变量名不能为空", "变量说明不能为空": "变量说明不能为空", @@ -2809,5 +2838,13 @@ "授权IP": "授权IP", "主从版": "主从版", "该集群关联了多个同机集群,将一同勾选": "该集群关联了多个同机集群,将一同勾选", + "定点构造执行": "定点构造执行", + "迁移主从": "迁移主从", + "迁移主从任务提交成功": "迁移主从任务提交成功", + "迁移主从执行": "迁移主从执行", + "迁移主从:集群主从实例将成对迁移至新机器。默认迁移同机所有关联集群,也可迁移部分集群,迁移会下架旧实例": "迁移主从:集群主从实例将成对迁移至新机器。默认迁移同机所有关联集群,也可迁移部分集群,迁移会下架旧实例", + "将从库主机的全部实例重建到新主机": "将从库主机的全部实例重建到新主机", + "去创建": "去创建", + "只能英文字母开头": "只能英文字母开头", "这行勿动!新增翻译请在上一行添加!": "" } \ No newline at end of file diff --git a/dbm-ui/frontend/src/services/model/es/es.ts b/dbm-ui/frontend/src/services/model/es/es.ts index 77665c7be6..54e28f638d 100644 --- a/dbm-ui/frontend/src/services/model/es/es.ts +++ b/dbm-ui/frontend/src/services/model/es/es.ts @@ -215,8 +215,12 @@ export default class Es { return this.phase === 'online'; } + get isOffline() { + return this.phase === 'offline'; + } + get domainDisplayName() { - const port = this.es_datanode_hot[0]?.port; + const { port } = this.es_master[0]; const displayName = port ? `${this.domain}:${port}` : this.domain; return displayName; } diff --git a/dbm-ui/frontend/src/services/model/hdfs/hdfs.ts b/dbm-ui/frontend/src/services/model/hdfs/hdfs.ts index 4dcc181608..b6c2d07155 100644 --- a/dbm-ui/frontend/src/services/model/hdfs/hdfs.ts +++ b/dbm-ui/frontend/src/services/model/hdfs/hdfs.ts @@ -209,6 +209,10 @@ export default class Hdfs { return this.phase === 'online'; } + get isOffline() { + return this.phase === 'offline'; + } + get domainDisplayName() { const port = this.hdfs_namenode[0]?.port; const displayName = port ? 
`${this.domain}:${port}` : this.domain; diff --git a/dbm-ui/frontend/src/services/model/influxdb/influxdbInstance.ts b/dbm-ui/frontend/src/services/model/influxdb/influxdbInstance.ts index a8a6f8cd6d..95f4b47e85 100644 --- a/dbm-ui/frontend/src/services/model/influxdb/influxdbInstance.ts +++ b/dbm-ui/frontend/src/services/model/influxdb/influxdbInstance.ts @@ -157,6 +157,10 @@ export default class InfluxDBInstance { return this.phase === 'online'; } + get isOffline() { + return this.phase === 'offline'; + } + get isRebooting() { return Boolean(this.operations.find((item) => item.ticket_type === InfluxDBInstance.INFLUXDB_REBOOT)); } diff --git a/dbm-ui/frontend/src/services/model/pulsar/pulsar.ts b/dbm-ui/frontend/src/services/model/pulsar/pulsar.ts index 9ce5423679..55c8df07f7 100644 --- a/dbm-ui/frontend/src/services/model/pulsar/pulsar.ts +++ b/dbm-ui/frontend/src/services/model/pulsar/pulsar.ts @@ -209,6 +209,10 @@ export default class Pulsar { return this.phase === 'online'; } + get isOffline() { + return this.phase === 'offline'; + } + get isNew() { return isRecentDays(this.create_at, 24 * 3); } diff --git a/dbm-ui/frontend/src/services/model/riak/riak.ts b/dbm-ui/frontend/src/services/model/riak/riak.ts index d476dde0fa..45770708a8 100644 --- a/dbm-ui/frontend/src/services/model/riak/riak.ts +++ b/dbm-ui/frontend/src/services/model/riak/riak.ts @@ -104,6 +104,8 @@ export default class Riak { update_at: string; access_url: string; cap_usage: number; + db_module_id: number; + db_module_name: string; constructor(payload = {} as Riak) { this.id = payload.id; @@ -128,6 +130,8 @@ export default class Riak { this.update_at = payload.update_at; this.access_url = payload.access_url; this.cap_usage = payload.cap_usage; + this.db_module_id = payload.db_module_id; + this.db_module_name = payload.db_module_name; } get isNewRow() { diff --git a/dbm-ui/frontend/src/services/openarea.ts b/dbm-ui/frontend/src/services/openarea.ts index ad4e8e4775..0f24805d1b 100644 --- 
a/dbm-ui/frontend/src/services/openarea.ts +++ b/dbm-ui/frontend/src/services/openarea.ts @@ -60,6 +60,7 @@ export const getPreview = function (params: { execute_objects: { authorize_ips: string[]; data_tblist: string[]; + error_msg: string; priv_data: number[]; schema_tblist: string[]; source_db: string; diff --git a/dbm-ui/frontend/src/views/es-manage/list/components/list/Index.vue b/dbm-ui/frontend/src/views/es-manage/list/components/list/Index.vue index ab99554327..d263417740 100644 --- a/dbm-ui/frontend/src/views/es-manage/list/components/list/Index.vue +++ b/dbm-ui/frontend/src/views/es-manage/list/components/list/Index.vue @@ -408,7 +408,7 @@ { t('获取访问方式') } , ]; - if (!data.isOnline) { + if (data.isOffline) { return [ handleRemove(data)}> diff --git a/dbm-ui/frontend/src/views/hdfs-manage/list/components/list/Index.vue b/dbm-ui/frontend/src/views/hdfs-manage/list/components/list/Index.vue index 44560a72bd..ef4f391bce 100644 --- a/dbm-ui/frontend/src/views/hdfs-manage/list/components/list/Index.vue +++ b/dbm-ui/frontend/src/views/hdfs-manage/list/components/list/Index.vue @@ -218,10 +218,8 @@ }, ]; - const checkClusterOnline = (data: HdfsModel) => data.phase === 'online'; - const getRowClass = (data: HdfsModel) => { - const classList = [checkClusterOnline(data) ? '' : 'is-offline']; + const classList = [data.isOnline ? '' : 'is-offline']; const newClass = isRecentDays(data.create_at, 24 * 3) ? 
'is-new-row' : ''; classList.push(newClass); if (data.id === clusterId.value) { @@ -309,7 +307,7 @@ data.operationTagTips.map(item => ) } @@ -444,7 +442,7 @@ { t('查看访问配置') } , ]; - if (!checkClusterOnline(data)) { + if (data.isOffline) { return [ handleRemove(data)}> diff --git a/dbm-ui/frontend/src/views/influxdb-manage/instance-list/components/render-list/Index.vue b/dbm-ui/frontend/src/views/influxdb-manage/instance-list/components/render-list/Index.vue index c8b2904593..2cd21c72bd 100644 --- a/dbm-ui/frontend/src/views/influxdb-manage/instance-list/components/render-list/Index.vue +++ b/dbm-ui/frontend/src/views/influxdb-manage/instance-list/components/render-list/Index.vue @@ -398,7 +398,7 @@ class="mr-8" loading={tableDataActionLoadingMap.value[data?.id]} text - disabled={data.operationDisabled} + disabled={Boolean(data.operationTicketId)} theme="primary" action-id="influxdb_destroy" permission={data.permission.influxdb_destroy} diff --git a/dbm-ui/frontend/src/views/kafka-manage/list/components/list/Index.vue b/dbm-ui/frontend/src/views/kafka-manage/list/components/list/Index.vue index f0b055b267..27e0018e51 100644 --- a/dbm-ui/frontend/src/views/kafka-manage/list/components/list/Index.vue +++ b/dbm-ui/frontend/src/views/kafka-manage/list/components/list/Index.vue @@ -384,7 +384,7 @@ { t('获取访问方式') } , ]; - if (!data.isOnline) { + if (data.isOffline) { return [ >([createRowData({})]); + const selectedIps = shallowRef>({ [ClusterTypes.TENDBHA]: [] }); const formData = reactive({ is_check_process: false, @@ -123,16 +131,15 @@ is_check_delay: false, }); - const panelList = [ - { - id: 'tendbha', - title: t('故障主库主机'), - }, - { - id: 'manualInput', - title: t('手动输入'), - }, - ]; + const tabListConfig = { + [ClusterTypes.TENDBHA]: [ + { + name: t('故障主库主机'), + }, + ], + } as unknown as Record; + + let ipMemo = {} as Record; // 批量录入 const handleShowBatchEntry = () => { @@ -154,9 +161,9 @@ isShowMasterInstanceSelector.value = true; }; // Master 批量选择 - const 
handelMasterProxyChange = (data: InstanceSelectorValues) => { - const ipMemo = {} as Record; - const newList = [] as IDataRow[]; + const handelMasterProxyChange = (data: InstanceSelectorValues) => { + selectedIps.value = data; + const newList = [] as IDataRow []; data.tendbha.forEach((proxyData) => { const { bk_host_id, bk_cloud_id, instance_address: instanceAddress } = proxyData; const [ip] = instanceAddress.split(':'); @@ -188,6 +195,12 @@ }; // 删除一个集群 const handleRemove = (index: number) => { + const ip = tableData.value[index].masterData?.ip; + if (ip) { + delete ipMemo[ip]; + const clustersArr = selectedIps.value[ClusterTypes.TENDBHA]; + selectedIps.value[ClusterTypes.TENDBHA] = clustersArr.filter(item => item.ip !== ip); + } const dataList = [...tableData.value]; dataList.splice(index, 1); tableData.value = dataList; @@ -226,6 +239,9 @@ const handleReset = () => { tableData.value = [createRowData()]; + ipMemo = {}; + selectedIps.value[ClusterTypes.TENDBHA] = []; + window.changeConfirm = false; }; diff --git a/dbm-ui/frontend/src/views/mysql/master-slave-clone/pages/page1/Index.vue b/dbm-ui/frontend/src/views/mysql/master-slave-clone/pages/page1/Index.vue index 7afc306328..4baec735fc 100644 --- a/dbm-ui/frontend/src/views/mysql/master-slave-clone/pages/page1/Index.vue +++ b/dbm-ui/frontend/src/views/mysql/master-slave-clone/pages/page1/Index.vue @@ -17,7 +17,7 @@ + :title="t('迁移主从:集群主从实例将成对迁移至新机器。默认迁移同机所有关联集群,也可迁移部分集群,迁移会下架旧实例')" />
diff --git a/dbm-ui/frontend/src/views/mysql/master-slave-clone/pages/page2/Index.vue b/dbm-ui/frontend/src/views/mysql/master-slave-clone/pages/page2/Index.vue index 7375be3003..0882b2d7a2 100644 --- a/dbm-ui/frontend/src/views/mysql/master-slave-clone/pages/page2/Index.vue +++ b/dbm-ui/frontend/src/views/mysql/master-slave-clone/pages/page2/Index.vue @@ -14,7 +14,7 @@ @@ -74,13 +75,17 @@ import { useI18n } from 'vue-i18n'; import { useRouter } from 'vue-router'; + import TendbhaInstanceModel from '@services/model/mysql/tendbha-instance'; import { createTicket } from '@services/source/ticket'; import { useGlobalBizs } from '@stores'; import { ClusterTypes } from '@common/const'; - import InstanceSelector, { type InstanceSelectorValues } from '@components/instance-selector/Index.vue'; + import InstanceSelector, { + type InstanceSelectorValues, + type PanelListType, + } from '@components/instance-selector/Index.vue'; import RenderData from './components/RenderData/Index.vue'; import RenderDataRow, { createRowData, type IDataRow } from './components/RenderData/Row.vue'; @@ -95,17 +100,17 @@ const isSubmitting = ref(false); const tableData = shallowRef>([createRowData({})]); + const selectedIps = shallowRef>({ [ClusterTypes.TENDBHA]: [] }); - const panelList = [ - { - id: 'tendbha', - title: t('待重建从库主机'), - }, - { - id: 'manualInput', - title: t('手动输入'), - }, - ]; + const tabListConfig = { + [ClusterTypes.TENDBHA]: [ + { + name: t('待重建从库主机'), + }, + ], + } as unknown as Record; + + let ipMemo = {} as Record; // 检测列表是否为空 const checkListEmpty = (list: Array) => { @@ -121,20 +126,27 @@ isShowInstanceSelecotr.value = true; }; - const handleInstancesChange = (selected: InstanceSelectorValues) => { - const newList = selected[ClusterTypes.TENDBHA].map((instanceData) => - createRowData({ - oldSlave: { - bkCloudId: instanceData.bk_cloud_id, - bkCloudName: instanceData.bk_cloud_name, - bkHostId: instanceData.bk_host_id, - ip: instanceData.ip, - port: instanceData.port, - 
instanceAddress: instanceData.instance_address, - clusterId: instanceData.cluster_id, - }, - }), - ); + const handleInstancesChange = (selected: InstanceSelectorValues) => { + selectedIps.value = selected; + const newList: IDataRow[] = []; + selected[ClusterTypes.TENDBHA].forEach((instanceData) => { + const { ip } = instanceData; + if (!ipMemo[ip]) { + const row = createRowData({ + oldSlave: { + bkCloudId: instanceData.bk_cloud_id, + bkCloudName: instanceData.bk_cloud_name, + bkHostId: instanceData.bk_host_id, + ip, + port: instanceData.port, + instanceAddress: instanceData.instance_address, + clusterId: instanceData.cluster_id, + }, + }); + newList.push(row); + ipMemo[ip] = true; + } + }); if (checkListEmpty(tableData.value)) { tableData.value = newList; @@ -153,6 +165,12 @@ // 删除一个行 const handleRemove = (index: number) => { + const ip = tableData.value[index].oldSlave?.ip; + if (ip) { + delete ipMemo[ip]; + const clustersArr = selectedIps.value[ClusterTypes.TENDBHA]; + selectedIps.value[ClusterTypes.TENDBHA] = clustersArr.filter(item => item.ip !== ip); + } const dataList = [...tableData.value]; dataList.splice(index, 1); tableData.value = dataList; @@ -191,6 +209,8 @@ const handleReset = () => { tableData.value = [createRowData()]; + ipMemo = {}; + selectedIps.value[ClusterTypes.TENDBHA] = []; window.changeConfirm = false; }; diff --git a/dbm-ui/frontend/src/views/mysql/slave-rebuild/pages/page1/components/new-host/components/RenderData/RenderCluster.vue b/dbm-ui/frontend/src/views/mysql/slave-rebuild/pages/page1/components/new-host/components/RenderData/RenderCluster.vue index eb42c850c0..006d6a1f3d 100644 --- a/dbm-ui/frontend/src/views/mysql/slave-rebuild/pages/page1/components/new-host/components/RenderData/RenderCluster.vue +++ b/dbm-ui/frontend/src/views/mysql/slave-rebuild/pages/page1/components/new-host/components/RenderData/RenderCluster.vue @@ -13,11 +13,21 @@ + diff --git 
a/dbm-ui/frontend/src/views/mysql/slave-rebuild/pages/page1/components/original-host/Index.vue b/dbm-ui/frontend/src/views/mysql/slave-rebuild/pages/page1/components/original-host/Index.vue index 732056062e..736489ec86 100644 --- a/dbm-ui/frontend/src/views/mysql/slave-rebuild/pages/page1/components/original-host/Index.vue +++ b/dbm-ui/frontend/src/views/mysql/slave-rebuild/pages/page1/components/original-host/Index.vue @@ -64,8 +64,9 @@ @@ -74,13 +75,17 @@ import { useI18n } from 'vue-i18n'; import { useRouter } from 'vue-router'; + import TendbhaInstanceModel from '@services/model/mysql/tendbha-instance'; import { createTicket } from '@services/source/ticket'; import { useGlobalBizs } from '@stores'; import { ClusterTypes } from '@common/const'; - import InstanceSelector, { type InstanceSelectorValues } from '@components/instance-selector/Index.vue'; + import InstanceSelector, { + type InstanceSelectorValues, + type PanelListType, + } from '@components/instance-selector/Index.vue'; import RenderData from './components/RenderData/Index.vue'; import RenderDataRow, { createRowData, type IDataRow } from './components/RenderData/Row.vue'; @@ -95,17 +100,17 @@ const isSubmitting = ref(false); const tableData = shallowRef>([createRowData({})]); + const selectedIntances = shallowRef>({ [ClusterTypes.TENDBHA]: [] }); - const panelList = [ - { - id: 'tendbha', - title: t('目标从库实例'), - }, - { - id: 'manualInput', - title: t('手动输入'), - }, - ]; + const tabListConfig = { + [ClusterTypes.TENDBHA]: [ + { + name: t('目标从库实例'), + }, + ], + } as unknown as Record; + + let instanceMemo = {} as Record; // 检测列表是否为空 const checkListEmpty = (list: Array) => { @@ -121,19 +126,26 @@ isShowInstanceSelecotr.value = true; }; - const handleInstancesChange = (selected: InstanceSelectorValues) => { - const newList = selected[ClusterTypes.TENDBHA].map((instanceData) => - createRowData({ - slave: { - bkCloudId: instanceData.bk_cloud_id, - bkHostId: instanceData.bk_host_id, - ip: instanceData.ip, - 
port: instanceData.port, - instanceAddress: instanceData.instance_address, - clusterId: instanceData.cluster_id, - }, - }), - ); + const handleInstancesChange = (selected: InstanceSelectorValues) => { + selectedIntances.value = selected; + const newList: IDataRow[] = []; + selected[ClusterTypes.TENDBHA].forEach((instanceData) => { + const { instance_address: instance } = instanceData; + if (!instanceMemo[instance]) { + const row = createRowData({ + slave: { + bkCloudId: instanceData.bk_cloud_id, + bkHostId: instanceData.bk_host_id, + ip: instanceData.ip, + port: instanceData.port, + instanceAddress: instanceData.instance_address, + clusterId: instanceData.cluster_id, + }, + }); + newList.push(row); + instanceMemo[instance] = true; + } + }); if (checkListEmpty(tableData.value)) { tableData.value = newList; @@ -152,6 +164,13 @@ // 删除一个行 const handleRemove = (index: number) => { + const instanceAddress = tableData.value[index].slave?.instanceAddress; + if (instanceAddress) { + delete instanceMemo[instanceAddress]; + const clustersArr = selectedIntances.value[ClusterTypes.TENDBHA]; + // eslint-disable-next-line max-len + selectedIntances.value[ClusterTypes.TENDBHA] = clustersArr.filter(item => item.instance_address !== instanceAddress); + } const dataList = [...tableData.value]; dataList.splice(index, 1); tableData.value = dataList; @@ -190,6 +209,8 @@ const handleReset = () => { tableData.value = [createRowData()]; + instanceMemo = {}; + selectedIntances.value[ClusterTypes.TENDBHA] = []; window.changeConfirm = false; }; diff --git a/dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/local-file/Index.vue b/dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/local-file/Index.vue index 5550d47499..55127fef85 100644 --- a/dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/local-file/Index.vue +++ b/dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/local-file/Index.vue @@ 
-32,7 +32,7 @@ {{ - $t( + t( '仅支持_sql文件_文件名不能包含空格_上传后_SQL执行顺序默认为从上至下_可拖动文件位置_变换文件的执行顺序文件', ) }} @@ -262,7 +262,6 @@ Array.from(files).forEach((curFile) => { - fileNameList.push(curFile.name); currentFileDataMap[curFile.name] = createFileData({ file: curFile, @@ -284,7 +283,7 @@ grammarCheck: undefined, }; - return + return; } params.append('sql_files', curFile); }); diff --git a/dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/local-file/components/FileList.vue b/dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/local-file/components/FileList.vue index 0320d11bce..8eebe6bb6e 100644 --- a/dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/local-file/components/FileList.vue +++ b/dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/local-file/components/FileList.vue @@ -15,7 +15,7 @@
{{ t('文件列表') }} - + {{ t('按顺序执行') }}
diff --git a/dbm-ui/frontend/src/views/mysql/toolbox-menu.ts b/dbm-ui/frontend/src/views/mysql/toolbox-menu.ts index 62335b7923..c77463d1df 100644 --- a/dbm-ui/frontend/src/views/mysql/toolbox-menu.ts +++ b/dbm-ui/frontend/src/views/mysql/toolbox-menu.ts @@ -60,7 +60,7 @@ export default [ icon: 'db-icon-rollback', children: [ { - name: t('定点回档'), + name: t('定点构造'), id: 'MySQLDBRollback', parentId: 'fileback', }, @@ -104,7 +104,7 @@ export default [ parentId: 'migrate', }, { - name: t('克隆主从'), + name: t('迁移主从'), id: 'MySQLMasterSlaveClone', parentId: 'migrate', }, diff --git a/dbm-ui/frontend/src/views/mysql/toolbox/components/toolbox-side/Index.vue b/dbm-ui/frontend/src/views/mysql/toolbox/components/toolbox-side/Index.vue index 65b84bda0d..40bd9b1981 100644 --- a/dbm-ui/frontend/src/views/mysql/toolbox/components/toolbox-side/Index.vue +++ b/dbm-ui/frontend/src/views/mysql/toolbox/components/toolbox-side/Index.vue @@ -32,8 +32,9 @@ -