From 5bf733b1e7432e6b82fb93acfa9029136828a028 Mon Sep 17 00:00:00 2001 From: "Jose A. Rivera" Date: Wed, 8 May 2024 10:59:58 -0500 Subject: [PATCH] vendor: volume migration initial commit Signed-off-by: Jose A. Rivera --- go.sum | 28 +- metrics/go.mod | 6 +- metrics/go.sum | 12 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 1362 ++++++++---- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- metrics/vendor/go.uber.org/zap/.golangci.yml | 2 +- metrics/vendor/go.uber.org/zap/.readme.tmpl | 10 +- metrics/vendor/go.uber.org/zap/CHANGELOG.md | 54 +- .../go.uber.org/zap/{LICENSE.txt => LICENSE} | 0 metrics/vendor/go.uber.org/zap/README.md | 66 +- .../vendor/go.uber.org/zap/buffer/buffer.go | 2 +- metrics/vendor/go.uber.org/zap/field.go | 2 + metrics/vendor/go.uber.org/zap/logger.go | 39 +- metrics/vendor/go.uber.org/zap/options.go | 15 + metrics/vendor/go.uber.org/zap/sugar.go | 39 + .../zap/zapcore/console_encoder.go | 2 +- .../vendor/go.uber.org/zap/zapcore/encoder.go | 15 + .../vendor/go.uber.org/zap/zapcore/field.go | 2 +- .../go.uber.org/zap/zapcore/json_encoder.go | 2 +- metrics/vendor/modules.txt | 8 +- tools/volume-migration/main.go | 4 + vendor/github.com/ceph/go-ceph/LICENSE | 21 + .../go-ceph/internal/callbacks/callbacks.go | 65 + .../ceph/go-ceph/internal/cutil/aliases.go | 66 + .../go-ceph/internal/cutil/buffergroup.go | 89 + .../go-ceph/internal/cutil/command_input.go | 62 + .../go-ceph/internal/cutil/command_output.go | 100 + .../ceph/go-ceph/internal/cutil/cslice.go | 76 + .../ceph/go-ceph/internal/cutil/iovec.go | 60 + .../ceph/go-ceph/internal/cutil/ptrguard.go | 37 + .../go-ceph/internal/cutil/ptrguard_pinner.go | 35 + .../ceph/go-ceph/internal/cutil/splitbuf.go | 49 + .../go-ceph/internal/cutil/sync_buffer.go | 29 + .../internal/cutil/sync_buffer_memcpy.go | 38 + .../ceph/go-ceph/internal/errutil/strerror.go | 52 + .../ceph/go-ceph/internal/log/log.go | 14 + .../ceph/go-ceph/internal/retry/sizer.go | 64 + .../go-ceph/internal/timespec/timespec.go | 39 + .../ceph/go-ceph/rados/alloc_hint_flags.go | 34 + .../github.com/ceph/go-ceph/rados/command.go | 198 ++ vendor/github.com/ceph/go-ceph/rados/conn.go | 313 +++ vendor/github.com/ceph/go-ceph/rados/doc.go | 4 + .../github.com/ceph/go-ceph/rados/errors.go | 86 + vendor/github.com/ceph/go-ceph/rados/ioctx.go | 725 +++++++ .../ceph/go-ceph/rados/ioctx_nautilus.go | 37 + .../ceph/go-ceph/rados/ioctx_octopus.go | 40 + .../go-ceph/rados/ioctx_pool_alignment.go | 25 + .../rados/ioctx_pool_requires_alignment.go | 25 + .../go-ceph/rados/ioctx_set_alloc_hint.go | 36 + .../ceph/go-ceph/rados/object_iter.go | 92 + vendor/github.com/ceph/go-ceph/rados/omap.go | 205 ++ .../ceph/go-ceph/rados/operation.go | 154 ++ .../ceph/go-ceph/rados/operation_flags.go | 37 + vendor/github.com/ceph/go-ceph/rados/rados.go | 130 ++ .../ceph/go-ceph/rados/rados_nautilus.go | 13 + .../rados/rados_read_op_assert_version.go | 19 + .../ceph/go-ceph/rados/rados_set_locator.go | 28 + .../rados/rados_write_op_assert_version.go | 19 + .../go-ceph/rados/rados_write_op_remove.go | 16 + .../go-ceph/rados/rados_write_op_setxattr.go | 31 + .../github.com/ceph/go-ceph/rados/read_op.go | 91 + .../rados/read_op_omap_get_vals_by_keys.go | 113 + .../ceph/go-ceph/rados/read_op_read.go | 72 + .../ceph/go-ceph/rados/read_step.go | 31 + .../github.com/ceph/go-ceph/rados/snapshot.go | 196 ++ .../github.com/ceph/go-ceph/rados/watcher.go | 375 ++++ .../github.com/ceph/go-ceph/rados/write_op.go | 199 ++ .../ceph/go-ceph/rados/write_op_cmpext.go | 60 + 
.../go-ceph/rados/write_op_set_alloc_hint.go | 26 + .../ceph/go-ceph/rados/write_step.go | 33 + .../ceph/go-ceph/rbd/diff_iterate.go | 131 ++ vendor/github.com/ceph/go-ceph/rbd/doc.go | 4 + .../github.com/ceph/go-ceph/rbd/encryption.go | 142 ++ vendor/github.com/ceph/go-ceph/rbd/errors.go | 84 + .../github.com/ceph/go-ceph/rbd/features.go | 187 ++ .../ceph/go-ceph/rbd/features_nautilus.go | 18 + vendor/github.com/ceph/go-ceph/rbd/group.go | 267 +++ .../github.com/ceph/go-ceph/rbd/group_snap.go | 229 ++ vendor/github.com/ceph/go-ceph/rbd/locks.go | 140 ++ .../github.com/ceph/go-ceph/rbd/metadata.go | 153 ++ .../github.com/ceph/go-ceph/rbd/migration.go | 219 ++ vendor/github.com/ceph/go-ceph/rbd/mirror.go | 1058 +++++++++ .../ceph/go-ceph/rbd/mirror_desc_status.go | 72 + .../ceph/go-ceph/rbd/mirror_nautilus.go | 420 ++++ .../ceph/go-ceph/rbd/mirror_peer_site.go | 255 +++ .../ceph/go-ceph/rbd/namespace_nautilus.go | 109 + vendor/github.com/ceph/go-ceph/rbd/options.go | 241 +++ .../ceph/go-ceph/rbd/options_octopus.go | 13 + .../ceph/go-ceph/rbd/pool_nautilus.go | 224 ++ vendor/github.com/ceph/go-ceph/rbd/rbd.go | 1325 ++++++++++++ .../ceph/go-ceph/rbd/rbd_nautilus.go | 118 ++ vendor/github.com/ceph/go-ceph/rbd/resize.go | 77 + .../github.com/ceph/go-ceph/rbd/snapshot.go | 193 ++ .../ceph/go-ceph/rbd/snapshot_namespace.go | 81 + .../ceph/go-ceph/rbd/snapshot_nautilus.go | 184 ++ .../ceph/go-ceph/rbd/snapshot_octopus.go | 69 + .../ceph/go-ceph/rbd/snapshot_rename.go | 35 + .../github.com/ceph/go-ceph/rbd/sparsify.go | 89 + .../github.com/ceph/go-ceph/rbd/watchers.go | 145 ++ .../inconshreveable/mousetrap/LICENSE | 201 ++ .../inconshreveable/mousetrap/README.md | 23 + .../inconshreveable/mousetrap/trap_others.go | 16 + .../inconshreveable/mousetrap/trap_windows.go | 42 + vendor/github.com/rook/rook/LICENSE | 201 ++ .../client/clientset/versioned/clientset.go | 97 + .../pkg/client/clientset/versioned/doc.go | 20 + .../client/clientset/versioned/scheme/doc.go | 20 + .../clientset/versioned/scheme/register.go | 56 + .../ceph.rook.io/v1/ceph.rook.io_client.go | 169 ++ .../typed/ceph.rook.io/v1/cephblockpool.go | 178 ++ .../v1/cephblockpoolradosnamespace.go | 178 ++ .../ceph.rook.io/v1/cephbucketnotification.go | 178 ++ .../typed/ceph.rook.io/v1/cephbuckettopic.go | 178 ++ .../typed/ceph.rook.io/v1/cephclient.go | 178 ++ .../typed/ceph.rook.io/v1/cephcluster.go | 178 ++ .../typed/ceph.rook.io/v1/cephcosidriver.go | 178 ++ .../typed/ceph.rook.io/v1/cephfilesystem.go | 178 ++ .../ceph.rook.io/v1/cephfilesystemmirror.go | 178 ++ .../v1/cephfilesystemsubvolumegroup.go | 178 ++ .../typed/ceph.rook.io/v1/cephnfs.go | 178 ++ .../typed/ceph.rook.io/v1/cephobjectrealm.go | 178 ++ .../typed/ceph.rook.io/v1/cephobjectstore.go | 178 ++ .../ceph.rook.io/v1/cephobjectstoreuser.go | 178 ++ .../typed/ceph.rook.io/v1/cephobjectzone.go | 178 ++ .../ceph.rook.io/v1/cephobjectzonegroup.go | 178 ++ .../typed/ceph.rook.io/v1/cephrbdmirror.go | 178 ++ .../versioned/typed/ceph.rook.io/v1/doc.go | 20 + .../ceph.rook.io/v1/generated_expansion.go | 53 + vendor/github.com/spf13/cobra/.gitignore | 39 + vendor/github.com/spf13/cobra/.golangci.yml | 62 + vendor/github.com/spf13/cobra/.mailmap | 3 + vendor/github.com/spf13/cobra/CONDUCT.md | 37 + vendor/github.com/spf13/cobra/CONTRIBUTING.md | 50 + vendor/github.com/spf13/cobra/LICENSE.txt | 174 ++ vendor/github.com/spf13/cobra/MAINTAINERS | 13 + vendor/github.com/spf13/cobra/Makefile | 35 + vendor/github.com/spf13/cobra/README.md | 112 + vendor/github.com/spf13/cobra/active_help.go 
| 67 + vendor/github.com/spf13/cobra/args.go | 131 ++ .../spf13/cobra/bash_completions.go | 712 +++++++ .../spf13/cobra/bash_completionsV2.go | 396 ++++ vendor/github.com/spf13/cobra/cobra.go | 244 +++ vendor/github.com/spf13/cobra/command.go | 1885 +++++++++++++++++ .../github.com/spf13/cobra/command_notwin.go | 20 + vendor/github.com/spf13/cobra/command_win.go | 41 + vendor/github.com/spf13/cobra/completions.go | 901 ++++++++ .../spf13/cobra/fish_completions.go | 292 +++ vendor/github.com/spf13/cobra/flag_groups.go | 290 +++ .../spf13/cobra/powershell_completions.go | 325 +++ .../spf13/cobra/shell_completions.go | 98 + .../github.com/spf13/cobra/zsh_completions.go | 308 +++ vendor/go.uber.org/zap/.golangci.yml | 2 +- vendor/go.uber.org/zap/.readme.tmpl | 10 +- vendor/go.uber.org/zap/CHANGELOG.md | 54 +- .../go.uber.org/zap/{LICENSE.txt => LICENSE} | 0 vendor/go.uber.org/zap/README.md | 66 +- vendor/go.uber.org/zap/buffer/buffer.go | 2 +- vendor/go.uber.org/zap/field.go | 2 + vendor/go.uber.org/zap/logger.go | 39 +- vendor/go.uber.org/zap/options.go | 15 + vendor/go.uber.org/zap/sugar.go | 39 + .../zap/zapcore/console_encoder.go | 2 +- vendor/go.uber.org/zap/zapcore/encoder.go | 15 + vendor/go.uber.org/zap/zapcore/field.go | 2 +- .../go.uber.org/zap/zapcore/json_encoder.go | 2 +- vendor/modules.txt | 29 +- 166 files changed, 22210 insertions(+), 561 deletions(-) rename metrics/vendor/go.uber.org/zap/{LICENSE.txt => LICENSE} (100%) create mode 100644 vendor/github.com/ceph/go-ceph/LICENSE create mode 100644 vendor/github.com/ceph/go-ceph/internal/callbacks/callbacks.go create mode 100644 vendor/github.com/ceph/go-ceph/internal/cutil/aliases.go create mode 100644 vendor/github.com/ceph/go-ceph/internal/cutil/buffergroup.go create mode 100644 vendor/github.com/ceph/go-ceph/internal/cutil/command_input.go create mode 100644 vendor/github.com/ceph/go-ceph/internal/cutil/command_output.go create mode 100644 vendor/github.com/ceph/go-ceph/internal/cutil/cslice.go create mode 100644 vendor/github.com/ceph/go-ceph/internal/cutil/iovec.go create mode 100644 vendor/github.com/ceph/go-ceph/internal/cutil/ptrguard.go create mode 100644 vendor/github.com/ceph/go-ceph/internal/cutil/ptrguard_pinner.go create mode 100644 vendor/github.com/ceph/go-ceph/internal/cutil/splitbuf.go create mode 100644 vendor/github.com/ceph/go-ceph/internal/cutil/sync_buffer.go create mode 100644 vendor/github.com/ceph/go-ceph/internal/cutil/sync_buffer_memcpy.go create mode 100644 vendor/github.com/ceph/go-ceph/internal/errutil/strerror.go create mode 100644 vendor/github.com/ceph/go-ceph/internal/log/log.go create mode 100644 vendor/github.com/ceph/go-ceph/internal/retry/sizer.go create mode 100644 vendor/github.com/ceph/go-ceph/internal/timespec/timespec.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/alloc_hint_flags.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/command.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/conn.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/doc.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/errors.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/ioctx.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/ioctx_nautilus.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/ioctx_octopus.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/ioctx_pool_alignment.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/ioctx_pool_requires_alignment.go create mode 100644 
vendor/github.com/ceph/go-ceph/rados/ioctx_set_alloc_hint.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/object_iter.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/omap.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/operation.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/operation_flags.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/rados.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/rados_nautilus.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/rados_read_op_assert_version.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/rados_set_locator.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/rados_write_op_assert_version.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/rados_write_op_remove.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/rados_write_op_setxattr.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/read_op.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/read_op_omap_get_vals_by_keys.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/read_op_read.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/read_step.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/snapshot.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/watcher.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/write_op.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/write_op_cmpext.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/write_op_set_alloc_hint.go create mode 100644 vendor/github.com/ceph/go-ceph/rados/write_step.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/diff_iterate.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/doc.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/encryption.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/errors.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/features.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/features_nautilus.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/group.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/group_snap.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/locks.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/metadata.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/migration.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/mirror.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/mirror_desc_status.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/mirror_nautilus.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/mirror_peer_site.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/namespace_nautilus.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/options.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/options_octopus.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/pool_nautilus.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/rbd.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/rbd_nautilus.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/resize.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/snapshot.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/snapshot_namespace.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/snapshot_nautilus.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/snapshot_octopus.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/snapshot_rename.go create mode 100644 vendor/github.com/ceph/go-ceph/rbd/sparsify.go 
create mode 100644 vendor/github.com/ceph/go-ceph/rbd/watchers.go create mode 100644 vendor/github.com/inconshreveable/mousetrap/LICENSE create mode 100644 vendor/github.com/inconshreveable/mousetrap/README.md create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_others.go create mode 100644 vendor/github.com/inconshreveable/mousetrap/trap_windows.go create mode 100644 vendor/github.com/rook/rook/LICENSE create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/clientset.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/doc.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/scheme/doc.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/scheme/register.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/ceph.rook.io_client.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephblockpool.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephblockpoolradosnamespace.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephbucketnotification.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephbuckettopic.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephclient.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephcluster.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephcosidriver.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystem.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystemmirror.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystemsubvolumegroup.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephnfs.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectrealm.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectstore.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectstoreuser.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectzone.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectzonegroup.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephrbdmirror.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/doc.go create mode 100644 vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/generated_expansion.go create mode 100644 vendor/github.com/spf13/cobra/.gitignore create mode 100644 vendor/github.com/spf13/cobra/.golangci.yml create mode 100644 vendor/github.com/spf13/cobra/.mailmap create mode 100644 vendor/github.com/spf13/cobra/CONDUCT.md create mode 100644 vendor/github.com/spf13/cobra/CONTRIBUTING.md create mode 100644 vendor/github.com/spf13/cobra/LICENSE.txt 
create mode 100644 vendor/github.com/spf13/cobra/MAINTAINERS create mode 100644 vendor/github.com/spf13/cobra/Makefile create mode 100644 vendor/github.com/spf13/cobra/README.md create mode 100644 vendor/github.com/spf13/cobra/active_help.go create mode 100644 vendor/github.com/spf13/cobra/args.go create mode 100644 vendor/github.com/spf13/cobra/bash_completions.go create mode 100644 vendor/github.com/spf13/cobra/bash_completionsV2.go create mode 100644 vendor/github.com/spf13/cobra/cobra.go create mode 100644 vendor/github.com/spf13/cobra/command.go create mode 100644 vendor/github.com/spf13/cobra/command_notwin.go create mode 100644 vendor/github.com/spf13/cobra/command_win.go create mode 100644 vendor/github.com/spf13/cobra/completions.go create mode 100644 vendor/github.com/spf13/cobra/fish_completions.go create mode 100644 vendor/github.com/spf13/cobra/flag_groups.go create mode 100644 vendor/github.com/spf13/cobra/powershell_completions.go create mode 100644 vendor/github.com/spf13/cobra/shell_completions.go create mode 100644 vendor/github.com/spf13/cobra/zsh_completions.go rename vendor/go.uber.org/zap/{LICENSE.txt => LICENSE} (100%) diff --git a/go.sum b/go.sum index 4b906ef2c6..08ebfac347 100644 --- a/go.sum +++ b/go.sum @@ -135,6 +135,8 @@ github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4r github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/ceph/ceph-csi/api v0.0.0-20240322131550-063319f6e516 h1:yjkEXQehM/S+TvmHiXXz/EYMPJ23vtSnOeu3+IeCVss= github.com/ceph/ceph-csi/api v0.0.0-20240322131550-063319f6e516/go.mod h1:joNF3+cwdiRd/26hv849wyLwnXNtUtx8sUisMTC/3dk= +github.com/ceph/go-ceph v0.26.0 h1:LZoATo25ZH5aeL5t85BwIbrNLKCDfcDM+e0qV0cmwHY= +github.com/ceph/go-ceph v0.26.0/go.mod h1:ISxb295GszZwtLPkeWi+L2uLYBVsqbsh0M104jZMOX4= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= @@ -169,6 +171,7 @@ github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -389,6 +392,8 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= +github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod 
h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -590,6 +595,8 @@ github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= @@ -839,10 +846,13 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rook/rook v1.14.3 h1:te73EGuFQ+9akw0kocUvbNqsICodZZ6LRH379Y/S3CI= +github.com/rook/rook v1.14.3/go.mod h1:uch2qrF9JGZhYZAbFtGTI30PKw5GxTl1cjbCjInQELs= github.com/rook/rook/pkg/apis v0.0.0-20240529164429-48b657099d3c h1:Khn/QJ35Qd07NbiqAL1QBfYpfq4/F4LXTRRv0wLjjUg= github.com/rook/rook/pkg/apis v0.0.0-20240529164429-48b657099d3c/go.mod h1:X/tPOJyDiIcJG75MLdPIYbZ4gKK3WMOyCb6ErPWSG0k= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= @@ -868,6 +878,8 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -954,8 +966,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod 
h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1613,8 +1625,8 @@ k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= k8s.io/apiextensions-apiserver v0.18.3/go.mod h1:TMsNGs7DYpMXd+8MOCX8KzPOCx8fnZMoIGB24m03+JE= k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk= -k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg= -k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= +k8s.io/apiextensions-apiserver v0.29.3 h1:9HF+EtZaVpFjStakF4yVufnXGPRppWFEQ87qnO91YeI= +k8s.io/apiextensions-apiserver v0.29.3/go.mod h1:po0XiY5scnpJfFizNGo6puNU6Fq6D70UJY2Cb2KwAVc= k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= @@ -1629,8 +1641,8 @@ k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= k8s.io/apiserver v0.18.3/go.mod h1:tHQRmthRPLUtwqsOnJJMoI8SW3lnoReZeE861lH8vUw= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= -k8s.io/apiserver v0.29.2 h1:+Z9S0dSNr+CjnVXQePG8TcBWHr3Q7BmAr7NraHvsMiQ= -k8s.io/apiserver v0.29.2/go.mod h1:B0LieKVoyU7ykQvPFm7XSdIHaCHSzCzQWPFa5bqbeMQ= +k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE= +k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs= k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw= k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= @@ -1646,8 +1658,8 @@ k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbW k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/component-base v0.18.3/go.mod h1:bp5GzGR0aGkYEfTj+eTY0AN/vXTgkJdQXjNTTVUaa3k= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= -k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8= -k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= +k8s.io/component-base v0.29.3 h1:Oq9/nddUxlnrCuuR2K/jp6aflVvc0uDvxMzAWxnGzAo= +k8s.io/component-base v0.29.3/go.mod h1:Yuj33XXjuOk2BAaHsIGHhCKZQAgYKhqIxIjIr2UXYio= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo 
v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= diff --git a/metrics/go.mod b/metrics/go.mod index fd8eccbd33..2050370f64 100644 --- a/metrics/go.mod +++ b/metrics/go.mod @@ -37,11 +37,11 @@ require ( github.com/prometheus/client_model v0.6.0 github.com/red-hat-storage/ocs-operator/api/v4 v4.0.0-20240327160100-bbe9d9d49462 github.com/red-hat-storage/ocs-operator/v4 v4.0.0-00010101000000-000000000000 - github.com/rook/rook v1.13.7 + github.com/rook/rook v1.14.3 github.com/rook/rook/pkg/apis v0.0.0-20240529164429-48b657099d3c github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 - go.uber.org/zap v1.26.0 + go.uber.org/zap v1.27.0 golang.org/x/net v0.23.0 k8s.io/api v0.29.3 k8s.io/apimachinery v0.29.3 @@ -51,7 +51,7 @@ require ( require ( github.com/Masterminds/semver/v3 v3.2.1 // indirect - github.com/aws/aws-sdk-go v1.50.9 // indirect + github.com/aws/aws-sdk-go v1.51.7 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v3 v3.2.2 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect diff --git a/metrics/go.sum b/metrics/go.sum index 5f5e8598f2..4fe4296aef 100644 --- a/metrics/go.sum +++ b/metrics/go.sum @@ -110,8 +110,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.44.164/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go v1.50.9 h1:yX66aKnEtRc/uNV/1EH8CudRT5aLwVwcSwTBphuVPt8= -github.com/aws/aws-sdk-go v1.50.9/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.51.7 h1:RRjxHhx9RCjw5AhgpmmShq3F4JDlleSkyhYMQ2xUAe8= +github.com/aws/aws-sdk-go v1.51.7/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -716,8 +716,8 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/rook/rook v1.13.7 h1:TsDHWlh/+8zwcRuG6GX42XJ+pZ+qpOzmloLWS9InBoY= -github.com/rook/rook v1.13.7/go.mod h1:e63s98WVRahwUGkACfZG/R+t8MGwGzU5L7+RbDKZhJw= +github.com/rook/rook v1.14.3 h1:te73EGuFQ+9akw0kocUvbNqsICodZZ6LRH379Y/S3CI= +github.com/rook/rook v1.14.3/go.mod h1:uch2qrF9JGZhYZAbFtGTI30PKw5GxTl1cjbCjInQELs= github.com/rook/rook/pkg/apis v0.0.0-20240529164429-48b657099d3c h1:Khn/QJ35Qd07NbiqAL1QBfYpfq4/F4LXTRRv0wLjjUg= github.com/rook/rook/pkg/apis v0.0.0-20240529164429-48b657099d3c/go.mod h1:X/tPOJyDiIcJG75MLdPIYbZ4gKK3WMOyCb6ErPWSG0k= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -815,8 +815,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.9.1/go.mod 
h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= diff --git a/metrics/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/metrics/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 10137074c6..b6d122c154 100644 --- a/metrics/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/metrics/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -661,6 +661,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -694,6 +703,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "acm-pca-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -3758,6 +3776,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "athena.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -4018,278 +4045,82 @@ var awsPartition = partition{ Region: "us-east-1", }: endpoint{}, endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "autoscaling": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - Protocols: []string{"http", "https"}, - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-south-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "autoscaling-fips.ca-central-1.amazonaws.com", - }, - endpointKey{ - Region: "ca-west-1", - }: endpoint{}, - endpointKey{ - Region: "ca-west-1", + Region: "us-east-1", Variant: fipsVariant, }: 
endpoint{ - Hostname: "autoscaling-fips.ca-west-1.amazonaws.com", - }, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-2", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "fips-ca-central-1", - }: endpoint{ - Hostname: "autoscaling-fips.ca-central-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-central-1", - }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "fips-ca-west-1", - }: endpoint{ - Hostname: "autoscaling-fips.ca-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ca-west-1", - }, - Deprecated: boxedTrue, + Hostname: "auditmanager-fips.us-east-1.amazonaws.com", }, endpointKey{ - Region: "fips-us-east-1", + Region: "us-east-1-fips", }: endpoint{ - Hostname: "autoscaling-fips.us-east-1.amazonaws.com", + Hostname: "auditmanager-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, Deprecated: boxedTrue, }, endpointKey{ - Region: "fips-us-east-2", - }: endpoint{ - Hostname: "autoscaling-fips.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - Deprecated: boxedTrue, - }, + Region: "us-east-2", + }: endpoint{}, endpointKey{ - Region: "fips-us-west-1", + Region: "us-east-2", + Variant: fipsVariant, }: endpoint{ - Hostname: "autoscaling-fips.us-west-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-west-1", - }, - Deprecated: boxedTrue, + Hostname: "auditmanager-fips.us-east-2.amazonaws.com", }, endpointKey{ - Region: "fips-us-west-2", + Region: "us-east-2-fips", }: endpoint{ - Hostname: "autoscaling-fips.us-west-2.amazonaws.com", + Hostname: "auditmanager-fips.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-west-2", + Region: "us-east-2", }, Deprecated: boxedTrue, }, endpointKey{ - Region: "il-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-central-1", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", + Region: "us-west-1", }: endpoint{}, endpointKey{ - Region: "us-east-1", + Region: "us-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "autoscaling-fips.us-east-1.amazonaws.com", + Hostname: "auditmanager-fips.us-west-1.amazonaws.com", }, endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - Variant: fipsVariant, + Region: "us-west-1-fips", }: endpoint{ - Hostname: "autoscaling-fips.us-east-2.amazonaws.com", + Hostname: "auditmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, }, endpointKey{ - Region: "us-west-1", + Region: "us-west-2", }: endpoint{}, endpointKey{ - Region: "us-west-1", + Region: "us-west-2", Variant: fipsVariant, }: endpoint{ - Hostname: "autoscaling-fips.us-west-1.amazonaws.com", + Hostname: "auditmanager-fips.us-west-2.amazonaws.com", }, endpointKey{ - Region: "us-west-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - Variant: fipsVariant, + Region: "us-west-2-fips", }: endpoint{ - Hostname: "autoscaling-fips.us-west-2.amazonaws.com", + Hostname: 
"auditmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, }, }, }, - "autoscaling-plans": service{ + "autoscaling": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ Protocols: []string{"http", "https"}, }, }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-east-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-northeast-3", - }: endpoint{}, - endpointKey{ - Region: "ap-south-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-1", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-2", - }: endpoint{}, - endpointKey{ - Region: "ap-southeast-3", - }: endpoint{}, - endpointKey{ - Region: "ca-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "eu-north-1", - }: endpoint{}, - endpointKey{ - Region: "eu-south-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-1", - }: endpoint{}, - endpointKey{ - Region: "eu-west-2", - }: endpoint{}, - endpointKey{ - Region: "eu-west-3", - }: endpoint{}, - endpointKey{ - Region: "me-south-1", - }: endpoint{}, - endpointKey{ - Region: "sa-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-2", - }: endpoint{}, - endpointKey{ - Region: "us-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-west-2", - }: endpoint{}, - }, - }, - "backup": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", @@ -4327,6 +4158,265 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "autoscaling-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "autoscaling-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "autoscaling-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "autoscaling-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "autoscaling-fips.us-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "autoscaling-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-west-2.amazonaws.com", + }, + }, + }, + "autoscaling-plans": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "backup": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: 
"eu-central-1", }: endpoint{}, @@ -5853,6 +5943,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6774,6 +6867,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -6828,6 +6924,9 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -6904,6 +7003,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -6958,6 +7060,9 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7137,12 +7242,27 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -10263,6 +10383,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ec2-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -10302,6 +10431,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ec2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -11310,6 +11448,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -11490,6 +11637,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-eu-central-1", }: endpoint{ @@ -12282,6 +12438,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -12430,6 +12589,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: 
endpoint{}, @@ -14437,6 +14599,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -14866,6 +15031,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -15155,6 +15323,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -15182,6 +15353,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -16198,16 +16372,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "iotroborunner": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "eu-central-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, "iotsecuredtunneling": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -17488,6 +17652,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18163,6 +18330,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -19096,66 +19266,222 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.af-south-1.api.aws", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-south-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-3.api.aws", + }, endpointKey{ Region: 
"ap-southeast-4", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "ca-west-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-central-2", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-north-1.api.aws", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-south-1.api.aws", + }, endpointKey{ Region: "eu-south-2", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-west-3.api.aws", + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "logs-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "logs-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -19195,18 +19521,48 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.sa-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: 
dualStackVariant, + }: endpoint{ + Hostname: "logs.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, @@ -19216,6 +19572,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, @@ -19225,6 +19587,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, @@ -19234,6 +19602,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, @@ -19864,6 +20238,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -19873,6 +20250,12 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -21698,6 +22081,14 @@ var awsPartition = partition{ Region: "ap-southeast-3", }, }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "oidc.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -22059,12 +22450,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -22841,6 +23238,14 @@ var awsPartition = partition{ Region: "ap-southeast-3", }, }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, endpointKey{ Region: "ca-central-1", }: endpoint{ @@ -22971,6 +23376,19 @@ var awsPartition = partition{ }, }, }, + "private-networks": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "profile": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -24433,6 +24851,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -24448,18 +24872,87 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ 
+ Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com", + }, }, }, "rekognition": service{ @@ -24783,153 +25276,64 @@ var awsPartition = partition{ }, }, "resource-explorer-2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - DNSSuffix: "api.aws", - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - }, - }, Endpoints: serviceEndpoints{ - endpointKey{ - Region: "af-south-1", - }: endpoint{ - Hostname: "resource-explorer-2.af-south-1.api.aws", - }, - endpointKey{ - Region: "ap-east-1", - }: endpoint{ - Hostname: "resource-explorer-2.ap-east-1.api.aws", - }, endpointKey{ Region: "ap-northeast-1", - }: endpoint{ - Hostname: "resource-explorer-2.ap-northeast-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-northeast-2", - }: endpoint{ - Hostname: "resource-explorer-2.ap-northeast-2.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-northeast-3", - }: endpoint{ - Hostname: "resource-explorer-2.ap-northeast-3.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-south-1", - }: endpoint{ - Hostname: "resource-explorer-2.ap-south-1.api.aws", - }, - endpointKey{ - Region: "ap-south-2", - }: endpoint{ - Hostname: "resource-explorer-2.ap-south-2.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", - }: endpoint{ - Hostname: "resource-explorer-2.ap-southeast-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", - }: endpoint{ - Hostname: "resource-explorer-2.ap-southeast-2.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ap-southeast-3", - }: endpoint{ - Hostname: "resource-explorer-2.ap-southeast-3.api.aws", - }, - endpointKey{ - Region: "ap-southeast-4", - }: endpoint{ - Hostname: "resource-explorer-2.ap-southeast-4.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "ca-central-1", - }: endpoint{ - Hostname: "resource-explorer-2.ca-central-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "eu-central-1", - }: endpoint{ - Hostname: 
"resource-explorer-2.eu-central-1.api.aws", - }, - endpointKey{ - Region: "eu-central-2", - }: endpoint{ - Hostname: "resource-explorer-2.eu-central-2.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "eu-north-1", - }: endpoint{ - Hostname: "resource-explorer-2.eu-north-1.api.aws", - }, - endpointKey{ - Region: "eu-south-1", - }: endpoint{ - Hostname: "resource-explorer-2.eu-south-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "eu-west-1", - }: endpoint{ - Hostname: "resource-explorer-2.eu-west-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "eu-west-2", - }: endpoint{ - Hostname: "resource-explorer-2.eu-west-2.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "eu-west-3", - }: endpoint{ - Hostname: "resource-explorer-2.eu-west-3.api.aws", - }, - endpointKey{ - Region: "il-central-1", - }: endpoint{ - Hostname: "resource-explorer-2.il-central-1.api.aws", - }, - endpointKey{ - Region: "me-central-1", - }: endpoint{ - Hostname: "resource-explorer-2.me-central-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "me-south-1", - }: endpoint{ - Hostname: "resource-explorer-2.me-south-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "sa-east-1", - }: endpoint{ - Hostname: "resource-explorer-2.sa-east-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "us-east-1", - }: endpoint{ - Hostname: "resource-explorer-2.us-east-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "us-east-2", - }: endpoint{ - Hostname: "resource-explorer-2.us-east-2.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "us-west-1", - }: endpoint{ - Hostname: "resource-explorer-2.us-west-1.api.aws", - }, + }: endpoint{}, endpointKey{ Region: "us-west-2", - }: endpoint{ - Hostname: "resource-explorer-2.us-west-2.api.aws", - }, + }: endpoint{}, }, }, "resource-groups": service{ @@ -25146,6 +25550,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -29047,12 +29454,18 @@ var awsPartition = partition{ }, "sms-voice": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -29086,6 +29499,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-2", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, endpointKey{ Region: "eu-south-2", }: endpoint{}, @@ -29095,6 +29514,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -29113,6 +29535,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sms-voice-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -29128,6 +29568,12 @@ var awsPartition = partition{ endpointKey{ Region: "me-central-1", }: endpoint{}, + 
endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -29137,6 +29583,24 @@ var awsPartition = partition{ }: endpoint{ Hostname: "sms-voice-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -30383,6 +30847,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -35480,6 +35947,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "inspector2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "internetmonitor": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -35792,6 +36269,16 @@ var awscnPartition = partition{ }, }, }, + "network-firewall": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "oam": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -35969,31 +36456,6 @@ var awscnPartition = partition{ }: endpoint{}, }, }, - "resource-explorer-2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - DNSSuffix: "api.amazonwebservices.com.cn", - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.amazonwebservices.com.cn", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "cn-north-1", - }: endpoint{ - Hostname: "resource-explorer-2.cn-north-1.api.amazonwebservices.com.cn", - }, - endpointKey{ - Region: "cn-northwest-1", - }: endpoint{ - Hostname: "resource-explorer-2.cn-northwest-1.api.amazonwebservices.com.cn", - }, - }, - }, "resource-groups": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -39079,6 +39541,16 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "emr-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "es": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -40683,6 +41155,13 @@ var awsusgovPartition = partition{ }, }, }, + "models-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "models.lex": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -41409,31 +41888,6 @@ var awsusgovPartition = partition{ }, }, }, - "resource-explorer-2": service{ - Defaults: endpointDefaults{ - defaultKey{}: endpoint{ - DNSSuffix: "api.aws", - }, - defaultKey{ - Variant: fipsVariant, - }: endpoint{ - Hostname: "{service}-fips.{region}.{dnsSuffix}", - DNSSuffix: "api.aws", - }, - }, - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "us-gov-east-1", - }: endpoint{ - Hostname: "resource-explorer-2.us-gov-east-1.api.aws", - }, - endpointKey{ - Region: "us-gov-west-1", - }: endpoint{ - Hostname: "resource-explorer-2.us-gov-west-1.api.aws", - }, - }, - }, 
"resource-groups": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{}, @@ -41597,6 +42051,13 @@ var awsusgovPartition = partition{ }, }, }, + "runtime-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "runtime.lex": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -42255,6 +42716,16 @@ var awsusgovPartition = partition{ }, }, }, + "signer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "simspaceweaver": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -42319,6 +42790,15 @@ var awsusgovPartition = partition{ }, "sms-voice": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-gov-west-1", }: endpoint{ @@ -42328,6 +42808,15 @@ var awsusgovPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, @@ -43326,6 +43815,20 @@ var awsisoPartition = partition{ }, }, }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "api.sagemaker": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -43375,6 +43878,16 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, "athena": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -44430,6 +44943,13 @@ var awsisoPartition = partition{ }: endpoint{}, }, }, + "textract": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, "transcribe": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -44522,6 +45042,20 @@ var awsisobPartition = partition{ }, }, }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "api.sagemaker": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -44555,6 +45089,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "autoscaling": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -45163,6 +45704,37 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "storagegateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, "streams.dynamodb": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ diff --git a/metrics/vendor/github.com/aws/aws-sdk-go/aws/version.go b/metrics/vendor/github.com/aws/aws-sdk-go/aws/version.go index 8f06625b54..4667425361 100644 --- a/metrics/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/metrics/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.50.9" +const SDKVersion = "1.51.7" diff --git a/metrics/vendor/go.uber.org/zap/.golangci.yml b/metrics/vendor/go.uber.org/zap/.golangci.yml index fbc6df7906..2346df1351 100644 --- a/metrics/vendor/go.uber.org/zap/.golangci.yml +++ b/metrics/vendor/go.uber.org/zap/.golangci.yml @@ -17,7 +17,7 @@ linters: - unused # Our own extras: - - gofmt + - gofumpt - nolintlint # lints nolint directives - revive diff --git a/metrics/vendor/go.uber.org/zap/.readme.tmpl b/metrics/vendor/go.uber.org/zap/.readme.tmpl index 92aa65d660..4fea3027af 100644 --- a/metrics/vendor/go.uber.org/zap/.readme.tmpl +++ b/metrics/vendor/go.uber.org/zap/.readme.tmpl @@ -1,7 +1,15 @@ # :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +
+ Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+ ## Installation `go get -u go.uber.org/zap` @@ -92,7 +100,7 @@ standard.
-Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are diff --git a/metrics/vendor/go.uber.org/zap/CHANGELOG.md b/metrics/vendor/go.uber.org/zap/CHANGELOG.md index 11b4659761..6d6cd5f4d7 100644 --- a/metrics/vendor/go.uber.org/zap/CHANGELOG.md +++ b/metrics/vendor/go.uber.org/zap/CHANGELOG.md @@ -3,14 +3,30 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## 1.27.0 (20 Feb 2024) +Enhancements: +* [#1378][]: Add `WithLazy` method for `SugaredLogger`. +* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`. +* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`. +* [#1416][]: Add `WithPanicHook` option for testing panic logs. + +Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release. + +[#1378]: https://github.com/uber-go/zap/pull/1378 +[#1399]: https://github.com/uber-go/zap/pull/1399 +[#1406]: https://github.com/uber-go/zap/pull/1406 +[#1416]: https://github.com/uber-go/zap/pull/1416 + ## 1.26.0 (14 Sep 2023) Enhancements: +* [#1297][]: Add Dict as a Field. * [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured context. * [#1350][]: String encoding is much (~50%) faster now. -Thanks to @jquirke, @cdvr1993 for their contributions to this release. +Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release. +[#1297]: https://github.com/uber-go/zap/pull/1297 [#1319]: https://github.com/uber-go/zap/pull/1319 [#1350]: https://github.com/uber-go/zap/pull/1350 @@ -25,7 +41,7 @@ Enhancements: * [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set. * [#1281][]: Add `zap/exp/expfield` package which contains helper methods `Str` and `Strs` for constructing String-like zap.Fields. -* [#1310][]: Reduce stack size on `Any`. +* [#1310][]: Reduce stack size on `Any`. Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions to this release. @@ -352,7 +368,7 @@ to this release. [#675]: https://github.com/uber-go/zap/pull/675 [#704]: https://github.com/uber-go/zap/pull/704 -## v1.9.1 (06 Aug 2018) +## 1.9.1 (06 Aug 2018) Bugfixes: @@ -360,7 +376,7 @@ Bugfixes: [#614]: https://github.com/uber-go/zap/pull/614 -## v1.9.0 (19 Jul 2018) +## 1.9.0 (19 Jul 2018) Enhancements: * [#602][]: Reduce number of allocations when logging with reflection. @@ -373,7 +389,7 @@ Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and [#572]: https://github.com/uber-go/zap/pull/572 [#606]: https://github.com/uber-go/zap/pull/606 -## v1.8.0 (13 Apr 2018) +## 1.8.0 (13 Apr 2018) Enhancements: * [#508][]: Make log level configurable when redirecting the standard @@ -391,14 +407,14 @@ Thanks to @DiSiqueira and @djui for their contributions to this release. [#577]: https://github.com/uber-go/zap/pull/577 [#574]: https://github.com/uber-go/zap/pull/574 -## v1.7.1 (25 Sep 2017) +## 1.7.1 (25 Sep 2017) Bugfixes: * [#504][]: Store strings when using AddByteString with the map encoder. 
[#504]: https://github.com/uber-go/zap/pull/504 -## v1.7.0 (21 Sep 2017) +## 1.7.0 (21 Sep 2017) Enhancements: @@ -407,7 +423,7 @@ Enhancements: [#487]: https://github.com/uber-go/zap/pull/487 -## v1.6.0 (30 Aug 2017) +## 1.6.0 (30 Aug 2017) Enhancements: @@ -418,7 +434,7 @@ Enhancements: [#490]: https://github.com/uber-go/zap/pull/490 [#491]: https://github.com/uber-go/zap/pull/491 -## v1.5.0 (22 Jul 2017) +## 1.5.0 (22 Jul 2017) Enhancements: @@ -436,7 +452,7 @@ Thanks to @richard-tunein and @pavius for their contributions to this release. [#460]: https://github.com/uber-go/zap/pull/460 [#470]: https://github.com/uber-go/zap/pull/470 -## v1.4.1 (08 Jun 2017) +## 1.4.1 (08 Jun 2017) This release fixes two bugs. @@ -448,7 +464,7 @@ Bugfixes: [#435]: https://github.com/uber-go/zap/pull/435 [#444]: https://github.com/uber-go/zap/pull/444 -## v1.4.0 (12 May 2017) +## 1.4.0 (12 May 2017) This release adds a few small features and is fully backward-compatible. @@ -464,7 +480,7 @@ Enhancements: [#425]: https://github.com/uber-go/zap/pull/425 [#431]: https://github.com/uber-go/zap/pull/431 -## v1.3.0 (25 Apr 2017) +## 1.3.0 (25 Apr 2017) This release adds an enhancement to zap's testing helpers as well as the ability to marshal an AtomicLevel. It is fully backward-compatible. @@ -478,7 +494,7 @@ Enhancements: [#415]: https://github.com/uber-go/zap/pull/415 [#416]: https://github.com/uber-go/zap/pull/416 -## v1.2.0 (13 Apr 2017) +## 1.2.0 (13 Apr 2017) This release adds a gRPC compatibility wrapper. It is fully backward-compatible. @@ -489,7 +505,7 @@ Enhancements: [#402]: https://github.com/uber-go/zap/pull/402 -## v1.1.0 (31 Mar 2017) +## 1.1.0 (31 Mar 2017) This release fixes two bugs and adds some enhancements to zap's testing helpers. It is fully backward-compatible. @@ -510,7 +526,7 @@ Thanks to @moitias for contributing to this release. [#396]: https://github.com/uber-go/zap/pull/396 [#386]: https://github.com/uber-go/zap/pull/386 -## v1.0.0 (14 Mar 2017) +## 1.0.0 (14 Mar 2017) This is zap's first stable release. All exported APIs are now final, and no further breaking changes will be made in the 1.x release series. Anyone using a @@ -569,7 +585,7 @@ contributions to this release. [#365]: https://github.com/uber-go/zap/pull/365 [#372]: https://github.com/uber-go/zap/pull/372 -## v1.0.0-rc.3 (7 Mar 2017) +## 1.0.0-rc.3 (7 Mar 2017) This is the third release candidate for zap's stable release. There are no breaking changes. @@ -595,7 +611,7 @@ Thanks to @ansel1 and @suyash for their contributions to this release. [#353]: https://github.com/uber-go/zap/pull/353 [#311]: https://github.com/uber-go/zap/pull/311 -## v1.0.0-rc.2 (21 Feb 2017) +## 1.0.0-rc.2 (21 Feb 2017) This is the second release candidate for zap's stable release. It includes two breaking changes. @@ -641,7 +657,7 @@ Thanks to @skipor and @chapsuk for their contributions to this release. [#326]: https://github.com/uber-go/zap/pull/326 [#300]: https://github.com/uber-go/zap/pull/300 -## v1.0.0-rc.1 (14 Feb 2017) +## 1.0.0-rc.1 (14 Feb 2017) This is the first release candidate for zap's stable release. There are multiple breaking changes and improvements from the pre-release version. Most notably: @@ -661,7 +677,7 @@ breaking changes and improvements from the pre-release version. Most notably: * Sampling is more accurate, and doesn't depend on the standard library's shared timer heap. 
-## v0.1.0-beta.1 (6 Feb 2017) +## 0.1.0-beta.1 (6 Feb 2017) This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and upgrade at their leisure. Since this is the first tagged release, there are no diff --git a/metrics/vendor/go.uber.org/zap/LICENSE.txt b/metrics/vendor/go.uber.org/zap/LICENSE similarity index 100% rename from metrics/vendor/go.uber.org/zap/LICENSE.txt rename to metrics/vendor/go.uber.org/zap/LICENSE diff --git a/metrics/vendor/go.uber.org/zap/README.md b/metrics/vendor/go.uber.org/zap/README.md index 9de08927be..a17035cb6f 100644 --- a/metrics/vendor/go.uber.org/zap/README.md +++ b/metrics/vendor/go.uber.org/zap/README.md @@ -1,7 +1,16 @@ -# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +# :zap: zap + + +
Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+ ## Installation `go get -u go.uber.org/zap` @@ -66,41 +75,44 @@ Log a message and 10 fields: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 1744 ns/op | +0% | 5 allocs/op -| :zap: zap (sugared) | 2483 ns/op | +42% | 10 allocs/op -| zerolog | 918 ns/op | -47% | 1 allocs/op -| go-kit | 5590 ns/op | +221% | 57 allocs/op -| slog | 5640 ns/op | +223% | 40 allocs/op -| apex/log | 21184 ns/op | +1115% | 63 allocs/op -| logrus | 24338 ns/op | +1296% | 79 allocs/op -| log15 | 26054 ns/op | +1394% | 74 allocs/op +| :zap: zap | 656 ns/op | +0% | 5 allocs/op +| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op +| zerolog | 380 ns/op | -42% | 1 allocs/op +| go-kit | 2249 ns/op | +243% | 57 allocs/op +| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op +| slog | 2481 ns/op | +278% | 42 allocs/op +| apex/log | 9591 ns/op | +1362% | 63 allocs/op +| log15 | 11393 ns/op | +1637% | 75 allocs/op +| logrus | 11654 ns/op | +1677% | 79 allocs/op Log a message with a logger that already has 10 fields of context: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 193 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 227 ns/op | +18% | 1 allocs/op -| zerolog | 81 ns/op | -58% | 0 allocs/op -| slog | 322 ns/op | +67% | 0 allocs/op -| go-kit | 5377 ns/op | +2686% | 56 allocs/op -| apex/log | 19518 ns/op | +10013% | 53 allocs/op -| log15 | 19812 ns/op | +10165% | 70 allocs/op -| logrus | 21997 ns/op | +11297% | 68 allocs/op +| :zap: zap | 67 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op +| zerolog | 35 ns/op | -48% | 0 allocs/op +| slog | 193 ns/op | +188% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op +| go-kit | 2460 ns/op | +3572% | 56 allocs/op +| log15 | 9038 ns/op | +13390% | 70 allocs/op +| apex/log | 9068 ns/op | +13434% | 53 allocs/op +| logrus | 10521 ns/op | +15603% | 68 allocs/op Log a static string, without any context or `printf`-style templating: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 165 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 212 ns/op | +28% | 1 allocs/op -| zerolog | 95 ns/op | -42% | 0 allocs/op -| slog | 296 ns/op | +79% | 0 allocs/op -| go-kit | 415 ns/op | +152% | 9 allocs/op -| standard library | 422 ns/op | +156% | 2 allocs/op -| apex/log | 1601 ns/op | +870% | 5 allocs/op -| logrus | 3017 ns/op | +1728% | 23 allocs/op -| log15 | 3469 ns/op | +2002% | 20 allocs/op +| :zap: zap | 63 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op +| zerolog | 32 ns/op | -49% | 0 allocs/op +| standard library | 124 ns/op | +97% | 1 allocs/op +| slog | 196 ns/op | +211% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op +| go-kit | 213 ns/op | +238% | 9 allocs/op +| apex/log | 771 ns/op | +1124% | 5 allocs/op +| logrus | 1439 ns/op | +2184% | 23 allocs/op +| log15 | 2069 ns/op | +3184% | 20 allocs/op ## Development Status: Stable @@ -120,7 +132,7 @@ standard.
-Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are diff --git a/metrics/vendor/go.uber.org/zap/buffer/buffer.go b/metrics/vendor/go.uber.org/zap/buffer/buffer.go index 27fb5cd5da..0b8540c213 100644 --- a/metrics/vendor/go.uber.org/zap/buffer/buffer.go +++ b/metrics/vendor/go.uber.org/zap/buffer/buffer.go @@ -42,7 +42,7 @@ func (b *Buffer) AppendByte(v byte) { b.bs = append(b.bs, v) } -// AppendBytes writes a single byte to the Buffer. +// AppendBytes writes the given slice of bytes to the Buffer. func (b *Buffer) AppendBytes(v []byte) { b.bs = append(b.bs, v...) } diff --git a/metrics/vendor/go.uber.org/zap/field.go b/metrics/vendor/go.uber.org/zap/field.go index c8dd3358a9..6743930b82 100644 --- a/metrics/vendor/go.uber.org/zap/field.go +++ b/metrics/vendor/go.uber.org/zap/field.go @@ -460,6 +460,8 @@ func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error { // - https://github.com/uber-go/zap/pull/1304 // - https://github.com/uber-go/zap/pull/1305 // - https://github.com/uber-go/zap/pull/1308 +// +// See https://github.com/golang/go/issues/62077 for upstream issue. type anyFieldC[T any] func(string, T) Field func (f anyFieldC[T]) Any(key string, val any) Field { diff --git a/metrics/vendor/go.uber.org/zap/logger.go b/metrics/vendor/go.uber.org/zap/logger.go index 6205fe48a6..c4d3003239 100644 --- a/metrics/vendor/go.uber.org/zap/logger.go +++ b/metrics/vendor/go.uber.org/zap/logger.go @@ -43,6 +43,7 @@ type Logger struct { development bool addCaller bool + onPanic zapcore.CheckWriteHook // default is WriteThenPanic onFatal zapcore.CheckWriteHook // default is WriteThenFatal name string @@ -345,27 +346,12 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Set up any required terminal behavior. switch ent.Level { case zapcore.PanicLevel: - ce = ce.After(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) case zapcore.FatalLevel: - onFatal := log.onFatal - // nil or WriteThenNoop will lead to continued execution after - // a Fatal log entry, which is unexpected. For example, - // - // f, err := os.Open(..) - // if err != nil { - // log.Fatal("cannot open", zap.Error(err)) - // } - // fmt.Println(f.Name()) - // - // The f.Name() will panic if we continue execution after the - // log.Fatal. - if onFatal == nil || onFatal == zapcore.WriteThenNoop { - onFatal = zapcore.WriteThenFatal - } - ce = ce.After(ent, onFatal) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal)) case zapcore.DPanicLevel: if log.development { - ce = ce.After(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) } } @@ -430,3 +416,20 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { return ce } + +func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook { + // A nil or WriteThenNoop hook will lead to continued execution after + // a Panic or Fatal log entry, which is unexpected. For example, + // + // f, err := os.Open(..) + // if err != nil { + // log.Fatal("cannot open", zap.Error(err)) + // } + // fmt.Println(f.Name()) + // + // The f.Name() will panic if we continue execution after the log.Fatal. 
+ if override == nil || override == zapcore.WriteThenNoop { + return defaultHook + } + return override +} diff --git a/metrics/vendor/go.uber.org/zap/options.go b/metrics/vendor/go.uber.org/zap/options.go index c4f3bca3d2..43d357ac90 100644 --- a/metrics/vendor/go.uber.org/zap/options.go +++ b/metrics/vendor/go.uber.org/zap/options.go @@ -132,6 +132,21 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option { }) } +// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs. +// Zap will call this hook after writing a log statement with a Panic/DPanic level. +// +// For example, the following builds a logger that will exit the current +// goroutine after writing a Panic/DPanic log message, but it will not start a panic. +// +// zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit)) +// +// This is useful for testing Panic/DPanic log output. +func WithPanicHook(hook zapcore.CheckWriteHook) Option { + return optionFunc(func(log *Logger) { + log.onPanic = hook + }) +} + // OnFatal sets the action to take on fatal logs. // // Deprecated: Use [WithFatalHook] instead. diff --git a/metrics/vendor/go.uber.org/zap/sugar.go b/metrics/vendor/go.uber.org/zap/sugar.go index 00ac5fe3ac..8904cd0871 100644 --- a/metrics/vendor/go.uber.org/zap/sugar.go +++ b/metrics/vendor/go.uber.org/zap/sugar.go @@ -115,6 +115,21 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} } +// WithLazy adds a variadic number of fields to the logging context lazily. +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. +// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// Similar to [With], fields added to the child don't affect the parent, +// and vice versa. Also, the keys in key-value pairs should be strings. In development, +// passing a non-string key panics, while in production it logs an error and skips the pair. +// Passing an orphaned key has the same behavior. +func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)} +} + // Level reports the minimum enabled level for this logger. // // For NopLoggers, this is [zapcore.InvalidLevel]. @@ -122,6 +137,12 @@ func (s *SugaredLogger) Level() zapcore.Level { return zapcore.LevelOf(s.base.core) } +// Log logs the provided arguments at provided level. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) { + s.log(lvl, "", args, nil) +} + // Debug logs the provided arguments at [DebugLevel]. // Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Debug(args ...interface{}) { @@ -165,6 +186,12 @@ func (s *SugaredLogger) Fatal(args ...interface{}) { s.log(FatalLevel, "", args, nil) } +// Logf formats the message according to the format specifier +// and logs it at provided level. +func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) { + s.log(lvl, template, args, nil) +} + // Debugf formats the message according to the format specifier // and logs it at [DebugLevel]. 
func (s *SugaredLogger) Debugf(template string, args ...interface{}) { @@ -208,6 +235,12 @@ func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { s.log(FatalLevel, template, args, nil) } +// Logw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) { + s.log(lvl, msg, nil, keysAndValues) +} + // Debugw logs a message with some additional context. The variadic key-value // pairs are treated as they are in With. // @@ -255,6 +288,12 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { s.log(FatalLevel, msg, nil, keysAndValues) } +// Logln logs a message at provided level. +// Spaces are always added between arguments. +func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) { + s.logln(lvl, args, nil) +} + // Debugln logs a message at [DebugLevel]. // Spaces are always added between arguments. func (s *SugaredLogger) Debugln(args ...interface{}) { diff --git a/metrics/vendor/go.uber.org/zap/zapcore/console_encoder.go b/metrics/vendor/go.uber.org/zap/zapcore/console_encoder.go index 8ca0bfaf56..cc2b4e07b9 100644 --- a/metrics/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/metrics/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -77,7 +77,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, // If this ever becomes a performance bottleneck, we can implement // ArrayEncoder for our plain-text format. arr := getSliceEncoder() - if c.TimeKey != "" && c.EncodeTime != nil { + if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() { c.EncodeTime(ent.Time, arr) } if c.LevelKey != "" && c.EncodeLevel != nil { diff --git a/metrics/vendor/go.uber.org/zap/zapcore/encoder.go b/metrics/vendor/go.uber.org/zap/zapcore/encoder.go index 5769ff3e4e..0446254156 100644 --- a/metrics/vendor/go.uber.org/zap/zapcore/encoder.go +++ b/metrics/vendor/go.uber.org/zap/zapcore/encoder.go @@ -37,6 +37,9 @@ const DefaultLineEnding = "\n" const OmitKey = "" // A LevelEncoder serializes a Level to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type LevelEncoder func(Level, PrimitiveArrayEncoder) // LowercaseLevelEncoder serializes a Level to a lowercase string. For example, @@ -90,6 +93,9 @@ func (e *LevelEncoder) UnmarshalText(text []byte) error { } // A TimeEncoder serializes a time.Time to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type TimeEncoder func(time.Time, PrimitiveArrayEncoder) // EpochTimeEncoder serializes a time.Time to a floating-point number of seconds @@ -219,6 +225,9 @@ func (e *TimeEncoder) UnmarshalJSON(data []byte) error { } // A DurationEncoder serializes a time.Duration to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) // SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. @@ -262,6 +271,9 @@ func (e *DurationEncoder) UnmarshalText(text []byte) error { } // A CallerEncoder serializes an EntryCaller to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. 
type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) // FullCallerEncoder serializes a caller in /full/path/to/package/file:line @@ -292,6 +304,9 @@ func (e *CallerEncoder) UnmarshalText(text []byte) error { // A NameEncoder serializes a period-separated logger name to a primitive // type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type NameEncoder func(string, PrimitiveArrayEncoder) // FullNameEncoder serializes the logger name as-is. diff --git a/metrics/vendor/go.uber.org/zap/zapcore/field.go b/metrics/vendor/go.uber.org/zap/zapcore/field.go index 95bdb0a126..308c9781ed 100644 --- a/metrics/vendor/go.uber.org/zap/zapcore/field.go +++ b/metrics/vendor/go.uber.org/zap/zapcore/field.go @@ -47,7 +47,7 @@ const ( ByteStringType // Complex128Type indicates that the field carries a complex128. Complex128Type - // Complex64Type indicates that the field carries a complex128. + // Complex64Type indicates that the field carries a complex64. Complex64Type // DurationType indicates that the field carries a time.Duration. DurationType diff --git a/metrics/vendor/go.uber.org/zap/zapcore/json_encoder.go b/metrics/vendor/go.uber.org/zap/zapcore/json_encoder.go index c8ab86979b..9685169b2e 100644 --- a/metrics/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/metrics/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -372,7 +372,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, final.AppendString(ent.Level.String()) } } - if final.TimeKey != "" { + if final.TimeKey != "" && !ent.Time.IsZero() { final.AddTime(final.TimeKey, ent.Time) } if ent.LoggerName != "" && final.NameKey != "" { diff --git a/metrics/vendor/modules.txt b/metrics/vendor/modules.txt index 099e1e40cf..1b62600bda 100644 --- a/metrics/vendor/modules.txt +++ b/metrics/vendor/modules.txt @@ -1,7 +1,7 @@ # github.com/Masterminds/semver/v3 v3.2.1 ## explicit; go 1.18 github.com/Masterminds/semver/v3 -# github.com/aws/aws-sdk-go v1.50.9 +# github.com/aws/aws-sdk-go v1.51.7 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/awserr @@ -297,8 +297,8 @@ github.com/red-hat-storage/ocs-operator/api/v4/v1alpha1 # github.com/red-hat-storage/ocs-operator/v4 v4.0.0-00010101000000-000000000000 => ../ ## explicit; go 1.21 github.com/red-hat-storage/ocs-operator/v4/version -# github.com/rook/rook v1.13.7 -## explicit; go 1.20 +# github.com/rook/rook v1.14.3 +## explicit; go 1.21 github.com/rook/rook/pkg/client/clientset/versioned github.com/rook/rook/pkg/client/clientset/versioned/scheme github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1 @@ -322,7 +322,7 @@ github.com/stretchr/testify/assert # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr -# go.uber.org/zap v1.26.0 +# go.uber.org/zap v1.27.0 ## explicit; go 1.19 go.uber.org/zap go.uber.org/zap/buffer diff --git a/tools/volume-migration/main.go b/tools/volume-migration/main.go index 18cb0adcf3..7707bada31 100644 --- a/tools/volume-migration/main.go +++ b/tools/volume-migration/main.go @@ -1,3 +1,7 @@ +// +kubebuilder:skip +// +kubebuilder:validation:Optional +// +optional +// //nolint:typecheck package main diff --git a/vendor/github.com/ceph/go-ceph/LICENSE b/vendor/github.com/ceph/go-ceph/LICENSE new file mode 100644 index 0000000000..08d70bfc05 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Noah Watkins + +Permission is hereby granted, free of charge, to any 
person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ceph/go-ceph/internal/callbacks/callbacks.go b/vendor/github.com/ceph/go-ceph/internal/callbacks/callbacks.go new file mode 100644 index 0000000000..93bbb7280b --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/internal/callbacks/callbacks.go @@ -0,0 +1,65 @@ +package callbacks + +import ( + "sync" +) + +// The logic of this file is largely adapted from: +// https://github.com/golang/go/wiki/cgo#function-variables +// +// Also helpful: +// https://eli.thegreenplace.net/2019/passing-callbacks-and-pointers-to-cgo/ + +// Callbacks provides a tracker for data that is to be passed between Go +// and C callback functions. The Go callback/object may not be passed +// by a pointer to C code and so instead integer IDs into an internal +// map are used. +// Typically the item being added will either be a callback function or +// a data structure containing a callback function. It is up to the caller +// to control and validate what "callbacks" get used. +type Callbacks struct { + mutex sync.RWMutex + cmap map[uintptr]interface{} + lastID uintptr +} + +// New returns a new callbacks tracker. +func New() *Callbacks { + return &Callbacks{cmap: make(map[uintptr]interface{})} +} + +// getID returns a unique ID. +// NOTE: cb.mutex must be locked already! +func (cb *Callbacks) getID() uintptr { + for exists := true; exists; { + cb.lastID++ + // Sanity check for the very unlikely case of an integer overflow in long + // running processes. + _, exists = cb.cmap[cb.lastID] + } + return cb.lastID +} + +// Add a callback/object to the tracker and return a new ID +// for the object. +func (cb *Callbacks) Add(v interface{}) uintptr { + cb.mutex.Lock() + defer cb.mutex.Unlock() + id := cb.getID() + cb.cmap[id] = v + return id +} + +// Remove a callback/object given it's ID. +func (cb *Callbacks) Remove(id uintptr) { + cb.mutex.Lock() + defer cb.mutex.Unlock() + delete(cb.cmap, id) +} + +// Lookup returns a mapped callback/object given an ID. 
+func (cb *Callbacks) Lookup(id uintptr) interface{} { + cb.mutex.RLock() + defer cb.mutex.RUnlock() + return cb.cmap[id] +} diff --git a/vendor/github.com/ceph/go-ceph/internal/cutil/aliases.go b/vendor/github.com/ceph/go-ceph/internal/cutil/aliases.go new file mode 100644 index 0000000000..610ead9168 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/internal/cutil/aliases.go @@ -0,0 +1,66 @@ +package cutil + +/* +#include +#include +typedef void* voidptr; +*/ +import "C" + +import ( + "math" + "unsafe" +) + +const ( + // MaxIdx is the maximum index on 32 bit systems + MaxIdx = math.MaxInt32 // 2GB, max int32 value, should be safe + + // PtrSize is the size of a pointer + PtrSize = C.sizeof_voidptr + + // SizeTSize is the size of C.size_t + SizeTSize = C.sizeof_size_t +) + +// Compile-time assertion ensuring that Go's `int` is at least as large as C's. +const _ = unsafe.Sizeof(int(0)) - C.sizeof_int + +// SizeT wraps size_t from C. +type SizeT C.size_t + +// This section contains a bunch of types that are basically just +// unsafe.Pointer but have specific types to help "self document" what the +// underlying pointer is really meant to represent. + +// CPtr is an unsafe.Pointer to C allocated memory +type CPtr unsafe.Pointer + +// CharPtrPtr is an unsafe pointer wrapping C's `char**`. +type CharPtrPtr unsafe.Pointer + +// CharPtr is an unsafe pointer wrapping C's `char*`. +type CharPtr unsafe.Pointer + +// SizeTPtr is an unsafe pointer wrapping C's `size_t*`. +type SizeTPtr unsafe.Pointer + +// FreeFunc is a wrapper around calls to, or act like, C's free function. +type FreeFunc func(unsafe.Pointer) + +// Malloc is C.malloc +func Malloc(s SizeT) CPtr { return CPtr(C.malloc(C.size_t(s))) } + +// Free is C.free +func Free(p CPtr) { C.free(unsafe.Pointer(p)) } + +// CString is C.CString +func CString(s string) CharPtr { return CharPtr((C.CString(s))) } + +// CBytes is C.CBytes +func CBytes(b []byte) CPtr { return CPtr(C.CBytes(b)) } + +// Memcpy is C.memcpy +func Memcpy(dst, src CPtr, n SizeT) { + C.memcpy(unsafe.Pointer(dst), unsafe.Pointer(src), C.size_t(n)) +} diff --git a/vendor/github.com/ceph/go-ceph/internal/cutil/buffergroup.go b/vendor/github.com/ceph/go-ceph/internal/cutil/buffergroup.go new file mode 100644 index 0000000000..447ec11cc0 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/internal/cutil/buffergroup.go @@ -0,0 +1,89 @@ +package cutil + +// #include +import "C" + +import ( + "unsafe" +) + +// BufferGroup is a helper structure that holds Go-allocated slices of +// C-allocated strings and their respective lengths. Useful for C functions +// that consume byte buffers with explicit length instead of null-terminated +// strings. When used as input arguments in C functions, caller must make sure +// the C code will not hold any pointers to either of the struct's attributes +// after that C function returns. +type BufferGroup struct { + // C-allocated buffers. + Buffers []CharPtr + // Lengths of C buffers, where Lengths[i] = length(Buffers[i]). + Lengths []SizeT +} + +// TODO: should BufferGroup implementation change and the slices would contain +// nested Go pointers, they must be pinned with PtrGuard. + +// NewBufferGroupStrings returns new BufferGroup constructed from strings. 
+func NewBufferGroupStrings(strs []string) *BufferGroup { + s := &BufferGroup{ + Buffers: make([]CharPtr, len(strs)), + Lengths: make([]SizeT, len(strs)), + } + + for i, str := range strs { + bs := []byte(str) + s.Buffers[i] = CharPtr(C.CBytes(bs)) + s.Lengths[i] = SizeT(len(bs)) + } + + return s +} + +// NewBufferGroupBytes returns new BufferGroup constructed +// from slice of byte slices. +func NewBufferGroupBytes(bss [][]byte) *BufferGroup { + s := &BufferGroup{ + Buffers: make([]CharPtr, len(bss)), + Lengths: make([]SizeT, len(bss)), + } + + for i, bs := range bss { + s.Buffers[i] = CharPtr(C.CBytes(bs)) + s.Lengths[i] = SizeT(len(bs)) + } + + return s +} + +// Free free()s the C-allocated memory. +func (s *BufferGroup) Free() { + for _, ptr := range s.Buffers { + C.free(unsafe.Pointer(ptr)) + } + + s.Buffers = nil + s.Lengths = nil +} + +// BuffersPtr returns a pointer to the beginning of the Buffers slice. +func (s *BufferGroup) BuffersPtr() CharPtrPtr { + if len(s.Buffers) == 0 { + return nil + } + + return CharPtrPtr(&s.Buffers[0]) +} + +// LengthsPtr returns a pointer to the beginning of the Lengths slice. +func (s *BufferGroup) LengthsPtr() SizeTPtr { + if len(s.Lengths) == 0 { + return nil + } + + return SizeTPtr(&s.Lengths[0]) +} + +func testBufferGroupGet(s *BufferGroup, index int) (str string, length int) { + bs := C.GoBytes(unsafe.Pointer(s.Buffers[index]), C.int(s.Lengths[index])) + return string(bs), int(s.Lengths[index]) +} diff --git a/vendor/github.com/ceph/go-ceph/internal/cutil/command_input.go b/vendor/github.com/ceph/go-ceph/internal/cutil/command_input.go new file mode 100644 index 0000000000..fc11b82eac --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/internal/cutil/command_input.go @@ -0,0 +1,62 @@ +package cutil + +/* +#include +*/ +import "C" + +import ( + "unsafe" +) + +// CommandInput can be used to manage the input to ceph's *_command functions. +type CommandInput struct { + cmd []*C.char + inbuf []byte +} + +// NewCommandInput creates C-level pointers from go byte buffers suitable +// for passing off to ceph's *_command functions. +func NewCommandInput(cmd [][]byte, inputBuffer []byte) *CommandInput { + ci := &CommandInput{ + cmd: make([]*C.char, len(cmd)), + inbuf: inputBuffer, + } + for i := range cmd { + ci.cmd[i] = C.CString(string(cmd[i])) + } + return ci +} + +// Free any C memory managed by this CommandInput. +func (ci *CommandInput) Free() { + for i := range ci.cmd { + C.free(unsafe.Pointer(ci.cmd[i])) + } + ci.cmd = nil +} + +// Cmd returns an unsafe wrapper around an array of C-strings. +func (ci *CommandInput) Cmd() CharPtrPtr { + ptr := &ci.cmd[0] + return CharPtrPtr(ptr) +} + +// CmdLen returns the length of the array of C-strings returned by +// Cmd. +func (ci *CommandInput) CmdLen() SizeT { + return SizeT(len(ci.cmd)) +} + +// InBuf returns an unsafe wrapper to a C char*. +func (ci *CommandInput) InBuf() CharPtr { + if len(ci.inbuf) == 0 { + return nil + } + return CharPtr(&ci.inbuf[0]) +} + +// InBufLen returns the length of the buffer returned by InBuf. 
+func (ci *CommandInput) InBufLen() SizeT { + return SizeT(len(ci.inbuf)) +} diff --git a/vendor/github.com/ceph/go-ceph/internal/cutil/command_output.go b/vendor/github.com/ceph/go-ceph/internal/cutil/command_output.go new file mode 100644 index 0000000000..8b0c6e5295 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/internal/cutil/command_output.go @@ -0,0 +1,100 @@ +package cutil + +/* +#include +*/ +import "C" + +import ( + "unsafe" +) + +// CommandOutput can be used to manage the outputs of ceph's *_command +// functions. +type CommandOutput struct { + free FreeFunc + outBuf *C.char + outBufLen C.size_t + outs *C.char + outsLen C.size_t +} + +// NewCommandOutput returns an empty CommandOutput. The pointers that +// a CommandOutput provides can be used to get the results of ceph's +// *_command functions. +func NewCommandOutput() *CommandOutput { + return &CommandOutput{ + free: free, + } +} + +// SetFreeFunc sets the function used to free memory held by CommandOutput. +// Not all uses of CommandOutput expect to use the basic C.free function +// and either require or prefer the use of a custom deallocation function. +// Use SetFreeFunc to change the free function and return the modified +// CommandOutput object. +func (co *CommandOutput) SetFreeFunc(f FreeFunc) *CommandOutput { + co.free = f + return co +} + +// Free any C memory tracked by this object. +func (co *CommandOutput) Free() { + if co.outBuf != nil { + co.free(unsafe.Pointer(co.outBuf)) + } + if co.outs != nil { + co.free(unsafe.Pointer(co.outs)) + } +} + +// OutBuf returns an unsafe wrapper around a pointer to a `char*`. +func (co *CommandOutput) OutBuf() CharPtrPtr { + return CharPtrPtr(&co.outBuf) +} + +// OutBufLen returns an unsafe wrapper around a pointer to a size_t. +func (co *CommandOutput) OutBufLen() SizeTPtr { + return SizeTPtr(&co.outBufLen) +} + +// Outs returns an unsafe wrapper around a pointer to a `char*`. +func (co *CommandOutput) Outs() CharPtrPtr { + return CharPtrPtr(&co.outs) +} + +// OutsLen returns an unsafe wrapper around a pointer to a size_t. +func (co *CommandOutput) OutsLen() SizeTPtr { + return SizeTPtr(&co.outsLen) +} + +// GoValues returns native go values converted from the internal C-language +// values tracked by this object. +func (co *CommandOutput) GoValues() (buf []byte, status string) { + if co.outBufLen > 0 { + buf = C.GoBytes(unsafe.Pointer(co.outBuf), C.int(co.outBufLen)) + } + if co.outsLen > 0 { + status = C.GoStringN(co.outs, C.int(co.outsLen)) + } + return buf, status +} + +// testSetString is only used by the unit tests for this file. +// It is located here due to the restriction on having import "C" in +// go test files. :-( +// It mimics a C function that takes a pointer to a +// string and length and allocates memory and sets the pointers +// to the new string and its length. +func testSetString(strp CharPtrPtr, lenp SizeTPtr, s string) { + sp := (**C.char)(strp) + lp := (*C.size_t)(lenp) + *sp = C.CString(s) + *lp = C.size_t(len(s)) +} + +// free wraps C.free. +// Required for unit tests that may not use cgo directly. 
+func free(p unsafe.Pointer) { + C.free(p) +} diff --git a/vendor/github.com/ceph/go-ceph/internal/cutil/cslice.go b/vendor/github.com/ceph/go-ceph/internal/cutil/cslice.go new file mode 100644 index 0000000000..e13473703e --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/internal/cutil/cslice.go @@ -0,0 +1,76 @@ +package cutil + +// The following code needs some explanation: +// This creates slices on top of the C memory buffers allocated before in +// order to safely and comfortably use them as arrays. First the void pointer +// is cast to a pointer to an array of the type that will be stored in the +// array. Because the size of an array is a constant, but the real array size +// is dynamic, we just use the biggest possible index value MaxIdx, to make +// sure it's always big enough. (Nothing is allocated by casting, so the size +// can be arbitrarily big.) So, if the array should store items of myType, the +// cast would be (*[MaxIdx]myItem)(myCMemPtr). +// From that array pointer a slice is created with the [start:end:capacity] +// syntax. The capacity must be set explicitly here, because by default it +// would be set to the size of the original array, which is MaxIdx, which +// doesn't reflect reality in this case. This results in definitions like: +// cSlice := (*[MaxIdx]myItem)(myCMemPtr)[:numOfItems:numOfItems] + +////////// CPtr ////////// + +// CPtrCSlice is a C allocated slice of C pointers. +type CPtrCSlice []CPtr + +// NewCPtrCSlice returns a CPtrSlice. +// Similar to CString it must be freed with slice.Free() +func NewCPtrCSlice(size int) CPtrCSlice { + if size == 0 { + return nil + } + cMem := Malloc(SizeT(size) * PtrSize) + cSlice := (*[MaxIdx]CPtr)(cMem)[:size:size] + return cSlice +} + +// Ptr returns a pointer to CPtrSlice +func (v *CPtrCSlice) Ptr() CPtr { + if len(*v) == 0 { + return nil + } + return CPtr(&(*v)[0]) +} + +// Free frees a CPtrSlice +func (v *CPtrCSlice) Free() { + Free(v.Ptr()) + *v = nil +} + +////////// SizeT ////////// + +// SizeTCSlice is a C allocated slice of C.size_t. +type SizeTCSlice []SizeT + +// NewSizeTCSlice returns a SizeTCSlice. +// Similar to CString it must be freed with slice.Free() +func NewSizeTCSlice(size int) SizeTCSlice { + if size == 0 { + return nil + } + cMem := Malloc(SizeT(size) * SizeTSize) + cSlice := (*[MaxIdx]SizeT)(cMem)[:size:size] + return cSlice +} + +// Ptr returns a pointer to SizeTCSlice +func (v *SizeTCSlice) Ptr() CPtr { + if len(*v) == 0 { + return nil + } + return CPtr(&(*v)[0]) +} + +// Free frees a SizeTCSlice +func (v *SizeTCSlice) Free() { + Free(v.Ptr()) + *v = nil +} diff --git a/vendor/github.com/ceph/go-ceph/internal/cutil/iovec.go b/vendor/github.com/ceph/go-ceph/internal/cutil/iovec.go new file mode 100644 index 0000000000..a711369cc0 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/internal/cutil/iovec.go @@ -0,0 +1,60 @@ +package cutil + +/* +#include +#include +*/ +import "C" +import ( + "unsafe" +) + +// Iovec is a slice of iovec structs. Might have allocated C memory, so it must +// be freed with the Free() method. +type Iovec struct { + iovec []C.struct_iovec + sbs []*SyncBuffer +} + +const iovecSize = C.sizeof_struct_iovec + +// ByteSlicesToIovec creates an Iovec and links it to Go buffers in data. 
+func ByteSlicesToIovec(data [][]byte) (v Iovec) { + n := len(data) + iovecMem := C.malloc(iovecSize * C.size_t(n)) + v.iovec = (*[MaxIdx]C.struct_iovec)(iovecMem)[:n:n] + for i, b := range data { + sb := NewSyncBuffer(CPtr(&v.iovec[i].iov_base), b) + v.sbs = append(v.sbs, sb) + v.iovec[i].iov_len = C.size_t(len(b)) + } + return +} + +// Sync makes sure the slices contain the same as the C buffers +func (v *Iovec) Sync() { + for _, sb := range v.sbs { + sb.Sync() + } +} + +// Pointer returns a pointer to the iovec +func (v *Iovec) Pointer() unsafe.Pointer { + return unsafe.Pointer(&v.iovec[0]) +} + +// Len returns a pointer to the iovec +func (v *Iovec) Len() int { + return len(v.iovec) +} + +// Free the C memory in the Iovec. +func (v *Iovec) Free() { + for _, sb := range v.sbs { + sb.Release() + } + if len(v.iovec) != 0 { + C.free(unsafe.Pointer(&v.iovec[0])) + } + v.iovec = nil +} diff --git a/vendor/github.com/ceph/go-ceph/internal/cutil/ptrguard.go b/vendor/github.com/ceph/go-ceph/internal/cutil/ptrguard.go new file mode 100644 index 0000000000..d49ed3bfb1 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/internal/cutil/ptrguard.go @@ -0,0 +1,37 @@ +//go:build !go1.21 +// +build !go1.21 + +// This code assumes a non-moving garbage collector, which is the case until at +// least go 1.20 + +package cutil + +import ( + "unsafe" +) + +// PtrGuard respresents a guarded Go pointer (pointing to memory allocated by Go +// runtime) stored in C memory (allocated by C) +type PtrGuard struct { + cPtr CPtr + goPtr unsafe.Pointer +} + +// NewPtrGuard writes the goPtr (pointing to Go memory) into C memory at the +// position cPtr, and returns a PtrGuard object. +func NewPtrGuard(cPtr CPtr, goPtr unsafe.Pointer) *PtrGuard { + var v PtrGuard + v.cPtr = cPtr + v.goPtr = goPtr + p := (*unsafe.Pointer)(unsafe.Pointer(cPtr)) + *p = goPtr + return &v +} + +// Release removes the guarded Go pointer from the C memory by overwriting it +// with NULL. +func (v *PtrGuard) Release() { + p := (*unsafe.Pointer)(unsafe.Pointer(v.cPtr)) + *p = nil + v.goPtr = nil +} diff --git a/vendor/github.com/ceph/go-ceph/internal/cutil/ptrguard_pinner.go b/vendor/github.com/ceph/go-ceph/internal/cutil/ptrguard_pinner.go new file mode 100644 index 0000000000..3862b5c81c --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/internal/cutil/ptrguard_pinner.go @@ -0,0 +1,35 @@ +//go:build go1.21 +// +build go1.21 + +package cutil + +import ( + "runtime" + "unsafe" +) + +// PtrGuard respresents a guarded Go pointer (pointing to memory allocated by Go +// runtime) stored in C memory (allocated by C) +type PtrGuard struct { + cPtr CPtr + pinner runtime.Pinner +} + +// NewPtrGuard writes the goPtr (pointing to Go memory) into C memory at the +// position cPtr, and returns a PtrGuard object. +func NewPtrGuard(cPtr CPtr, goPtr unsafe.Pointer) *PtrGuard { + var v PtrGuard + v.pinner.Pin(goPtr) + v.cPtr = cPtr + p := (*unsafe.Pointer)(unsafe.Pointer(cPtr)) + *p = goPtr + return &v +} + +// Release removes the guarded Go pointer from the C memory by overwriting it +// with NULL. 
+func (v *PtrGuard) Release() { + p := (*unsafe.Pointer)(unsafe.Pointer(v.cPtr)) + *p = nil + v.pinner.Unpin() +} diff --git a/vendor/github.com/ceph/go-ceph/internal/cutil/splitbuf.go b/vendor/github.com/ceph/go-ceph/internal/cutil/splitbuf.go new file mode 100644 index 0000000000..2f439b0917 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/internal/cutil/splitbuf.go @@ -0,0 +1,49 @@ +package cutil + +import "C" + +import ( + "bytes" +) + +// SplitBuffer splits a byte-slice buffer, typically returned from C code, +// into a slice of strings. +// The contents of the buffer are assumed to be null-byte separated. +// If the buffer contains a sequence of null-bytes it will assume that the +// "space" between the bytes are meant to be empty strings. +func SplitBuffer(b []byte) []string { + return splitBufStrings(b, true) +} + +// SplitSparseBuffer splits a byte-slice buffer, typically returned from C code, +// into a slice of strings. +// The contents of the buffer are assumed to be null-byte separated. +// This function assumes that buffer to be "sparse" such that only non-null-byte +// strings will be returned, and no "empty" strings exist if null-bytes +// are found adjacent to each other. +func SplitSparseBuffer(b []byte) []string { + return splitBufStrings(b, false) +} + +// If keepEmpty is true, empty substrings will be returned, by default they are +// excluded from the results. +// This is almost certainly a suboptimal implementation, especially for +// keepEmpty=true case. Optimizing the functions is a job for another day. +func splitBufStrings(b []byte, keepEmpty bool) []string { + values := make([]string, 0) + // the final null byte should be the terminating null in C + // we never want to preserve the empty string after it + if len(b) > 0 && b[len(b)-1] == 0 { + b = b[:len(b)-1] + } + if len(b) == 0 { + return values + } + for _, s := range bytes.Split(b, []byte{0}) { + if !keepEmpty && len(s) == 0 { + continue + } + values = append(values, string(s)) + } + return values +} diff --git a/vendor/github.com/ceph/go-ceph/internal/cutil/sync_buffer.go b/vendor/github.com/ceph/go-ceph/internal/cutil/sync_buffer.go new file mode 100644 index 0000000000..d74cd2bcfc --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/internal/cutil/sync_buffer.go @@ -0,0 +1,29 @@ +//go:build !no_ptrguard +// +build !no_ptrguard + +package cutil + +import ( + "unsafe" +) + +// SyncBuffer is a C buffer connected to a data slice +type SyncBuffer struct { + pg *PtrGuard +} + +// NewSyncBuffer creates a C buffer from a data slice and stores it at CPtr +func NewSyncBuffer(cPtr CPtr, data []byte) *SyncBuffer { + var v SyncBuffer + v.pg = NewPtrGuard(cPtr, unsafe.Pointer(&data[0])) + return &v +} + +// Release releases the C buffer and nulls its stored pointer +func (v *SyncBuffer) Release() { + v.pg.Release() +} + +// Sync asserts that changes in the C buffer are available in the data +// slice +func (*SyncBuffer) Sync() {} diff --git a/vendor/github.com/ceph/go-ceph/internal/cutil/sync_buffer_memcpy.go b/vendor/github.com/ceph/go-ceph/internal/cutil/sync_buffer_memcpy.go new file mode 100644 index 0000000000..cde47c9e9a --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/internal/cutil/sync_buffer_memcpy.go @@ -0,0 +1,38 @@ +//go:build no_ptrguard +// +build no_ptrguard + +package cutil + +// SyncBuffer is a C buffer connected to a data slice +type SyncBuffer struct { + data []byte + cPtr *CPtr +} + +// NewSyncBuffer creates a C buffer from a data slice and stores it at CPtr +func NewSyncBuffer(cPtr CPtr, data 
[]byte) *SyncBuffer { + var v SyncBuffer + v.data = data + v.cPtr = (*CPtr)(cPtr) + *v.cPtr = CBytes(data) + return &v +} + +// Release releases the C buffer and nulls its stored pointer +func (v *SyncBuffer) Release() { + if v.cPtr != nil { + Free(*v.cPtr) + *v.cPtr = nil + v.cPtr = nil + } + v.data = nil +} + +// Sync asserts that changes in the C buffer are available in the data +// slice +func (v *SyncBuffer) Sync() { + if v.cPtr == nil { + return + } + Memcpy(CPtr(&v.data[0]), CPtr(*v.cPtr), SizeT(len(v.data))) +} diff --git a/vendor/github.com/ceph/go-ceph/internal/errutil/strerror.go b/vendor/github.com/ceph/go-ceph/internal/errutil/strerror.go new file mode 100644 index 0000000000..2fb24cbc59 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/internal/errutil/strerror.go @@ -0,0 +1,52 @@ +/* +Package errutil provides common functions for dealing with error conditions for +all ceph api wrappers. +*/ +package errutil + +/* force XSI-complaint strerror_r() */ + +// #define _POSIX_C_SOURCE 200112L +// #undef _GNU_SOURCE +// #include +// #include +// #include +import "C" + +import ( + "fmt" + "unsafe" +) + +// FormatErrno returns the absolute value of the errno as well as a string +// describing the errno. The string will be empty is the errno is not known. +func FormatErrno(errno int) (int, string) { + buf := make([]byte, 1024) + // strerror expects errno >= 0 + if errno < 0 { + errno = -errno + } + + ret := C.strerror_r( + C.int(errno), + (*C.char)(unsafe.Pointer(&buf[0])), + C.size_t(len(buf))) + if ret != 0 { + return errno, "" + } + + return errno, C.GoString((*C.char)(unsafe.Pointer(&buf[0]))) +} + +// FormatErrorCode returns a string that describes the supplied error source +// and error code as a string. Suitable to use in Error() methods. If the +// error code maps to an errno the string will contain a description of the +// error. Otherwise the string will only indicate the source and value if the +// value does not map to a known errno. +func FormatErrorCode(source string, errValue int) string { + _, s := FormatErrno(errValue) + if s == "" { + return fmt.Sprintf("%s: ret=%d", source, errValue) + } + return fmt.Sprintf("%s: ret=%d, %s", source, errValue, s) +} diff --git a/vendor/github.com/ceph/go-ceph/internal/log/log.go b/vendor/github.com/ceph/go-ceph/internal/log/log.go new file mode 100644 index 0000000000..90fc306273 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/internal/log/log.go @@ -0,0 +1,14 @@ +// Package log is the internal package for go-ceph logging. This package is only +// used from go-ceph code, not from consumers of go-ceph. go-ceph code uses the +// functions in this package to log information that can't be returned as +// errors. The functions default to no-ops and can be set with the external log +// package common/log by the go-ceph consumers. +package log + +func noop(string, ...interface{}) {} + +// These variables are set by the common log package. 
+var ( + Warnf = noop + Debugf = noop +) diff --git a/vendor/github.com/ceph/go-ceph/internal/retry/sizer.go b/vendor/github.com/ceph/go-ceph/internal/retry/sizer.go new file mode 100644 index 0000000000..5b27e51030 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/internal/retry/sizer.go @@ -0,0 +1,64 @@ +package retry + +// Hint is a type for retry hints +type Hint interface { + If(bool) Hint + size() int +} + +type hintInt int + +func (hint hintInt) size() int { + return int(hint) +} + +// If is a convenience function, that returns a given hint only if a certain +// condition is met (for example a test for a "buffer too small" error). +// Otherwise it returns a nil which stops the retries. +func (hint hintInt) If(cond bool) Hint { + if cond { + return hint + } + return nil +} + +// DoubleSize is a hint to retry with double the size +const DoubleSize = hintInt(0) + +// Size returns a hint for a specific size +func Size(s int) Hint { + return hintInt(s) +} + +// SizeFunc is used to implement 'resize loops' that hides the complexity of the +// sizing away from most of the application. It's a function that takes a size +// argument and returns nil, if no retry is necessary, or a hint indicating the +// size for the next retry. If errors or other results are required from the +// function, the function can write them to function closures of the surrounding +// scope. See tests for examples. +type SizeFunc func(size int) (hint Hint) + +// WithSizes repeatingly calls a SizeFunc with increasing sizes until either it +// returns nil, or the max size has been reached. If the returned hint is +// DoubleSize or indicating a size not greater than the current size, the size +// is doubled. If the hint or next size is greater than the max size, the max +// size is used for a last retry. +func WithSizes(size int, max int, f SizeFunc) { + if size > max { + return + } + for { + hint := f(size) + if hint == nil || size == max { + break + } + if hint.size() > size { + size = hint.size() + } else { + size *= 2 + } + if size > max { + size = max + } + } +} diff --git a/vendor/github.com/ceph/go-ceph/internal/timespec/timespec.go b/vendor/github.com/ceph/go-ceph/internal/timespec/timespec.go new file mode 100644 index 0000000000..1e4b09acd0 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/internal/timespec/timespec.go @@ -0,0 +1,39 @@ +package timespec + +/* +#include +*/ +import "C" + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +// Timespec behaves similarly to C's struct timespec. +// Timespec is used to retain fidelity to the C based file systems +// apis that could be lossy with the use of Go time types. +type Timespec unix.Timespec + +// CTimespecPtr is an unsafe pointer wrapping C's `struct timespec`. +type CTimespecPtr unsafe.Pointer + +// CStructToTimespec creates a new Timespec for the C 'struct timespec'. +func CStructToTimespec(cts CTimespecPtr) Timespec { + t := (*C.struct_timespec)(cts) + + return Timespec{ + Sec: int64(t.tv_sec), + Nsec: int64(t.tv_nsec), + } +} + +// CopyToCStruct copies the time values from a Timespec to a previously +// allocated C `struct timespec`. Due to restrictions on Cgo the C pointer +// must be passed via the CTimespecPtr wrapper. 
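WithSizes hides the usual "call, check for a buffer-too-small error, grow, call again" loop behind a closure; GetConfigOption, GetPoolName and GetNamespace later in this patch are real call sites. Since retry is an internal package, the standalone sketch below only mirrors its documented growth rule (use the hint if it is larger than the current size, otherwise double, and cap at max); fetchName is a made-up stand-in for a size-reporting C call.

package main

import "fmt"

// fetchName stands in for a C call that needs a caller-supplied buffer: it
// reports the required size until the buffer is big enough.
func fetchName(buf []byte) (n int, needed int) {
	const name = "volume-group-0042"
	const need = 3000 // pretend the C side wants at least 3000 bytes
	if len(buf) < need {
		return 0, need
	}
	return copy(buf, name), 0
}

func main() {
	size, max := 128, 1<<16
	var (
		buf []byte
		n   int
	)
	for {
		buf = make([]byte, size)
		var needed int
		n, needed = fetchName(buf)
		if needed == 0 || size == max {
			break
		}
		// Same growth rule as retry.WithSizes: prefer the hint when it is
		// larger than the current size, otherwise double; never exceed max.
		if needed > size {
			size = needed
		} else {
			size *= 2
		}
		if size > max {
			size = max
		}
	}
	fmt.Println(string(buf[:n]))
}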
+func CopyToCStruct(ts Timespec, cts CTimespecPtr) { + t := (*C.struct_timespec)(cts) + t.tv_sec = C.time_t(ts.Sec) + t.tv_nsec = C.long(ts.Nsec) +} diff --git a/vendor/github.com/ceph/go-ceph/rados/alloc_hint_flags.go b/vendor/github.com/ceph/go-ceph/rados/alloc_hint_flags.go new file mode 100644 index 0000000000..1fd51cdb6a --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/alloc_hint_flags.go @@ -0,0 +1,34 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// +import "C" + +// AllocHintFlags control the behavior of read and write operations. +type AllocHintFlags uint32 + +const ( + // AllocHintNoHint indicates no predefined behavior + AllocHintNoHint = AllocHintFlags(0) + // AllocHintSequentialWrite TODO + AllocHintSequentialWrite = AllocHintFlags(C.LIBRADOS_ALLOC_HINT_FLAG_SEQUENTIAL_WRITE) + // AllocHintRandomWrite TODO + AllocHintRandomWrite = AllocHintFlags(C.LIBRADOS_ALLOC_HINT_FLAG_RANDOM_WRITE) + // AllocHintSequentialRead TODO + AllocHintSequentialRead = AllocHintFlags(C.LIBRADOS_ALLOC_HINT_FLAG_SEQUENTIAL_READ) + // AllocHintRandomRead TODO + AllocHintRandomRead = AllocHintFlags(C.LIBRADOS_ALLOC_HINT_FLAG_RANDOM_READ) + // AllocHintAppendOnly TODO + AllocHintAppendOnly = AllocHintFlags(C.LIBRADOS_ALLOC_HINT_FLAG_APPEND_ONLY) + // AllocHintImmutable TODO + AllocHintImmutable = AllocHintFlags(C.LIBRADOS_ALLOC_HINT_FLAG_IMMUTABLE) + // AllocHintShortlived TODO + AllocHintShortlived = AllocHintFlags(C.LIBRADOS_ALLOC_HINT_FLAG_SHORTLIVED) + // AllocHintLonglived TODO + AllocHintLonglived = AllocHintFlags(C.LIBRADOS_ALLOC_HINT_FLAG_LONGLIVED) + // AllocHintCompressible TODO + AllocHintCompressible = AllocHintFlags(C.LIBRADOS_ALLOC_HINT_FLAG_COMPRESSIBLE) + // AllocHintIncompressible TODO + AllocHintIncompressible = AllocHintFlags(C.LIBRADOS_ALLOC_HINT_FLAG_INCOMPRESSIBLE) +) diff --git a/vendor/github.com/ceph/go-ceph/rados/command.go b/vendor/github.com/ceph/go-ceph/rados/command.go new file mode 100644 index 0000000000..3694abf573 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/command.go @@ -0,0 +1,198 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +import "C" + +import ( + "unsafe" + + "github.com/ceph/go-ceph/internal/cutil" +) + +func radosBufferFree(p unsafe.Pointer) { + C.rados_buffer_free((*C.char)(p)) +} + +// MonCommand sends a command to one of the monitors +func (c *Conn) MonCommand(args []byte) ([]byte, string, error) { + return c.MonCommandWithInputBuffer(args, nil) +} + +// MonCommandWithInputBuffer sends a command to one of the monitors, with an input buffer +func (c *Conn) MonCommandWithInputBuffer(args, inputBuffer []byte) ([]byte, string, error) { + ci := cutil.NewCommandInput([][]byte{args}, inputBuffer) + defer ci.Free() + co := cutil.NewCommandOutput().SetFreeFunc(radosBufferFree) + defer co.Free() + + ret := C.rados_mon_command( + c.cluster, + (**C.char)(ci.Cmd()), + C.size_t(ci.CmdLen()), + (*C.char)(ci.InBuf()), + C.size_t(ci.InBufLen()), + (**C.char)(co.OutBuf()), + (*C.size_t)(co.OutBufLen()), + (**C.char)(co.Outs()), + (*C.size_t)(co.OutsLen())) + buf, status := co.GoValues() + return buf, status, getError(ret) +} + +// PGCommand sends a command to one of the PGs +// +// Implements: +// +// int rados_pg_command(rados_t cluster, const char *pgstr, +// const char **cmd, size_t cmdlen, +// const char *inbuf, size_t inbuflen, +// char **outbuf, size_t *outbuflen, +// char **outs, size_t *outslen); +func (c *Conn) PGCommand(pgid []byte, args [][]byte) ([]byte, string, error) { + return 
c.PGCommandWithInputBuffer(pgid, args, nil) +} + +// PGCommandWithInputBuffer sends a command to one of the PGs, with an input buffer +// +// Implements: +// +// int rados_pg_command(rados_t cluster, const char *pgstr, +// const char **cmd, size_t cmdlen, +// const char *inbuf, size_t inbuflen, +// char **outbuf, size_t *outbuflen, +// char **outs, size_t *outslen); +func (c *Conn) PGCommandWithInputBuffer(pgid []byte, args [][]byte, inputBuffer []byte) ([]byte, string, error) { + name := C.CString(string(pgid)) + defer C.free(unsafe.Pointer(name)) + ci := cutil.NewCommandInput(args, inputBuffer) + defer ci.Free() + co := cutil.NewCommandOutput().SetFreeFunc(radosBufferFree) + defer co.Free() + + ret := C.rados_pg_command( + c.cluster, + name, + (**C.char)(ci.Cmd()), + C.size_t(ci.CmdLen()), + (*C.char)(ci.InBuf()), + C.size_t(ci.InBufLen()), + (**C.char)(co.OutBuf()), + (*C.size_t)(co.OutBufLen()), + (**C.char)(co.Outs()), + (*C.size_t)(co.OutsLen())) + buf, status := co.GoValues() + return buf, status, getError(ret) +} + +// MgrCommand sends a command to a ceph-mgr. +func (c *Conn) MgrCommand(args [][]byte) ([]byte, string, error) { + return c.MgrCommandWithInputBuffer(args, nil) +} + +// MgrCommandWithInputBuffer sends a command, with an input buffer, to a ceph-mgr. +// +// Implements: +// +// int rados_mgr_command(rados_t cluster, const char **cmd, +// size_t cmdlen, const char *inbuf, +// size_t inbuflen, char **outbuf, +// size_t *outbuflen, char **outs, +// size_t *outslen); +func (c *Conn) MgrCommandWithInputBuffer(args [][]byte, inputBuffer []byte) ([]byte, string, error) { + ci := cutil.NewCommandInput(args, inputBuffer) + defer ci.Free() + co := cutil.NewCommandOutput().SetFreeFunc(radosBufferFree) + defer co.Free() + + ret := C.rados_mgr_command( + c.cluster, + (**C.char)(ci.Cmd()), + C.size_t(ci.CmdLen()), + (*C.char)(ci.InBuf()), + C.size_t(ci.InBufLen()), + (**C.char)(co.OutBuf()), + (*C.size_t)(co.OutBufLen()), + (**C.char)(co.Outs()), + (*C.size_t)(co.OutsLen())) + buf, status := co.GoValues() + return buf, status, getError(ret) +} + +// OsdCommand sends a command to the specified ceph OSD. +func (c *Conn) OsdCommand(osd int, args [][]byte) ([]byte, string, error) { + return c.OsdCommandWithInputBuffer(osd, args, nil) +} + +// OsdCommandWithInputBuffer sends a command, with an input buffer, to the +// specified ceph OSD. +// +// Implements: +// +// int rados_osd_command(rados_t cluster, int osdid, +// const char **cmd, size_t cmdlen, +// const char *inbuf, size_t inbuflen, +// char **outbuf, size_t *outbuflen, +// char **outs, size_t *outslen); +func (c *Conn) OsdCommandWithInputBuffer( + osd int, args [][]byte, inputBuffer []byte) ([]byte, string, error) { + + ci := cutil.NewCommandInput(args, inputBuffer) + defer ci.Free() + co := cutil.NewCommandOutput().SetFreeFunc(radosBufferFree) + defer co.Free() + + ret := C.rados_osd_command( + c.cluster, + C.int(osd), + (**C.char)(ci.Cmd()), + C.size_t(ci.CmdLen()), + (*C.char)(ci.InBuf()), + C.size_t(ci.InBufLen()), + (**C.char)(co.OutBuf()), + (*C.size_t)(co.OutBufLen()), + (**C.char)(co.Outs()), + (*C.size_t)(co.OutsLen())) + buf, status := co.GoValues() + return buf, status, getError(ret) +} + +// MonCommandTarget sends a command to a specified monitor. +func (c *Conn) MonCommandTarget(name string, args [][]byte) ([]byte, string, error) { + return c.MonCommandTargetWithInputBuffer(name, args, nil) +} + +// MonCommandTargetWithInputBuffer sends a command, with an input buffer, to a specified monitor. 
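The mon/mgr/osd command helpers above all take their commands as JSON blobs. A minimal sketch of sending a mon command through the public API follows; it assumes a reachable cluster with configuration in the default locations, and rados.NewConn comes from rados.go, which is vendored later in this patch.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ceph/go-ceph/rados"
)

func main() {
	conn, err := rados.NewConn() // defined in rados.go, vendored later in this patch
	if err != nil {
		panic(err)
	}
	if err := conn.ReadDefaultConfigFile(); err != nil {
		panic(err)
	}
	if err := conn.Connect(); err != nil {
		panic(err)
	}
	defer conn.Shutdown()

	// Mon commands are JSON documents; "prefix" selects the command.
	cmd, err := json.Marshal(map[string]string{"prefix": "df", "format": "json"})
	if err != nil {
		panic(err)
	}
	buf, status, err := conn.MonCommand(cmd)
	if err != nil {
		panic(err)
	}
	fmt.Println("status:", status)
	fmt.Println(string(buf))
}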
+// +// Implements: +// +// int rados_mon_command_target(rados_t cluster, const char *name, +// const char **cmd, size_t cmdlen, +// const char *inbuf, size_t inbuflen, +// char **outbuf, size_t *outbuflen, +// char **outs, size_t *outslen); +func (c *Conn) MonCommandTargetWithInputBuffer( + name string, args [][]byte, inputBuffer []byte) ([]byte, string, error) { + + cName := C.CString(name) + defer C.free(unsafe.Pointer(cName)) + ci := cutil.NewCommandInput(args, inputBuffer) + defer ci.Free() + co := cutil.NewCommandOutput().SetFreeFunc(radosBufferFree) + defer co.Free() + + ret := C.rados_mon_command_target( + c.cluster, + cName, + (**C.char)(ci.Cmd()), + C.size_t(ci.CmdLen()), + (*C.char)(ci.InBuf()), + C.size_t(ci.InBufLen()), + (**C.char)(co.OutBuf()), + (*C.size_t)(co.OutBufLen()), + (**C.char)(co.Outs()), + (*C.size_t)(co.OutsLen())) + buf, status := co.GoValues() + return buf, status, getError(ret) +} diff --git a/vendor/github.com/ceph/go-ceph/rados/conn.go b/vendor/github.com/ceph/go-ceph/rados/conn.go new file mode 100644 index 0000000000..b866a796b6 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/conn.go @@ -0,0 +1,313 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +import "C" + +import ( + "unsafe" + + "github.com/ceph/go-ceph/internal/cutil" + "github.com/ceph/go-ceph/internal/retry" +) + +var argvPlaceholder = "placeholder" + +//revive:disable:var-naming old-yet-exported public api + +// ClusterStat represents Ceph cluster statistics. +type ClusterStat struct { + Kb uint64 + Kb_used uint64 + Kb_avail uint64 + Num_objects uint64 +} + +//revive:enable:var-naming + +// Conn is a connection handle to a Ceph cluster. +type Conn struct { + cluster C.rados_t + connected bool +} + +// ClusterRef represents a fundamental RADOS cluster connection. +type ClusterRef C.rados_t + +// Cluster returns the underlying RADOS cluster reference for this Conn. +func (c *Conn) Cluster() ClusterRef { + return ClusterRef(c.cluster) +} + +// PingMonitor sends a ping to a monitor and returns the reply. +func (c *Conn) PingMonitor(id string) (string, error) { + cid := C.CString(id) + defer C.free(unsafe.Pointer(cid)) + + var strlen C.size_t + var strout *C.char + + ret := C.rados_ping_monitor(c.cluster, cid, &strout, &strlen) + defer C.rados_buffer_free(strout) + + if ret == 0 { + reply := C.GoStringN(strout, (C.int)(strlen)) + return reply, nil + } + return "", getError(ret) +} + +// Connect establishes a connection to a RADOS cluster. It returns an error, +// if any. +func (c *Conn) Connect() error { + ret := C.rados_connect(c.cluster) + if ret != 0 { + return getError(ret) + } + c.connected = true + return nil +} + +// Shutdown disconnects from the cluster. +func (c *Conn) Shutdown() { + if err := c.ensureConnected(); err != nil { + return + } + freeConn(c) +} + +// ReadConfigFile configures the connection using a Ceph configuration file. +func (c *Conn) ReadConfigFile(path string) error { + cPath := C.CString(path) + defer C.free(unsafe.Pointer(cPath)) + ret := C.rados_conf_read_file(c.cluster, cPath) + return getError(ret) +} + +// ReadDefaultConfigFile configures the connection using a Ceph configuration +// file located at default locations. +func (c *Conn) ReadDefaultConfigFile() error { + ret := C.rados_conf_read_file(c.cluster, nil) + return getError(ret) +} + +// OpenIOContext creates and returns a new IOContext for the given pool. 
+// +// Implements: +// +// int rados_ioctx_create(rados_t cluster, const char *pool_name, +// rados_ioctx_t *ioctx); +func (c *Conn) OpenIOContext(pool string) (*IOContext, error) { + cPool := C.CString(pool) + defer C.free(unsafe.Pointer(cPool)) + ioctx := &IOContext{conn: c} + ret := C.rados_ioctx_create(c.cluster, cPool, &ioctx.ioctx) + if ret == 0 { + return ioctx, nil + } + return nil, getError(ret) +} + +// ListPools returns the names of all existing pools. +func (c *Conn) ListPools() (names []string, err error) { + buf := make([]byte, 4096) + for { + ret := C.rados_pool_list(c.cluster, + (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf))) + if ret < 0 { + return nil, getError(ret) + } + + if int(ret) > len(buf) { + buf = make([]byte, ret) + continue + } + + names = cutil.SplitSparseBuffer(buf[:ret]) + return names, nil + } +} + +// SetConfigOption sets the value of the configuration option identified by +// the given name. +func (c *Conn) SetConfigOption(option, value string) error { + cOpt, cVal := C.CString(option), C.CString(value) + defer C.free(unsafe.Pointer(cOpt)) + defer C.free(unsafe.Pointer(cVal)) + ret := C.rados_conf_set(c.cluster, cOpt, cVal) + return getError(ret) +} + +// GetConfigOption returns the value of the Ceph configuration option +// identified by the given name. +func (c *Conn) GetConfigOption(name string) (value string, err error) { + cOption := C.CString(name) + defer C.free(unsafe.Pointer(cOption)) + + var buf []byte + // range from 4k to 256KiB + retry.WithSizes(4096, 1<<18, func(size int) retry.Hint { + buf = make([]byte, size) + ret := C.rados_conf_get( + c.cluster, + cOption, + (*C.char)(unsafe.Pointer(&buf[0])), + C.size_t(len(buf))) + err = getError(ret) + return retry.DoubleSize.If(err == errNameTooLong) + }) + if err != nil { + return "", err + } + value = C.GoString((*C.char)(unsafe.Pointer(&buf[0]))) + return value, nil +} + +// WaitForLatestOSDMap blocks the caller until the latest OSD map has been +// retrieved. +func (c *Conn) WaitForLatestOSDMap() error { + ret := C.rados_wait_for_latest_osdmap(c.cluster) + return getError(ret) +} + +func (c *Conn) ensureConnected() error { + if c.connected { + return nil + } + return ErrNotConnected +} + +// GetClusterStats returns statistics about the cluster associated with the +// connection. +func (c *Conn) GetClusterStats() (stat ClusterStat, err error) { + if err := c.ensureConnected(); err != nil { + return ClusterStat{}, err + } + cStat := C.struct_rados_cluster_stat_t{} + ret := C.rados_cluster_stat(c.cluster, &cStat) + if ret < 0 { + return ClusterStat{}, getError(ret) + } + return ClusterStat{ + Kb: uint64(cStat.kb), + Kb_used: uint64(cStat.kb_used), + Kb_avail: uint64(cStat.kb_avail), + Num_objects: uint64(cStat.num_objects), + }, nil +} + +// ParseConfigArgv configures the connection using a unix style command line +// argument vector. +// +// Implements: +// +// int rados_conf_parse_argv(rados_t cluster, int argc, +// const char **argv); +func (c *Conn) ParseConfigArgv(argv []string) error { + if c.cluster == nil { + return ErrNotConnected + } + if len(argv) == 0 { + return ErrEmptyArgument + } + cargv := make([]*C.char, len(argv)) + for i := range argv { + cargv[i] = C.CString(argv[i]) + defer C.free(unsafe.Pointer(cargv[i])) + } + + ret := C.rados_conf_parse_argv(c.cluster, C.int(len(cargv)), &cargv[0]) + return getError(ret) +} + +// ParseCmdLineArgs configures the connection from command line arguments. 
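Putting the connection plumbing above together, a minimal connect-and-open sketch could look like the following; the pool name "rbd" is only an assumption, any existing pool works.

package main

import (
	"fmt"

	"github.com/ceph/go-ceph/rados"
)

func main() {
	conn, err := rados.NewConn() // NewConn lives in rados.go, vendored later in this patch
	if err != nil {
		panic(err)
	}
	if err := conn.ReadDefaultConfigFile(); err != nil {
		panic(err)
	}
	if err := conn.Connect(); err != nil {
		panic(err)
	}
	defer conn.Shutdown()

	pools, err := conn.ListPools()
	if err != nil {
		panic(err)
	}
	fmt.Println("pools:", pools)

	// "rbd" is only an assumption; any existing pool name works here.
	ioctx, err := conn.OpenIOContext("rbd")
	if err != nil {
		panic(err)
	}
	defer ioctx.Destroy()
	fmt.Println("pool id:", ioctx.GetPoolID())
}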
+// +// This function passes a placeholder value to Ceph as argv[0], see +// ParseConfigArgv for a version of this function that allows the caller to +// specify argv[0]. +func (c *Conn) ParseCmdLineArgs(args []string) error { + argv := make([]string, len(args)+1) + // Ceph expects a proper argv array as the actual contents with the + // first element containing the executable name + argv[0] = argvPlaceholder + for i := range args { + argv[i+1] = args[i] + } + return c.ParseConfigArgv(argv) +} + +// ParseDefaultConfigEnv configures the connection from the default Ceph +// environment variable CEPH_ARGS. +func (c *Conn) ParseDefaultConfigEnv() error { + ret := C.rados_conf_parse_env(c.cluster, nil) + return getError(ret) +} + +// GetFSID returns the fsid of the cluster as a hexadecimal string. The fsid +// is a unique identifier of an entire Ceph cluster. +func (c *Conn) GetFSID() (fsid string, err error) { + buf := make([]byte, 37) + ret := C.rados_cluster_fsid(c.cluster, + (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf))) + // FIXME: the success case isn't documented correctly in librados.h + if ret == 36 { + fsid = C.GoString((*C.char)(unsafe.Pointer(&buf[0]))) + return fsid, nil + } + return "", getError(ret) +} + +// GetInstanceID returns a globally unique identifier for the cluster +// connection instance. +func (c *Conn) GetInstanceID() uint64 { + // FIXME: are there any error cases for this? + return uint64(C.rados_get_instance_id(c.cluster)) +} + +// MakePool creates a new pool with default settings. +func (c *Conn) MakePool(name string) error { + cName := C.CString(name) + defer C.free(unsafe.Pointer(cName)) + ret := C.rados_pool_create(c.cluster, cName) + return getError(ret) +} + +// DeletePool deletes a pool and all the data inside the pool. +func (c *Conn) DeletePool(name string) error { + if err := c.ensureConnected(); err != nil { + return err + } + cName := C.CString(name) + defer C.free(unsafe.Pointer(cName)) + ret := C.rados_pool_delete(c.cluster, cName) + return getError(ret) +} + +// GetPoolByName returns the ID of the pool with a given name. +func (c *Conn) GetPoolByName(name string) (int64, error) { + if err := c.ensureConnected(); err != nil { + return 0, err + } + cName := C.CString(name) + defer C.free(unsafe.Pointer(cName)) + ret := int64(C.rados_pool_lookup(c.cluster, cName)) + if ret < 0 { + return 0, radosError(ret) + } + return ret, nil +} + +// GetPoolByID returns the name of a pool by a given ID. +func (c *Conn) GetPoolByID(id int64) (string, error) { + buf := make([]byte, 4096) + if err := c.ensureConnected(); err != nil { + return "", err + } + cid := C.int64_t(id) + ret := int(C.rados_pool_reverse_lookup(c.cluster, cid, (*C.char)(unsafe.Pointer(&buf[0])), C.size_t(len(buf)))) + if ret < 0 { + return "", radosError(ret) + } + return C.GoString((*C.char)(unsafe.Pointer(&buf[0]))), nil +} diff --git a/vendor/github.com/ceph/go-ceph/rados/doc.go b/vendor/github.com/ceph/go-ceph/rados/doc.go new file mode 100644 index 0000000000..5b7d82bf1c --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/doc.go @@ -0,0 +1,4 @@ +/* +Package rados contains a set of wrappers around Ceph's librados API. 
+*/ +package rados diff --git a/vendor/github.com/ceph/go-ceph/rados/errors.go b/vendor/github.com/ceph/go-ceph/rados/errors.go new file mode 100644 index 0000000000..d0de0c80bc --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/errors.go @@ -0,0 +1,86 @@ +package rados + +/* +#include +*/ +import "C" + +import ( + "errors" + + "github.com/ceph/go-ceph/internal/errutil" +) + +// radosError represents an error condition returned from the Ceph RADOS APIs. +type radosError int + +// Error returns the error string for the radosError type. +func (e radosError) Error() string { + return errutil.FormatErrorCode("rados", int(e)) +} + +func (e radosError) ErrorCode() int { + return int(e) +} + +func getError(e C.int) error { + if e == 0 { + return nil + } + return radosError(e) +} + +// getErrorIfNegative converts a ceph return code to error if negative. +// This is useful for functions that return a usable positive value on +// success but a negative error number on error. +func getErrorIfNegative(ret C.int) error { + if ret >= 0 { + return nil + } + return getError(ret) +} + +// Public go errors: + +var ( + // ErrNotConnected is returned when functions are called + // without a RADOS connection. + ErrNotConnected = errors.New("RADOS not connected") + // ErrEmptyArgument may be returned if a function argument is passed + // a zero-length slice or map. + ErrEmptyArgument = errors.New("Argument must contain at least one item") + // ErrInvalidIOContext may be returned if an api call requires an IOContext + // but IOContext is not ready for use. + ErrInvalidIOContext = errors.New("IOContext is not ready for use") + // ErrOperationIncomplete is returned from write op or read op steps for + // which the operation has not been performed yet. + ErrOperationIncomplete = errors.New("Operation has not been performed yet") +) + +// Public radosErrors: + +const ( + // ErrNotFound indicates a missing resource. + ErrNotFound = radosError(-C.ENOENT) + // ErrPermissionDenied indicates a permissions issue. + ErrPermissionDenied = radosError(-C.EPERM) + // ErrObjectExists indicates that an exclusive object creation failed. + ErrObjectExists = radosError(-C.EEXIST) + + // RadosErrorNotFound indicates a missing resource. + // + // Deprecated: use ErrNotFound instead + RadosErrorNotFound = ErrNotFound + // RadosErrorPermissionDenied indicates a permissions issue. 
+ // + // Deprecated: use ErrPermissionDenied instead + RadosErrorPermissionDenied = ErrPermissionDenied +) + +// Private errors: + +const ( + errNameTooLong = radosError(-C.ENAMETOOLONG) + + errRange = radosError(-C.ERANGE) +) diff --git a/vendor/github.com/ceph/go-ceph/rados/ioctx.go b/vendor/github.com/ceph/go-ceph/rados/ioctx.go new file mode 100644 index 0000000000..e6e35bf19b --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/ioctx.go @@ -0,0 +1,725 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +// #include +// +// char* nextChunk(char **idx) { +// char *copy; +// copy = strdup(*idx); +// *idx += strlen(*idx) + 1; +// return copy; +// } +// +// #if __APPLE__ +// #define ceph_time_t __darwin_time_t +// #define ceph_suseconds_t __darwin_suseconds_t +// #elif __GLIBC__ +// #define ceph_time_t __time_t +// #define ceph_suseconds_t __suseconds_t +// #else +// #define ceph_time_t time_t +// #define ceph_suseconds_t suseconds_t +// #endif +import "C" + +import ( + "syscall" + "time" + "unsafe" + + "github.com/ceph/go-ceph/internal/retry" +) + +// CreateOption is passed to IOContext.Create() and should be one of +// CreateExclusive or CreateIdempotent. +type CreateOption int + +const ( + // CreateExclusive if used with IOContext.Create() and the object + // already exists, the function will return an error. + CreateExclusive = C.LIBRADOS_CREATE_EXCLUSIVE + // CreateIdempotent if used with IOContext.Create() and the object + // already exists, the function will not return an error. + CreateIdempotent = C.LIBRADOS_CREATE_IDEMPOTENT + + defaultListObjectsResultSize = 1000 + // listEndSentinel is the value returned by rados_list_object_list_is_end + // when a cursor has reached the end of a pool + listEndSentinel = 1 +) + +//revive:disable:var-naming old-yet-exported public api + +// PoolStat represents Ceph pool statistics. +type PoolStat struct { + // space used in bytes + Num_bytes uint64 + // space used in KB + Num_kb uint64 + // number of objects in the pool + Num_objects uint64 + // number of clones of objects + Num_object_clones uint64 + // num_objects * num_replicas + Num_object_copies uint64 + Num_objects_missing_on_primary uint64 + // number of objects found on no OSDs + Num_objects_unfound uint64 + // number of objects replicated fewer times than they should be + // (but found on at least one OSD) + Num_objects_degraded uint64 + Num_rd uint64 + Num_rd_kb uint64 + Num_wr uint64 + Num_wr_kb uint64 +} + +//revive:enable:var-naming + +// ObjectStat represents an object stat information +type ObjectStat struct { + // current length in bytes + Size uint64 + // last modification time + ModTime time.Time +} + +// LockInfo represents information on a current Ceph lock +type LockInfo struct { + NumLockers int + Exclusive bool + Tag string + Clients []string + Cookies []string + Addrs []string +} + +// IOContext represents a context for performing I/O within a pool. +type IOContext struct { + ioctx C.rados_ioctx_t + + // Hold a reference back to the connection that the ioctx depends on so + // that Go's GC doesn't trigger the Conn's finalizer before this + // IOContext is destroyed. + conn *Conn +} + +// validate returns an error if the ioctx is not ready to be used +// with ceph C calls. +func (ioctx *IOContext) validate() error { + if ioctx.ioctx == nil { + return ErrInvalidIOContext + } + return nil +} + +// Pointer returns a pointer reference to an internal structure. +// This function should NOT be used outside of go-ceph itself. 
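The exported sentinel errors above are plain comparable values, so they can be checked directly or through errors.Is. A small sketch (both the package and helper names are made up; ioctx is assumed to come from Conn.OpenIOContext as shown earlier):

package radosexample

import (
	"errors"
	"fmt"

	"github.com/ceph/go-ceph/rados"
)

// DeleteIfPresent removes oid, treating "not found" as success.
func DeleteIfPresent(ioctx *rados.IOContext, oid string) error {
	err := ioctx.Delete(oid)
	if errors.Is(err, rados.ErrNotFound) {
		fmt.Printf("%s was already gone\n", oid)
		return nil
	}
	return err // nil on success, any other radosError otherwise
}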
+func (ioctx *IOContext) Pointer() unsafe.Pointer { + return unsafe.Pointer(ioctx.ioctx) +} + +// SetNamespace sets the namespace for objects within this IO context (pool). +// Setting namespace to a empty or zero length string sets the pool to the default namespace. +// +// Implements: +// +// void rados_ioctx_set_namespace(rados_ioctx_t io, +// const char *nspace); +func (ioctx *IOContext) SetNamespace(namespace string) { + var cns *C.char + if len(namespace) > 0 { + cns = C.CString(namespace) + defer C.free(unsafe.Pointer(cns)) + } + C.rados_ioctx_set_namespace(ioctx.ioctx, cns) +} + +// Create a new object with key oid. +// +// Implements: +// +// void rados_write_op_create(rados_write_op_t write_op, int exclusive, +// const char* category) +func (ioctx *IOContext) Create(oid string, exclusive CreateOption) error { + op := CreateWriteOp() + defer op.Release() + op.Create(exclusive) + return op.operateCompat(ioctx, oid) +} + +// Write writes len(data) bytes to the object with key oid starting at byte +// offset offset. It returns an error, if any. +func (ioctx *IOContext) Write(oid string, data []byte, offset uint64) error { + coid := C.CString(oid) + defer C.free(unsafe.Pointer(coid)) + + dataPointer := unsafe.Pointer(nil) + if len(data) > 0 { + dataPointer = unsafe.Pointer(&data[0]) + } + + ret := C.rados_write(ioctx.ioctx, coid, + (*C.char)(dataPointer), + (C.size_t)(len(data)), + (C.uint64_t)(offset)) + + return getError(ret) +} + +// WriteFull writes len(data) bytes to the object with key oid. +// The object is filled with the provided data. If the object exists, +// it is atomically truncated and then written. It returns an error, if any. +func (ioctx *IOContext) WriteFull(oid string, data []byte) error { + coid := C.CString(oid) + defer C.free(unsafe.Pointer(coid)) + + ret := C.rados_write_full(ioctx.ioctx, coid, + (*C.char)(unsafe.Pointer(&data[0])), + (C.size_t)(len(data))) + return getError(ret) +} + +// Append appends len(data) bytes to the object with key oid. +// The object is appended with the provided data. If the object exists, +// it is atomically appended to. It returns an error, if any. +func (ioctx *IOContext) Append(oid string, data []byte) error { + coid := C.CString(oid) + defer C.free(unsafe.Pointer(coid)) + + ret := C.rados_append(ioctx.ioctx, coid, + (*C.char)(unsafe.Pointer(&data[0])), + (C.size_t)(len(data))) + return getError(ret) +} + +// Read reads up to len(data) bytes from the object with key oid starting at byte +// offset offset. It returns the number of bytes read and an error, if any. +func (ioctx *IOContext) Read(oid string, data []byte, offset uint64) (int, error) { + coid := C.CString(oid) + defer C.free(unsafe.Pointer(coid)) + + var buf *C.char + if len(data) > 0 { + buf = (*C.char)(unsafe.Pointer(&data[0])) + } + + ret := C.rados_read( + ioctx.ioctx, + coid, + buf, + (C.size_t)(len(data)), + (C.uint64_t)(offset)) + + if ret >= 0 { + return int(ret), nil + } + return 0, getError(ret) +} + +// Delete deletes the object with key oid. It returns an error, if any. +func (ioctx *IOContext) Delete(oid string) error { + coid := C.CString(oid) + defer C.free(unsafe.Pointer(coid)) + + return getError(C.rados_remove(ioctx.ioctx, coid)) +} + +// Truncate resizes the object with key oid to size size. If the operation +// enlarges the object, the new area is logically filled with zeroes. If the +// operation shrinks the object, the excess data is removed. It returns an +// error, if any. 
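A short write-and-read-back sketch using the basic object calls above; the helper is hypothetical and assumes an open *rados.IOContext.

package radosexample

import (
	"fmt"

	"github.com/ceph/go-ceph/rados"
)

// RoundTrip writes a small object, reads it back, and removes it again.
func RoundTrip(ioctx *rados.IOContext) error {
	if err := ioctx.WriteFull("greeting", []byte("hello rados")); err != nil {
		return err
	}

	buf := make([]byte, 64)
	n, err := ioctx.Read("greeting", buf, 0)
	if err != nil {
		return err
	}
	fmt.Printf("read %d bytes: %q\n", n, buf[:n])

	return ioctx.Delete("greeting")
}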
+func (ioctx *IOContext) Truncate(oid string, size uint64) error { + coid := C.CString(oid) + defer C.free(unsafe.Pointer(coid)) + + return getError(C.rados_trunc(ioctx.ioctx, coid, (C.uint64_t)(size))) +} + +// Destroy informs librados that the I/O context is no longer in use. +// Resources associated with the context may not be freed immediately, and the +// context should not be used again after calling this method. +func (ioctx *IOContext) Destroy() { + C.rados_ioctx_destroy(ioctx.ioctx) +} + +// GetPoolStats returns a set of statistics about the pool associated with this I/O +// context. +// +// Implements: +// +// int rados_ioctx_pool_stat(rados_ioctx_t io, +// struct rados_pool_stat_t *stats); +func (ioctx *IOContext) GetPoolStats() (stat PoolStat, err error) { + cStat := C.struct_rados_pool_stat_t{} + ret := C.rados_ioctx_pool_stat(ioctx.ioctx, &cStat) + if ret < 0 { + return PoolStat{}, getError(ret) + } + return PoolStat{ + Num_bytes: uint64(cStat.num_bytes), + Num_kb: uint64(cStat.num_kb), + Num_objects: uint64(cStat.num_objects), + Num_object_clones: uint64(cStat.num_object_clones), + Num_object_copies: uint64(cStat.num_object_copies), + Num_objects_missing_on_primary: uint64(cStat.num_objects_missing_on_primary), + Num_objects_unfound: uint64(cStat.num_objects_unfound), + Num_objects_degraded: uint64(cStat.num_objects_degraded), + Num_rd: uint64(cStat.num_rd), + Num_rd_kb: uint64(cStat.num_rd_kb), + Num_wr: uint64(cStat.num_wr), + Num_wr_kb: uint64(cStat.num_wr_kb), + }, nil +} + +// GetPoolID returns the pool ID associated with the I/O context. +// +// Implements: +// +// int64_t rados_ioctx_get_id(rados_ioctx_t io) +func (ioctx *IOContext) GetPoolID() int64 { + ret := C.rados_ioctx_get_id(ioctx.ioctx) + return int64(ret) +} + +// GetPoolName returns the name of the pool associated with the I/O context. +func (ioctx *IOContext) GetPoolName() (name string, err error) { + var ( + buf []byte + ret C.int + ) + retry.WithSizes(128, 8192, func(size int) retry.Hint { + buf = make([]byte, size) + ret = C.rados_ioctx_get_pool_name( + ioctx.ioctx, + (*C.char)(unsafe.Pointer(&buf[0])), + C.unsigned(len(buf))) + err = getErrorIfNegative(ret) + return retry.DoubleSize.If(err == errRange) + }) + if err != nil { + return "", err + } + name = C.GoStringN((*C.char)(unsafe.Pointer(&buf[0])), ret) + return name, nil +} + +// ObjectListFunc is the type of the function called for each object visited +// by ListObjects. +type ObjectListFunc func(oid string) + +// ListObjects lists all of the objects in the pool associated with the I/O +// context, and called the provided listFn function for each object, passing +// to the function the name of the object. 
Call SetNamespace with +// RadosAllNamespaces before calling this function to return objects from all +// namespaces +func (ioctx *IOContext) ListObjects(listFn ObjectListFunc) error { + pageResults := C.size_t(defaultListObjectsResultSize) + var filterLen C.size_t + results := make([]C.rados_object_list_item, pageResults) + + next := C.rados_object_list_begin(ioctx.ioctx) + if next == nil { + return ErrNotFound + } + defer C.rados_object_list_cursor_free(ioctx.ioctx, next) + finish := C.rados_object_list_end(ioctx.ioctx) + if finish == nil { + return ErrNotFound + } + defer C.rados_object_list_cursor_free(ioctx.ioctx, finish) + + for { + res := (*C.rados_object_list_item)(unsafe.Pointer(&results[0])) + ret := C.rados_object_list(ioctx.ioctx, next, finish, pageResults, nil, filterLen, res, &next) + if ret < 0 { + return getError(ret) + } + + numEntries := int(ret) + for i := 0; i < numEntries; i++ { + item := results[i] + listFn(C.GoStringN(item.oid, (C.int)(item.oid_length))) + } + C.rados_object_list_free(C.size_t(ret), res) + + if C.rados_object_list_is_end(ioctx.ioctx, next) == listEndSentinel { + return nil + } + } +} + +// Stat returns the size of the object and its last modification time +func (ioctx *IOContext) Stat(object string) (stat ObjectStat, err error) { + var cPsize C.uint64_t + var cPmtime C.time_t + cObject := C.CString(object) + defer C.free(unsafe.Pointer(cObject)) + + ret := C.rados_stat( + ioctx.ioctx, + cObject, + &cPsize, + &cPmtime) + + if ret < 0 { + return ObjectStat{}, getError(ret) + } + return ObjectStat{ + Size: uint64(cPsize), + ModTime: time.Unix(int64(cPmtime), 0), + }, nil +} + +// GetXattr gets an xattr with key `name`, it returns the length of +// the key read or an error if not successful +func (ioctx *IOContext) GetXattr(object string, name string, data []byte) (int, error) { + cObject := C.CString(object) + cName := C.CString(name) + defer C.free(unsafe.Pointer(cObject)) + defer C.free(unsafe.Pointer(cName)) + + ret := C.rados_getxattr( + ioctx.ioctx, + cObject, + cName, + (*C.char)(unsafe.Pointer(&data[0])), + (C.size_t)(len(data))) + + if ret >= 0 { + return int(ret), nil + } + return 0, getError(ret) +} + +// SetXattr sets an xattr for an object with key `name` with value as `data` +func (ioctx *IOContext) SetXattr(object string, name string, data []byte) error { + cObject := C.CString(object) + cName := C.CString(name) + defer C.free(unsafe.Pointer(cObject)) + defer C.free(unsafe.Pointer(cName)) + + ret := C.rados_setxattr( + ioctx.ioctx, + cObject, + cName, + (*C.char)(unsafe.Pointer(&data[0])), + (C.size_t)(len(data))) + + return getError(ret) +} + +// ListXattrs lists all the xattrs for an object. The xattrs are returned as a +// mapping of string keys and byte-slice values. 
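ListObjects and Stat combine naturally into a small pool listing. The helper below is hypothetical; it assumes an open *rados.IOContext, and RadosAllNamespaces is a constant defined in rados.go elsewhere in this patch.

package radosexample

import (
	"fmt"

	"github.com/ceph/go-ceph/rados"
)

// PrintObjects lists every object in the pool/namespace the IOContext points
// at and prints its name, size and modification time. Calling
// ioctx.SetNamespace(rados.RadosAllNamespaces) first would widen the listing
// to all namespaces.
func PrintObjects(ioctx *rados.IOContext) error {
	var firstErr error
	err := ioctx.ListObjects(func(oid string) {
		st, err := ioctx.Stat(oid)
		if err != nil {
			if firstErr == nil {
				firstErr = err
			}
			return
		}
		fmt.Printf("%s\t%d bytes\t%v\n", oid, st.Size, st.ModTime)
	})
	if err != nil {
		return err
	}
	return firstErr
}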
+func (ioctx *IOContext) ListXattrs(oid string) (map[string][]byte, error) { + coid := C.CString(oid) + defer C.free(unsafe.Pointer(coid)) + + var it C.rados_xattrs_iter_t + + ret := C.rados_getxattrs(ioctx.ioctx, coid, &it) + if ret < 0 { + return nil, getError(ret) + } + defer func() { C.rados_getxattrs_end(it) }() + m := make(map[string][]byte) + for { + var cName, cVal *C.char + var cLen C.size_t + defer C.free(unsafe.Pointer(cName)) + defer C.free(unsafe.Pointer(cVal)) + + ret := C.rados_getxattrs_next(it, &cName, &cVal, &cLen) + if ret < 0 { + return nil, getError(ret) + } + // rados api returns a null name,val & 0-length upon + // end of iteration + if cName == nil { + return m, nil // stop iteration + } + m[C.GoString(cName)] = C.GoBytes(unsafe.Pointer(cVal), (C.int)(cLen)) + } +} + +// RmXattr removes an xattr with key `name` from object `oid` +func (ioctx *IOContext) RmXattr(oid string, name string) error { + coid := C.CString(oid) + cName := C.CString(name) + defer C.free(unsafe.Pointer(coid)) + defer C.free(unsafe.Pointer(cName)) + + ret := C.rados_rmxattr( + ioctx.ioctx, + coid, + cName) + + return getError(ret) +} + +// LockExclusive takes an exclusive lock on an object. +func (ioctx *IOContext) LockExclusive(oid, name, cookie, desc string, duration time.Duration, flags *byte) (int, error) { + coid := C.CString(oid) + cName := C.CString(name) + cCookie := C.CString(cookie) + cDesc := C.CString(desc) + + var cDuration C.struct_timeval + if duration != 0 { + tv := syscall.NsecToTimeval(duration.Nanoseconds()) + cDuration = C.struct_timeval{tv_sec: C.ceph_time_t(tv.Sec), tv_usec: C.ceph_suseconds_t(tv.Usec)} + } + + var cFlags C.uint8_t + if flags != nil { + cFlags = C.uint8_t(*flags) + } + + defer C.free(unsafe.Pointer(coid)) + defer C.free(unsafe.Pointer(cName)) + defer C.free(unsafe.Pointer(cCookie)) + defer C.free(unsafe.Pointer(cDesc)) + + ret := C.rados_lock_exclusive( + ioctx.ioctx, + coid, + cName, + cCookie, + cDesc, + &cDuration, + cFlags) + + // 0 on success, negative error code on failure + // -EBUSY if the lock is already held by another (client, cookie) pair + // -EEXIST if the lock is already held by the same (client, cookie) pair + + switch ret { + case 0: + return int(ret), nil + case -C.EBUSY: + return int(ret), nil + case -C.EEXIST: + return int(ret), nil + default: + return int(ret), getError(ret) + } +} + +// LockShared takes a shared lock on an object. 
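A small xattr sketch using SetXattr and ListXattrs; helper and attribute names are made up, and oid is assumed to be an existing object.

package radosexample

import (
	"fmt"

	"github.com/ceph/go-ceph/rados"
)

// TagObject attaches an "owner" xattr to an existing object and then prints
// every xattr found on it.
func TagObject(ioctx *rados.IOContext, oid string) error {
	if err := ioctx.SetXattr(oid, "owner", []byte("metrics")); err != nil {
		return err
	}
	attrs, err := ioctx.ListXattrs(oid)
	if err != nil {
		return err
	}
	for name, value := range attrs {
		fmt.Printf("%s=%q\n", name, value)
	}
	return nil
}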
+func (ioctx *IOContext) LockShared(oid, name, cookie, tag, desc string, duration time.Duration, flags *byte) (int, error) { + coid := C.CString(oid) + cName := C.CString(name) + cCookie := C.CString(cookie) + cTag := C.CString(tag) + cDesc := C.CString(desc) + + var cDuration C.struct_timeval + if duration != 0 { + tv := syscall.NsecToTimeval(duration.Nanoseconds()) + cDuration = C.struct_timeval{tv_sec: C.ceph_time_t(tv.Sec), tv_usec: C.ceph_suseconds_t(tv.Usec)} + } + + var cFlags C.uint8_t + if flags != nil { + cFlags = C.uint8_t(*flags) + } + + defer C.free(unsafe.Pointer(coid)) + defer C.free(unsafe.Pointer(cName)) + defer C.free(unsafe.Pointer(cCookie)) + defer C.free(unsafe.Pointer(cTag)) + defer C.free(unsafe.Pointer(cDesc)) + + ret := C.rados_lock_shared( + ioctx.ioctx, + coid, + cName, + cCookie, + cTag, + cDesc, + &cDuration, + cFlags) + + // 0 on success, negative error code on failure + // -EBUSY if the lock is already held by another (client, cookie) pair + // -EEXIST if the lock is already held by the same (client, cookie) pair + + switch ret { + case 0: + return int(ret), nil + case -C.EBUSY: + return int(ret), nil + case -C.EEXIST: + return int(ret), nil + default: + return int(ret), getError(ret) + } +} + +// Unlock releases a shared or exclusive lock on an object. +func (ioctx *IOContext) Unlock(oid, name, cookie string) (int, error) { + coid := C.CString(oid) + cName := C.CString(name) + cCookie := C.CString(cookie) + + defer C.free(unsafe.Pointer(coid)) + defer C.free(unsafe.Pointer(cName)) + defer C.free(unsafe.Pointer(cCookie)) + + // 0 on success, negative error code on failure + // -ENOENT if the lock is not held by the specified (client, cookie) pair + + ret := C.rados_unlock( + ioctx.ioctx, + coid, + cName, + cCookie) + + switch ret { + case 0: + return int(ret), nil + case -C.ENOENT: + return int(ret), nil + default: + return int(ret), getError(ret) + } +} + +// ListLockers lists clients that have locked the named object lock and +// information about the lock. +// The number of bytes required in each buffer is put in the corresponding size +// out parameter. If any of the provided buffers are too short, -ERANGE is +// returned after these sizes are filled in. 
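The advisory lock calls above report -EBUSY and -EEXIST through the returned code rather than through the error value, so callers need to check both. A hypothetical wrapper (lock name and cookie are arbitrary):

package radosexample

import (
	"fmt"
	"time"

	"github.com/ceph/go-ceph/rados"
)

// WithExclusiveLock takes a 30 second exclusive advisory lock on oid, runs fn,
// and releases the lock again.
func WithExclusiveLock(ioctx *rados.IOContext, oid string, fn func() error) error {
	ret, err := ioctx.LockExclusive(oid, "example-lock", "cookie-1", "demo lock", 30*time.Second, nil)
	if err != nil {
		return err
	}
	if ret != 0 {
		// -EBUSY / -EEXIST come back through the return code, not the error.
		return fmt.Errorf("lock not acquired: ret=%d", ret)
	}
	defer ioctx.Unlock(oid, "example-lock", "cookie-1")
	return fn()
}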
+func (ioctx *IOContext) ListLockers(oid, name string) (*LockInfo, error) { + coid := C.CString(oid) + cName := C.CString(name) + + cTag := (*C.char)(C.malloc(C.size_t(1024))) + cClients := (*C.char)(C.malloc(C.size_t(1024))) + cCookies := (*C.char)(C.malloc(C.size_t(1024))) + cAddrs := (*C.char)(C.malloc(C.size_t(1024))) + + var cExclusive C.int + cTagLen := C.size_t(1024) + cClientsLen := C.size_t(1024) + cCookiesLen := C.size_t(1024) + cAddrsLen := C.size_t(1024) + + defer C.free(unsafe.Pointer(coid)) + defer C.free(unsafe.Pointer(cName)) + defer C.free(unsafe.Pointer(cTag)) + defer C.free(unsafe.Pointer(cClients)) + defer C.free(unsafe.Pointer(cCookies)) + defer C.free(unsafe.Pointer(cAddrs)) + + ret := C.rados_list_lockers( + ioctx.ioctx, + coid, + cName, + &cExclusive, + cTag, + &cTagLen, + cClients, + &cClientsLen, + cCookies, + &cCookiesLen, + cAddrs, + &cAddrsLen) + + splitCString := func(items *C.char, itemsLen C.size_t) []string { + currLen := 0 + clients := []string{} + for currLen < int(itemsLen) { + client := C.GoString(C.nextChunk(&items)) + clients = append(clients, client) + currLen += len(client) + 1 + } + return clients + } + + if ret < 0 { + return nil, radosError(ret) + } + return &LockInfo{int(ret), cExclusive == 1, C.GoString(cTag), splitCString(cClients, cClientsLen), splitCString(cCookies, cCookiesLen), splitCString(cAddrs, cAddrsLen)}, nil +} + +// BreakLock releases a shared or exclusive lock on an object, which was taken by the specified client. +func (ioctx *IOContext) BreakLock(oid, name, client, cookie string) (int, error) { + coid := C.CString(oid) + cName := C.CString(name) + cClient := C.CString(client) + cCookie := C.CString(cookie) + + defer C.free(unsafe.Pointer(coid)) + defer C.free(unsafe.Pointer(cName)) + defer C.free(unsafe.Pointer(cClient)) + defer C.free(unsafe.Pointer(cCookie)) + + // 0 on success, negative error code on failure + // -ENOENT if the lock is not held by the specified (client, cookie) pair + // -EINVAL if the client cannot be parsed + + ret := C.rados_break_lock( + ioctx.ioctx, + coid, + cName, + cClient, + cCookie) + + switch ret { + case 0: + return int(ret), nil + case -C.ENOENT: + return int(ret), nil + case -C.EINVAL: // -EINVAL + return int(ret), nil + default: + return int(ret), getError(ret) + } +} + +// GetLastVersion will return the version number of the last object read or +// written to. +// +// Implements: +// +// uint64_t rados_get_last_version(rados_ioctx_t io); +func (ioctx *IOContext) GetLastVersion() (uint64, error) { + if err := ioctx.validate(); err != nil { + return 0, err + } + v := C.rados_get_last_version(ioctx.ioctx) + return uint64(v), nil +} + +// GetNamespace gets the namespace used for objects within this IO context. 
+// +// Implements: +// +// int rados_ioctx_get_namespace(rados_ioctx_t io, char *buf, +// unsigned maxlen); +func (ioctx *IOContext) GetNamespace() (string, error) { + if err := ioctx.validate(); err != nil { + return "", err + } + var ( + err error + buf []byte + ret C.int + ) + retry.WithSizes(128, 8192, func(size int) retry.Hint { + buf = make([]byte, size) + ret = C.rados_ioctx_get_namespace( + ioctx.ioctx, + (*C.char)(unsafe.Pointer(&buf[0])), + C.unsigned(len(buf))) + err = getErrorIfNegative(ret) + return retry.DoubleSize.If(err == errRange) + }) + if err != nil { + return "", err + } + return string(buf[:ret]), nil +} diff --git a/vendor/github.com/ceph/go-ceph/rados/ioctx_nautilus.go b/vendor/github.com/ceph/go-ceph/rados/ioctx_nautilus.go new file mode 100644 index 0000000000..7b7aacc2ba --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/ioctx_nautilus.go @@ -0,0 +1,37 @@ +//go:build nautilus +// +build nautilus + +package rados + +// #cgo LDFLAGS: -lrados +// #include +// +import "C" + +// SetPoolFullTry makes sure to send requests to the cluster despite +// the cluster or pool being marked full; ops will either succeed(e.g., delete) +// or return EDQUOT or ENOSPC. +// +// Implements: +// +// void rados_set_osdmap_full_try(rados_ioctx_t io); +func (ioctx *IOContext) SetPoolFullTry() error { + if err := ioctx.validate(); err != nil { + return err + } + C.rados_set_osdmap_full_try(ioctx.ioctx) + return nil +} + +// UnsetPoolFullTry unsets the flag set by SetPoolFullTry() +// +// Implements: +// +// void rados_unset_osdmap_full_try(rados_ioctx_t io); +func (ioctx *IOContext) UnsetPoolFullTry() error { + if err := ioctx.validate(); err != nil { + return err + } + C.rados_unset_osdmap_full_try(ioctx.ioctx) + return nil +} diff --git a/vendor/github.com/ceph/go-ceph/rados/ioctx_octopus.go b/vendor/github.com/ceph/go-ceph/rados/ioctx_octopus.go new file mode 100644 index 0000000000..4ea03bfe17 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/ioctx_octopus.go @@ -0,0 +1,40 @@ +//go:build !nautilus +// +build !nautilus + +package rados + +// #cgo LDFLAGS: -lrados +// #include +// +import "C" + +// Ceph octopus deprecates rados_set_osdmap_full_try() and implements rados_set_pool_full_try() +// Ceph octopus deprecates rados_unset_osdmap_full_try() and implements rados_unset_pool_full_try() + +// SetPoolFullTry makes sure to send requests to the cluster despite +// the cluster or pool being marked full; ops will either succeed(e.g., delete) +// or return EDQUOT or ENOSPC. 
+// +// Implements: +// +// void rados_set_pool_full_try(rados_ioctx_t io); +func (ioctx *IOContext) SetPoolFullTry() error { + if err := ioctx.validate(); err != nil { + return err + } + C.rados_set_pool_full_try(ioctx.ioctx) + return nil +} + +// UnsetPoolFullTry unsets the flag set by SetPoolFullTry() +// +// Implements: +// +// void rados_unset_pool_full_try(rados_ioctx_t io); +func (ioctx *IOContext) UnsetPoolFullTry() error { + if err := ioctx.validate(); err != nil { + return err + } + C.rados_unset_pool_full_try(ioctx.ioctx) + return nil +} diff --git a/vendor/github.com/ceph/go-ceph/rados/ioctx_pool_alignment.go b/vendor/github.com/ceph/go-ceph/rados/ioctx_pool_alignment.go new file mode 100644 index 0000000000..4aee4b6751 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/ioctx_pool_alignment.go @@ -0,0 +1,25 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +// +import "C" + +// Alignment returns the required stripe size in bytes for pools supporting/requiring it, or an error if unsuccessful. +// For an EC pool, a buffer size multiple of its stripe size is required to call Append. To know if the pool requires +// alignment or not, use RequiresAlignment. +// +// Implements: +// +// int rados_ioctx_pool_required_alignment2(rados_ioctx_t io, uint64_t *alignment) +func (ioctx *IOContext) Alignment() (uint64, error) { + var alignSizeBytes C.uint64_t + ret := C.rados_ioctx_pool_required_alignment2( + ioctx.ioctx, + &alignSizeBytes) + if ret != 0 { + return 0, getError(ret) + } + return uint64(alignSizeBytes), nil +} diff --git a/vendor/github.com/ceph/go-ceph/rados/ioctx_pool_requires_alignment.go b/vendor/github.com/ceph/go-ceph/rados/ioctx_pool_requires_alignment.go new file mode 100644 index 0000000000..6420d2b516 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/ioctx_pool_requires_alignment.go @@ -0,0 +1,25 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +// +import "C" + +// RequiresAlignment returns true if the pool supports/requires alignment or an error if not successful. +// For an EC pool, a buffer size multiple of its stripe size is required to call Append. See +// Alignment to know how to get the stripe size for pools requiring it. +// +// Implements: +// +// int rados_ioctx_pool_requires_alignment2(rados_ioctx_t io, int *req) +func (ioctx *IOContext) RequiresAlignment() (bool, error) { + var alignRequired C.int + ret := C.rados_ioctx_pool_requires_alignment2( + ioctx.ioctx, + &alignRequired) + if ret != 0 { + return false, getError(ret) + } + return (alignRequired != 0), nil +} diff --git a/vendor/github.com/ceph/go-ceph/rados/ioctx_set_alloc_hint.go b/vendor/github.com/ceph/go-ceph/rados/ioctx_set_alloc_hint.go new file mode 100644 index 0000000000..b2f2fd08bb --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/ioctx_set_alloc_hint.go @@ -0,0 +1,36 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +// +import "C" + +import ( + "unsafe" +) + +// SetAllocationHint sets allocation hint for an object. This is an advisory +// operation, it will always succeed (as if it was submitted with a +// LIBRADOS_OP_FLAG_FAILOK flag set) and is not guaranteed to do anything on +// the backend. 
+// +// Implements: +// +// int rados_set_alloc_hint2(rados_ioctx_t io, +// const char *o, +// uint64_t expected_object_size, +// uint64_t expected_write_size, +// uint32_t flags); +func (ioctx *IOContext) SetAllocationHint(oid string, expectedObjectSize uint64, expectedWriteSize uint64, flags AllocHintFlags) error { + coid := C.CString(oid) + defer C.free(unsafe.Pointer(coid)) + + return getError(C.rados_set_alloc_hint2( + ioctx.ioctx, + coid, + (C.uint64_t)(expectedObjectSize), + (C.uint64_t)(expectedWriteSize), + (C.uint32_t)(flags), + )) +} diff --git a/vendor/github.com/ceph/go-ceph/rados/object_iter.go b/vendor/github.com/ceph/go-ceph/rados/object_iter.go new file mode 100644 index 0000000000..025dca1601 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/object_iter.go @@ -0,0 +1,92 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// +import "C" + +// Iter supports iterating over objects in the ioctx. +type Iter struct { + ctx C.rados_list_ctx_t + err error + entry string + namespace string +} + +// IterToken supports reporting on and seeking to different positions. +type IterToken uint32 + +// Iter returns a Iterator object that can be used to list the object names in the current pool +func (ioctx *IOContext) Iter() (*Iter, error) { + iter := Iter{} + if cerr := C.rados_nobjects_list_open(ioctx.ioctx, &iter.ctx); cerr < 0 { + return nil, getError(cerr) + } + return &iter, nil +} + +// Token returns a token marking the current position of the iterator. To be used in combination with Iter.Seek() +func (iter *Iter) Token() IterToken { + return IterToken(C.rados_nobjects_list_get_pg_hash_position(iter.ctx)) +} + +// Seek moves the iterator to the position indicated by the token. +func (iter *Iter) Seek(token IterToken) { + C.rados_nobjects_list_seek(iter.ctx, C.uint32_t(token)) +} + +// Next retrieves the next object name in the pool/namespace iterator. +// Upon a successful invocation (return value of true), the Value method should +// be used to obtain the name of the retrieved object name. When the iterator is +// exhausted, Next returns false. The Err method should used to verify whether the +// end of the iterator was reached, or the iterator received an error. +// +// Example: +// +// iter := pool.Iter() +// defer iter.Close() +// for iter.Next() { +// fmt.Printf("%v\n", iter.Value()) +// } +// return iter.Err() +func (iter *Iter) Next() bool { + var cEntry *C.char + var cNamespace *C.char + if cerr := C.rados_nobjects_list_next(iter.ctx, &cEntry, nil, &cNamespace); cerr < 0 { + iter.err = getError(cerr) + return false + } + iter.entry = C.GoString(cEntry) + iter.namespace = C.GoString(cNamespace) + return true +} + +// Value returns the current value of the iterator (object name), after a successful call to Next. +func (iter *Iter) Value() string { + if iter.err != nil { + return "" + } + return iter.entry +} + +// Namespace returns the namespace associated with the current value of the iterator (object name), after a successful call to Next. +func (iter *Iter) Namespace() string { + if iter.err != nil { + return "" + } + return iter.namespace +} + +// Err checks whether the iterator has encountered an error. +func (iter *Iter) Err() error { + if iter.err == ErrNotFound { + return nil + } + return iter.err +} + +// Close the iterator cursor on the server. Be aware that iterators are not closed automatically +// at the end of iteration. 
+func (iter *Iter) Close() { + C.rados_nobjects_list_close(iter.ctx) +} diff --git a/vendor/github.com/ceph/go-ceph/rados/omap.go b/vendor/github.com/ceph/go-ceph/rados/omap.go new file mode 100644 index 0000000000..525826ba1a --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/omap.go @@ -0,0 +1,205 @@ +package rados + +/* +#cgo LDFLAGS: -lrados +#include +#include +*/ +import "C" + +import ( + "runtime" + "unsafe" +) + +// OmapKeyValue items are returned by the GetOmapStep's Next call. +type OmapKeyValue struct { + Key string + Value []byte +} + +// GetOmapStep values are used to get the results of an GetOmapValues call +// on a WriteOp. Until the Operate method of the WriteOp is called the Next +// call will return an error. After Operate is called, the Next call will +// return valid results. +// +// The life cycle of the GetOmapStep is bound to the ReadOp, if the ReadOp +// Release method is called the public methods of the step must no longer be +// used and may return errors. +type GetOmapStep struct { + // C returned data: + iter C.rados_omap_iter_t + more *C.uchar + rval *C.int + + // internal state: + + // canIterate is only set after the operation is performed and is + // intended to prevent premature fetching of data + canIterate bool +} + +func newGetOmapStep() *GetOmapStep { + gos := &GetOmapStep{ + more: (*C.uchar)(C.malloc(C.sizeof_uchar)), + rval: (*C.int)(C.malloc(C.sizeof_int)), + } + runtime.SetFinalizer(gos, opStepFinalizer) + return gos +} + +func (gos *GetOmapStep) free() { + gos.canIterate = false + if gos.iter != nil { + C.rados_omap_get_end(gos.iter) + } + gos.iter = nil + C.free(unsafe.Pointer(gos.more)) + gos.more = nil + C.free(unsafe.Pointer(gos.rval)) + gos.rval = nil +} + +func (gos *GetOmapStep) update() error { + err := getError(*gos.rval) + gos.canIterate = (err == nil) + return err +} + +// Next returns the next key value pair or nil if iteration is exhausted. +func (gos *GetOmapStep) Next() (*OmapKeyValue, error) { + if !gos.canIterate { + return nil, ErrOperationIncomplete + } + var ( + cKey *C.char + cVal *C.char + cKeyLen C.size_t + cValLen C.size_t + ) + ret := C.rados_omap_get_next2(gos.iter, &cKey, &cVal, &cKeyLen, &cValLen) + if ret != 0 { + return nil, getError(ret) + } + if cKey == nil { + return nil, nil + } + return &OmapKeyValue{ + Key: string(C.GoBytes(unsafe.Pointer(cKey), C.int(cKeyLen))), + Value: C.GoBytes(unsafe.Pointer(cVal), C.int(cValLen)), + }, nil +} + +// More returns true if there are more matching keys available. +func (gos *GetOmapStep) More() bool { + // tad bit hacky, but go can't automatically convert from + // unsigned char to bool + return *gos.more != 0 +} + +// SetOmap appends the map `pairs` to the omap `oid` +func (ioctx *IOContext) SetOmap(oid string, pairs map[string][]byte) error { + op := CreateWriteOp() + defer op.Release() + op.SetOmap(pairs) + return op.operateCompat(ioctx, oid) +} + +// OmapListFunc is the type of the function called for each omap key +// visited by ListOmapValues +type OmapListFunc func(key string, value []byte) + +// ListOmapValues iterates over the keys and values in an omap by way of +// a callback function. 
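Usage sketch (editorial, not part of the patch): the IOContext omap wrappers, SetOmap above plus GetAllOmapValues and RmOmapKeys defined just below, cover the common store/read/trim cycle. Assumes imports of `fmt` and the vendored rados package.

func omapRoundTrip(ioctx *rados.IOContext, oid string) error {
	// store two metadata entries on the object
	err := ioctx.SetOmap(oid, map[string][]byte{
		"owner": []byte("alice"),
		"state": []byte("ready"),
	})
	if err != nil {
		return err
	}
	// read everything back, fetching 100 keys per internal listing call
	got, err := ioctx.GetAllOmapValues(oid, "", "", 100)
	if err != nil {
		return err
	}
	fmt.Printf("omap of %s has %d keys\n", oid, len(got))
	// drop one key again
	return ioctx.RmOmapKeys(oid, []string{"state"})
}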
+// +// `startAfter`: iterate only on the keys after this specified one +// `filterPrefix`: iterate only on the keys beginning with this prefix +// `maxReturn`: iterate no more than `maxReturn` key/value pairs +// `listFn`: the function called at each iteration +func (ioctx *IOContext) ListOmapValues(oid string, startAfter string, filterPrefix string, maxReturn int64, listFn OmapListFunc) error { + + op := CreateReadOp() + defer op.Release() + gos := op.GetOmapValues(startAfter, filterPrefix, uint64(maxReturn)) + err := op.operateCompat(ioctx, oid) + if err != nil { + return err + } + + for { + kv, err := gos.Next() + if err != nil { + return err + } + if kv == nil { + break + } + listFn(kv.Key, kv.Value) + } + return nil +} + +// GetOmapValues fetches a set of keys and their values from an omap and returns then as a map +// `startAfter`: retrieve only the keys after this specified one +// `filterPrefix`: retrieve only the keys beginning with this prefix +// `maxReturn`: retrieve no more than `maxReturn` key/value pairs +func (ioctx *IOContext) GetOmapValues(oid string, startAfter string, filterPrefix string, maxReturn int64) (map[string][]byte, error) { + omap := map[string][]byte{} + + err := ioctx.ListOmapValues( + oid, startAfter, filterPrefix, maxReturn, + func(key string, value []byte) { + omap[key] = value + }, + ) + + return omap, err +} + +// GetAllOmapValues fetches all the keys and their values from an omap and returns then as a map +// `startAfter`: retrieve only the keys after this specified one +// `filterPrefix`: retrieve only the keys beginning with this prefix +// `iteratorSize`: internal number of keys to fetch during a read operation +func (ioctx *IOContext) GetAllOmapValues(oid string, startAfter string, filterPrefix string, iteratorSize int64) (map[string][]byte, error) { + omap := map[string][]byte{} + omapSize := 0 + + for { + err := ioctx.ListOmapValues( + oid, startAfter, filterPrefix, iteratorSize, + func(key string, value []byte) { + omap[key] = value + startAfter = key + }, + ) + + if err != nil { + return omap, err + } + + // End of omap + if len(omap) == omapSize { + break + } + + omapSize = len(omap) + } + + return omap, nil +} + +// RmOmapKeys removes the specified `keys` from the omap `oid` +func (ioctx *IOContext) RmOmapKeys(oid string, keys []string) error { + op := CreateWriteOp() + defer op.Release() + op.RmOmapKeys(keys) + return op.operateCompat(ioctx, oid) +} + +// CleanOmap clears the omap `oid` +func (ioctx *IOContext) CleanOmap(oid string) error { + op := CreateWriteOp() + defer op.Release() + op.CleanOmap() + return op.operateCompat(ioctx, oid) +} diff --git a/vendor/github.com/ceph/go-ceph/rados/operation.go b/vendor/github.com/ceph/go-ceph/rados/operation.go new file mode 100644 index 0000000000..d9d644cbd3 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/operation.go @@ -0,0 +1,154 @@ +package rados + +// #include +import "C" + +import ( + "fmt" + "strings" + "unsafe" + + "github.com/ceph/go-ceph/internal/log" +) + +// The file operation.go exists to support both read op and write op types that +// have some pretty common behaviors between them. In C/C++ its assumed that +// the buffer types and other pointers will not be freed between passing them +// to the action setup calls (things like rados_write_op_write or +// rados_read_op_omap_get_vals2) and the call to Operate(...). 
Since there's +// nothing stopping one from sleeping for hours between these calls, or passing +// the op to other functions and calling Operate there, we want a mechanism +// that's (fairly) simple to understand and won't run afoul of Go's garbage +// collection. That's one reason the operation type tracks the steps (the +// parts that track complex inputs and outputs) so that as long as the op +// exists it will have a reference to the step, which will have references +// to the C language types. + +type opKind string + +const ( + readOp opKind = "read" + writeOp opKind = "write" +) + +// OperationError is an error type that may be returned by an Operate call. +// It captures the error from the operate call itself and any errors from +// steps that can return an error. +type OperationError struct { + kind opKind + OpError error + StepErrors map[int]error +} + +func (e OperationError) Error() string { + subErrors := []string{} + if e.OpError != nil { + subErrors = append(subErrors, + fmt.Sprintf("op=%s", e.OpError)) + } + for idx, es := range e.StepErrors { + subErrors = append(subErrors, + fmt.Sprintf("Step#%d=%s", idx, es)) + } + return fmt.Sprintf( + "%s operation error: %s", + e.kind, + strings.Join(subErrors, ", ")) +} + +// opStep provides an interface for types that are tied to the management of +// data being input or output from write ops and read ops. The steps are +// meant to simplify the internals of the ops themselves and be exportable when +// appropriate. If a step is not being exported it should not be returned +// from an ops action function. If the step is exported it should be +// returned from an ops action function. +// +// Not all types implementing opStep are expected to need all the functions +// in the interface. However, for the sake of simplicity on the op side, we use +// the same interface for all cases and expect those implementing opStep +// just embed the without* types that provide no-op implementation of +// functions that make up this interface. +type opStep interface { + // update the state of the step after the call to Operate. + // It can be used to convert values from C and cache them and/or + // communicate a failure of the action associated with the step. The + // update call will only be made once. Implementations are not required to + // handle this call being made more than once. + update() error + // free will be called to free any resources, especially C memory, that + // the step is managing. The behavior of free should be idempotent and + // handle being called more than once. + free() +} + +// operation represents some of the shared underlying mechanisms for +// both read and write op types. +type operation struct { + steps []opStep +} + +// free will call the free method of all the steps this operation +// contains. +func (o *operation) free() { + for i := range o.steps { + o.steps[i].free() + } +} + +// update the operation and the steps it contains. The top-level result +// of the rados call is passed in as ret and used to construct errors. +// The update call of each step is used to update the contents of each +// step and gather any errors from those steps. 
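Usage sketch (editorial, not part of the patch): because Operate-level failures and per-step failures are folded into a single OperationError, callers typically unpack it as below. Assumes `errors`, `fmt` and the vendored rados package are imported; `err` is the value returned by a ReadOp or WriteOp Operate call.

func reportOperateError(err error) {
	var opErr rados.OperationError
	if errors.As(err, &opErr) {
		// error from the rados_*_op_operate call itself, if any
		fmt.Println("operate:", opErr.OpError)
		// errors reported by individual steps (indexes match the order
		// the actions were added to the op)
		for idx, stepErr := range opErr.StepErrors {
			fmt.Printf("step %d: %v\n", idx, stepErr)
		}
		return
	}
	if err != nil {
		fmt.Println("error:", err)
	}
}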
+func (o *operation) update(kind opKind, ret C.int) error { + stepErrors := map[int]error{} + for i := range o.steps { + if err := o.steps[i].update(); err != nil { + stepErrors[i] = err + } + } + if ret == 0 && len(stepErrors) == 0 { + return nil + } + return OperationError{ + kind: kind, + OpError: getError(ret), + StepErrors: stepErrors, + } +} + +func opStepFinalizer(s opStep) { + if s != nil { + log.Warnf("unreachable opStep object found. Cleaning up.") + s.free() + } +} + +// withoutUpdate can be embedded in a struct to help indicate +// the type implements the opStep interface but has a no-op +// update function. +type withoutUpdate struct{} + +func (*withoutUpdate) update() error { return nil } + +// withoutFree can be embedded in a struct to help indicate +// the type implements the opStep interface but has a no-op +// free function. +type withoutFree struct{} + +func (*withoutFree) free() {} + +// withRefs is a embeddable type to help track and free C memory. +type withRefs struct { + refs []unsafe.Pointer +} + +func (w *withRefs) free() { + for i := range w.refs { + C.free(w.refs[i]) + } + w.refs = nil +} + +func (w *withRefs) add(ptr unsafe.Pointer) { + w.refs = append(w.refs, ptr) +} diff --git a/vendor/github.com/ceph/go-ceph/rados/operation_flags.go b/vendor/github.com/ceph/go-ceph/rados/operation_flags.go new file mode 100644 index 0000000000..957f4f2b21 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/operation_flags.go @@ -0,0 +1,37 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +// #include +// +import "C" + +// OperationFlags control the behavior of read and write operations. +type OperationFlags int + +const ( + // OperationNoFlag indicates no special behavior is requested. + OperationNoFlag = OperationFlags(C.LIBRADOS_OPERATION_NOFLAG) + // OperationBalanceReads TODO + OperationBalanceReads = OperationFlags(C.LIBRADOS_OPERATION_BALANCE_READS) + // OperationLocalizeReads TODO + OperationLocalizeReads = OperationFlags(C.LIBRADOS_OPERATION_LOCALIZE_READS) + // OperationOrderReadsWrites TODO + OperationOrderReadsWrites = OperationFlags(C.LIBRADOS_OPERATION_ORDER_READS_WRITES) + // OperationIgnoreCache TODO + OperationIgnoreCache = OperationFlags(C.LIBRADOS_OPERATION_IGNORE_CACHE) + // OperationSkipRWLocks TODO + OperationSkipRWLocks = OperationFlags(C.LIBRADOS_OPERATION_SKIPRWLOCKS) + // OperationIgnoreOverlay TODO + OperationIgnoreOverlay = OperationFlags(C.LIBRADOS_OPERATION_IGNORE_OVERLAY) + // OperationFullTry send request to a full cluster or pool, ops such as delete + // can succeed while other ops will return out-of-space errors. + OperationFullTry = OperationFlags(C.LIBRADOS_OPERATION_FULL_TRY) + // OperationFullForce TODO + OperationFullForce = OperationFlags(C.LIBRADOS_OPERATION_FULL_FORCE) + // OperationIgnoreRedirect TODO + OperationIgnoreRedirect = OperationFlags(C.LIBRADOS_OPERATION_IGNORE_REDIRECT) + // OperationOrderSnap TODO + OperationOrderSnap = OperationFlags(C.LIBRADOS_OPERATION_ORDERSNAP) +) diff --git a/vendor/github.com/ceph/go-ceph/rados/rados.go b/vendor/github.com/ceph/go-ceph/rados/rados.go new file mode 100644 index 0000000000..98d77ca8a3 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/rados.go @@ -0,0 +1,130 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +// #include +import "C" + +import ( + "runtime" + "unsafe" + + "github.com/ceph/go-ceph/internal/log" +) + +const ( + // AllNamespaces is used to reset a selected namespace to all + // namespaces. 
See the IOContext SetNamespace function. + AllNamespaces = C.LIBRADOS_ALL_NSPACES + + // FIXME: for backwards compatibility + + // RadosAllNamespaces is used to reset a selected namespace to all + // namespaces. See the IOContext SetNamespace function. + // + // Deprecated: use AllNamespaces instead + RadosAllNamespaces = AllNamespaces +) + +// OpFlags are flags that can be set on a per-op basis. +type OpFlags uint + +const ( + // OpFlagNone can be use to not set any flags. + OpFlagNone = OpFlags(0) + // OpFlagExcl marks an op to fail a create operation if the object + // already exists. + OpFlagExcl = OpFlags(C.LIBRADOS_OP_FLAG_EXCL) + // OpFlagFailOk allows the transaction to succeed even if the flagged + // op fails. + OpFlagFailOk = OpFlags(C.LIBRADOS_OP_FLAG_FAILOK) + // OpFlagFAdviseRandom indicates read/write op random. + OpFlagFAdviseRandom = OpFlags(C.LIBRADOS_OP_FLAG_FADVISE_RANDOM) + // OpFlagFAdviseSequential indicates read/write op sequential. + OpFlagFAdviseSequential = OpFlags(C.LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL) + // OpFlagFAdviseWillNeed indicates read/write data will be accessed in + // the near future (by someone). + OpFlagFAdviseWillNeed = OpFlags(C.LIBRADOS_OP_FLAG_FADVISE_WILLNEED) + // OpFlagFAdviseDontNeed indicates read/write data will not accessed in + // the near future (by anyone). + OpFlagFAdviseDontNeed = OpFlags(C.LIBRADOS_OP_FLAG_FADVISE_DONTNEED) + // OpFlagFAdviseNoCache indicates read/write data will not accessed + // again (by *this* client). + OpFlagFAdviseNoCache = OpFlags(C.LIBRADOS_OP_FLAG_FADVISE_NOCACHE) +) + +// Version returns the major, minor, and patch components of the version of +// the RADOS library linked against. +func Version() (int, int, int) { + var cMajor, cMinor, cPatch C.int + C.rados_version(&cMajor, &cMinor, &cPatch) + return int(cMajor), int(cMinor), int(cPatch) +} + +func makeConn() *Conn { + return &Conn{connected: false} +} + +func newConn(user *C.char) (*Conn, error) { + conn := makeConn() + ret := C.rados_create(&conn.cluster, user) + + if ret != 0 { + return nil, getError(ret) + } + + runtime.SetFinalizer(conn, freeConn) + return conn, nil +} + +// NewConn creates a new connection object. It returns the connection and an +// error, if any. +func NewConn() (*Conn, error) { + return newConn(nil) +} + +// NewConnWithUser creates a new connection object with a custom username. +// It returns the connection and an error, if any. +func NewConnWithUser(user string) (*Conn, error) { + cUser := C.CString(user) + defer C.free(unsafe.Pointer(cUser)) + return newConn(cUser) +} + +// NewConnWithClusterAndUser creates a new connection object for a specific cluster and username. +// It returns the connection and an error, if any. +func NewConnWithClusterAndUser(clusterName string, userName string) (*Conn, error) { + cClusterName := C.CString(clusterName) + defer C.free(unsafe.Pointer(cClusterName)) + + cName := C.CString(userName) + defer C.free(unsafe.Pointer(cName)) + + conn := makeConn() + ret := C.rados_create2(&conn.cluster, cClusterName, cName, 0) + if ret != 0 { + return nil, getError(ret) + } + + runtime.SetFinalizer(conn, freeConn) + return conn, nil +} + +// freeConn releases resources that are allocated while configuring the +// connection to the cluster. rados_shutdown() should only be needed after a +// successful call to rados_connect(), however if the connection has been +// configured with non-default parameters, some of the parameters may be +// allocated before connecting. 
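Usage sketch (editorial, not part of the patch): typical bring-up with the constructors above. ReadDefaultConfigFile, Connect, OpenIOContext and Shutdown are assumed to be provided by conn.go, which is vendored in this same commit but not shown in this hunk.

func openPool(pool string) (*rados.Conn, *rados.IOContext, error) {
	conn, err := rados.NewConnWithUser("admin")
	if err != nil {
		return nil, nil, err
	}
	if err := conn.ReadDefaultConfigFile(); err != nil {
		return nil, nil, err
	}
	if err := conn.Connect(); err != nil {
		return nil, nil, err
	}
	ioctx, err := conn.OpenIOContext(pool)
	if err != nil {
		conn.Shutdown()
		return nil, nil, err
	}
	return conn, ioctx, nil
}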
rados_shutdown() will free the allocated +// resources, even if there has not been a connection yet. +// +// This function is setup as a destructor/finalizer when rados_create() is +// called. +func freeConn(conn *Conn) { + if conn.cluster != nil { + log.Warnf("unreachable Conn object has not been shut down. Cleaning up.") + C.rados_shutdown(conn.cluster) + // prevent calling rados_shutdown() more than once + conn.cluster = nil + } +} diff --git a/vendor/github.com/ceph/go-ceph/rados/rados_nautilus.go b/vendor/github.com/ceph/go-ceph/rados/rados_nautilus.go new file mode 100644 index 0000000000..74be1b7c51 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/rados_nautilus.go @@ -0,0 +1,13 @@ +//go:build !mimic +// +build !mimic + +package rados + +// #include +import "C" + +const ( + // OpFlagFAdviseFUA optionally support FUA (force unit access) on write + // requests. + OpFlagFAdviseFUA = OpFlags(C.LIBRADOS_OP_FLAG_FADVISE_FUA) +) diff --git a/vendor/github.com/ceph/go-ceph/rados/rados_read_op_assert_version.go b/vendor/github.com/ceph/go-ceph/rados/rados_read_op_assert_version.go new file mode 100644 index 0000000000..ac413e4be8 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/rados_read_op_assert_version.go @@ -0,0 +1,19 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +// +import "C" + +// AssertVersion ensures that the object exists and that its internal version +// number is equal to "ver" before reading. "ver" should be a version number +// previously obtained with IOContext.GetLastVersion(). +// +// Implements: +// +// void rados_read_op_assert_version(rados_read_op_t read_op, +// uint64_t ver) +func (r *ReadOp) AssertVersion(ver uint64) { + C.rados_read_op_assert_version(r.op, C.uint64_t(ver)) +} diff --git a/vendor/github.com/ceph/go-ceph/rados/rados_set_locator.go b/vendor/github.com/ceph/go-ceph/rados/rados_set_locator.go new file mode 100644 index 0000000000..f76b9cc6a2 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/rados_set_locator.go @@ -0,0 +1,28 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +// +import "C" + +import ( + "unsafe" +) + +// SetLocator sets the key for mapping objects to pgs within an io context. +// Until a different locator key is set, all objects in this io context will be placed in the same pg. +// To reset the locator, an empty string must be set. +// +// Implements: +// +// void rados_ioctx_locator_set_key(rados_ioctx_t io, const char *key); +func (ioctx *IOContext) SetLocator(locator string) { + if locator == "" { + C.rados_ioctx_locator_set_key(ioctx.ioctx, nil) + } else { + var cLoc *C.char = C.CString(locator) + defer C.free(unsafe.Pointer(cLoc)) + C.rados_ioctx_locator_set_key(ioctx.ioctx, cLoc) + } +} diff --git a/vendor/github.com/ceph/go-ceph/rados/rados_write_op_assert_version.go b/vendor/github.com/ceph/go-ceph/rados/rados_write_op_assert_version.go new file mode 100644 index 0000000000..622bd383a7 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/rados_write_op_assert_version.go @@ -0,0 +1,19 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +// +import "C" + +// AssertVersion ensures that the object exists and that its internal version +// number is equal to "ver" before writing. "ver" should be a version number +// previously obtained with IOContext.GetLastVersion(). 
+// +// Implements: +// +// void rados_read_op_assert_version(rados_read_op_t read_op, +// uint64_t ver) +func (w *WriteOp) AssertVersion(ver uint64) { + C.rados_write_op_assert_version(w.op, C.uint64_t(ver)) +} diff --git a/vendor/github.com/ceph/go-ceph/rados/rados_write_op_remove.go b/vendor/github.com/ceph/go-ceph/rados/rados_write_op_remove.go new file mode 100644 index 0000000000..234726d1fd --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/rados_write_op_remove.go @@ -0,0 +1,16 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +// +import "C" + +// Remove object. +// +// Implements: +// +// void rados_write_op_remove(rados_write_op_t write_op) +func (w *WriteOp) Remove() { + C.rados_write_op_remove(w.op) +} diff --git a/vendor/github.com/ceph/go-ceph/rados/rados_write_op_setxattr.go b/vendor/github.com/ceph/go-ceph/rados/rados_write_op_setxattr.go new file mode 100644 index 0000000000..0a984afcd4 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/rados_write_op_setxattr.go @@ -0,0 +1,31 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +// +import "C" + +import ( + "unsafe" +) + +// SetXattr sets an xattr. +// +// Implements: +// +// void rados_write_op_setxattr(rados_write_op_t write_op, +// const char * name, +// const char * value, +// size_t value_len) +func (w *WriteOp) SetXattr(name string, value []byte) { + cName := C.CString(name) + defer C.free(unsafe.Pointer(cName)) + + C.rados_write_op_setxattr( + w.op, + cName, + (*C.char)(unsafe.Pointer(&value[0])), + C.size_t(len(value)), + ) +} diff --git a/vendor/github.com/ceph/go-ceph/rados/read_op.go b/vendor/github.com/ceph/go-ceph/rados/read_op.go new file mode 100644 index 0000000000..5c1de381f1 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/read_op.go @@ -0,0 +1,91 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +// #include +// +import "C" + +import ( + "unsafe" +) + +// ReadOp manages a set of discrete object read actions that will be performed +// together atomically. +type ReadOp struct { + operation + op C.rados_read_op_t +} + +// CreateReadOp returns a newly constructed read operation. +func CreateReadOp() *ReadOp { + return &ReadOp{ + op: C.rados_create_read_op(), + } +} + +// Release the resources associated with this read operation. +func (r *ReadOp) Release() { + C.rados_release_read_op(r.op) + r.op = nil + r.free() +} + +// Operate will perform the operation(s). +func (r *ReadOp) Operate(ioctx *IOContext, oid string, flags OperationFlags) error { + if err := ioctx.validate(); err != nil { + return err + } + + cOid := C.CString(oid) + defer C.free(unsafe.Pointer(cOid)) + + ret := C.rados_read_op_operate(r.op, ioctx.ioctx, cOid, C.int(flags)) + return r.update(readOp, ret) +} + +func (r *ReadOp) operateCompat(ioctx *IOContext, oid string) error { + switch err := r.Operate(ioctx, oid, OperationNoFlag).(type) { + case nil: + return nil + case OperationError: + return err.OpError + default: + return err + } +} + +// AssertExists assures the object targeted by the read op exists. +// +// Implements: +// +// void rados_read_op_assert_exists(rados_read_op_t read_op); +func (r *ReadOp) AssertExists() { + C.rados_read_op_assert_exists(r.op) +} + +// GetOmapValues is used to iterate over a set, or sub-set, of omap keys +// as part of a read operation. An GetOmapStep is returned from this +// function. The GetOmapStep may be used to iterate over the key-value +// pairs after the Operate call has been performed. 
+func (r *ReadOp) GetOmapValues(startAfter, filterPrefix string, maxReturn uint64) *GetOmapStep { + gos := newGetOmapStep() + r.steps = append(r.steps, gos) + + cStartAfter := C.CString(startAfter) + cFilterPrefix := C.CString(filterPrefix) + defer C.free(unsafe.Pointer(cStartAfter)) + defer C.free(unsafe.Pointer(cFilterPrefix)) + + C.rados_read_op_omap_get_vals2( + r.op, + cStartAfter, + cFilterPrefix, + C.uint64_t(maxReturn), + &gos.iter, + gos.more, + gos.rval, + ) + return gos +} diff --git a/vendor/github.com/ceph/go-ceph/rados/read_op_omap_get_vals_by_keys.go b/vendor/github.com/ceph/go-ceph/rados/read_op_omap_get_vals_by_keys.go new file mode 100644 index 0000000000..c2f1383494 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/read_op_omap_get_vals_by_keys.go @@ -0,0 +1,113 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +// +import "C" + +import ( + "unsafe" + + "github.com/ceph/go-ceph/internal/cutil" +) + +// ReadOpOmapGetValsByKeysStep holds the result of the +// GetOmapValuesByKeys read operation. +// Result is valid only after Operate() was called. +type ReadOpOmapGetValsByKeysStep struct { + // C arguments + + iter C.rados_omap_iter_t + prval *C.int + + // Internal state + + // canIterate is only set after the operation is performed and is + // intended to prevent premature fetching of data. + canIterate bool +} + +func newReadOpOmapGetValsByKeysStep() *ReadOpOmapGetValsByKeysStep { + s := &ReadOpOmapGetValsByKeysStep{ + prval: (*C.int)(C.malloc(C.sizeof_int)), + } + + return s +} + +func (s *ReadOpOmapGetValsByKeysStep) free() { + s.canIterate = false + C.rados_omap_get_end(s.iter) + + C.free(unsafe.Pointer(s.prval)) + s.prval = nil +} + +func (s *ReadOpOmapGetValsByKeysStep) update() error { + err := getError(*s.prval) + s.canIterate = (err == nil) + + return err +} + +// Next gets the next omap key/value pair referenced by +// ReadOpOmapGetValsByKeysStep's internal iterator. +// If there are no more elements to retrieve, (nil, nil) is returned. +// May be called only after Operate() finished. +func (s *ReadOpOmapGetValsByKeysStep) Next() (*OmapKeyValue, error) { + if !s.canIterate { + return nil, ErrOperationIncomplete + } + + var ( + cKey *C.char + cVal *C.char + cKeyLen C.size_t + cValLen C.size_t + ) + + ret := C.rados_omap_get_next2(s.iter, &cKey, &cVal, &cKeyLen, &cValLen) + if ret != 0 { + return nil, getError(ret) + } + + if cKey == nil { + // Iterator has reached the end of the list. + return nil, nil + } + + return &OmapKeyValue{ + Key: string(C.GoBytes(unsafe.Pointer(cKey), C.int(cKeyLen))), + Value: C.GoBytes(unsafe.Pointer(cVal), C.int(cValLen)), + }, nil +} + +// GetOmapValuesByKeys starts iterating over specific key/value pairs. 
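Usage sketch (editorial, not part of the patch): AssertExists and GetOmapValues combined in one ReadOp, with the returned GetOmapStep drained after Operate. Assumes the vendored rados package is imported; the "cfg." prefix is only an illustrative filter.

func readConfigKeys(ioctx *rados.IOContext, oid string) (map[string][]byte, error) {
	op := rados.CreateReadOp()
	defer op.Release()
	op.AssertExists() // fail the whole op if the object is missing
	step := op.GetOmapValues("", "cfg.", 128)
	if err := op.Operate(ioctx, oid, rados.OperationNoFlag); err != nil {
		return nil, err
	}
	out := map[string][]byte{}
	for {
		kv, err := step.Next()
		if err != nil {
			return nil, err
		}
		if kv == nil {
			break // exhausted; step.More() reports if another page remains
		}
		out[kv.Key] = kv.Value
	}
	return out, nil
}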
+// +// Implements: +// +// void rados_read_op_omap_get_vals_by_keys2(rados_read_op_t read_op, +// char const * const * keys, +// size_t num_keys, +// const size_t * key_lens, +// rados_omap_iter_t * iter, +// int * prval) +func (r *ReadOp) GetOmapValuesByKeys(keys []string) *ReadOpOmapGetValsByKeysStep { + s := newReadOpOmapGetValsByKeysStep() + r.steps = append(r.steps, s) + + cKeys := cutil.NewBufferGroupStrings(keys) + defer cKeys.Free() + + C.rados_read_op_omap_get_vals_by_keys2( + r.op, + (**C.char)(cKeys.BuffersPtr()), + C.size_t(len(keys)), + (*C.size_t)(cKeys.LengthsPtr()), + &s.iter, + s.prval, + ) + + return s +} diff --git a/vendor/github.com/ceph/go-ceph/rados/read_op_read.go b/vendor/github.com/ceph/go-ceph/rados/read_op_read.go new file mode 100644 index 0000000000..dfdd1584ac --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/read_op_read.go @@ -0,0 +1,72 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +// +import "C" + +import ( + "unsafe" +) + +// ReadOpReadStep holds the result of the Read read operation. +// Result is valid only after Operate() was called. +type ReadOpReadStep struct { + // C returned data: + bytesRead *C.size_t + prval *C.int + + BytesRead int64 // Bytes read by this action. + Result int // Result of this action. +} + +func (s *ReadOpReadStep) update() error { + s.BytesRead = (int64)(*s.bytesRead) + s.Result = (int)(*s.prval) + + return nil +} + +func (s *ReadOpReadStep) free() { + C.free(unsafe.Pointer(s.bytesRead)) + C.free(unsafe.Pointer(s.prval)) + + s.bytesRead = nil + s.prval = nil +} + +func newReadOpReadStep() *ReadOpReadStep { + return &ReadOpReadStep{ + bytesRead: (*C.size_t)(C.malloc(C.sizeof_size_t)), + prval: (*C.int)(C.malloc(C.sizeof_int)), + } +} + +// Read bytes from offset into buffer. +// len(buffer) is the maximum number of bytes read from the object. +// buffer[:ReadOpReadStep.BytesRead] then contains object data. 
+// +// Implements: +// +// void rados_read_op_read(rados_read_op_t read_op, +// uint64_t offset, +// size_t len, +// char * buffer, +// size_t * bytes_read, +// int * prval) +func (r *ReadOp) Read(offset uint64, buffer []byte) *ReadOpReadStep { + oe := newReadStep(buffer, offset) + readStep := newReadOpReadStep() + r.steps = append(r.steps, oe, readStep) + C.rados_read_op_read( + r.op, + oe.cOffset, + oe.cReadLen, + oe.cBuffer, + readStep.bytesRead, + readStep.prval, + ) + + return readStep +} diff --git a/vendor/github.com/ceph/go-ceph/rados/read_step.go b/vendor/github.com/ceph/go-ceph/rados/read_step.go new file mode 100644 index 0000000000..732f37b00f --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/read_step.go @@ -0,0 +1,31 @@ +package rados + +// #include +import "C" + +import ( + "unsafe" +) + +type readStep struct { + withoutUpdate + withoutFree + // the c pointer utilizes the Go byteslice data and no free is needed + + // inputs: + b []byte + + // arguments: + cBuffer *C.char + cReadLen C.size_t + cOffset C.uint64_t +} + +func newReadStep(b []byte, offset uint64) *readStep { + return &readStep{ + b: b, + cBuffer: (*C.char)(unsafe.Pointer(&b[0])), // TODO: must be pinned + cReadLen: C.size_t(len(b)), + cOffset: C.uint64_t(offset), + } +} diff --git a/vendor/github.com/ceph/go-ceph/rados/snapshot.go b/vendor/github.com/ceph/go-ceph/rados/snapshot.go new file mode 100644 index 0000000000..183a119d2c --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/snapshot.go @@ -0,0 +1,196 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +import "C" + +import ( + "time" + "unsafe" + + "github.com/ceph/go-ceph/internal/retry" +) + +// CreateSnap creates a pool-wide snapshot. +// +// Implements: +// int rados_ioctx_snap_create(rados_ioctx_t io, const char *snapname) +func (ioctx *IOContext) CreateSnap(snapName string) error { + if err := ioctx.validate(); err != nil { + return err + } + + cSnapName := C.CString(snapName) + defer C.free(unsafe.Pointer(cSnapName)) + + ret := C.rados_ioctx_snap_create(ioctx.ioctx, cSnapName) + return getError(ret) +} + +// RemoveSnap deletes the pool snapshot. +// +// Implements: +// +// int rados_ioctx_snap_remove(rados_ioctx_t io, const char *snapname) +func (ioctx *IOContext) RemoveSnap(snapName string) error { + if err := ioctx.validate(); err != nil { + return err + } + + cSnapName := C.CString(snapName) + defer C.free(unsafe.Pointer(cSnapName)) + + ret := C.rados_ioctx_snap_remove(ioctx.ioctx, cSnapName) + return getError(ret) +} + +// SnapID represents the ID of a rados snapshot. +type SnapID C.rados_snap_t + +// LookupSnap returns the ID of a pool snapshot. +// +// Implements: +// +// int rados_ioctx_snap_lookup(rados_ioctx_t io, const char *name, rados_snap_t *id) +func (ioctx *IOContext) LookupSnap(snapName string) (SnapID, error) { + var snapID SnapID + + if err := ioctx.validate(); err != nil { + return snapID, err + } + + cSnapName := C.CString(snapName) + defer C.free(unsafe.Pointer(cSnapName)) + + ret := C.rados_ioctx_snap_lookup( + ioctx.ioctx, + cSnapName, + (*C.rados_snap_t)(&snapID)) + return snapID, getError(ret) +} + +// GetSnapName returns the name of a pool snapshot with the given snapshot ID. 
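Usage sketch (editorial, not part of the patch): a single-extent read through a ReadOp, trimming the buffer to the bytes actually returned as described above.

func readHead(ioctx *rados.IOContext, oid string) ([]byte, error) {
	buf := make([]byte, 4096)
	op := rados.CreateReadOp()
	defer op.Release()
	step := op.Read(0, buf) // read up to len(buf) bytes from offset 0
	if err := op.Operate(ioctx, oid, rados.OperationNoFlag); err != nil {
		return nil, err
	}
	return buf[:step.BytesRead], nil
}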
+// +// Implements: +// +// int rados_ioctx_snap_get_name(rados_ioctx_t io, rados_snap_t id, char *name, int maxlen) +func (ioctx *IOContext) GetSnapName(snapID SnapID) (string, error) { + if err := ioctx.validate(); err != nil { + return "", err + } + + var ( + buf []byte + err error + ) + // range from 1k to 64KiB + retry.WithSizes(1024, 1<<16, func(len int) retry.Hint { + cLen := C.int(len) + buf = make([]byte, cLen) + ret := C.rados_ioctx_snap_get_name( + ioctx.ioctx, + (C.rados_snap_t)(snapID), + (*C.char)(unsafe.Pointer(&buf[0])), + cLen) + err = getError(ret) + return retry.Size(int(cLen)).If(err == errRange) + }) + + if err != nil { + return "", err + } + return C.GoString((*C.char)(unsafe.Pointer(&buf[0]))), nil +} + +// GetSnapStamp returns the time of the pool snapshot creation. +// +// Implements: +// +// int rados_ioctx_snap_get_stamp(rados_ioctx_t io, rados_snap_t id, time_t *t) +func (ioctx *IOContext) GetSnapStamp(snapID SnapID) (time.Time, error) { + var cTime C.time_t + + if err := ioctx.validate(); err != nil { + return time.Unix(int64(cTime), 0), err + } + + ret := C.rados_ioctx_snap_get_stamp( + ioctx.ioctx, + (C.rados_snap_t)(snapID), + &cTime) + return time.Unix(int64(cTime), 0), getError(ret) +} + +// ListSnaps returns a slice containing the SnapIDs of existing pool snapshots. +// +// Implements: +// +// int rados_ioctx_snap_list(rados_ioctx_t io, rados_snap_t *snaps, int maxlen) +func (ioctx *IOContext) ListSnaps() ([]SnapID, error) { + if err := ioctx.validate(); err != nil { + return nil, err + } + + var ( + snapList []SnapID + cLen C.int + err error + ret C.int + ) + retry.WithSizes(100, 1000, func(maxlen int) retry.Hint { + cLen = C.int(maxlen) + snapList = make([]SnapID, cLen) + ret = C.rados_ioctx_snap_list( + ioctx.ioctx, + (*C.rados_snap_t)(unsafe.Pointer(&snapList[0])), + cLen) + err = getErrorIfNegative(ret) + return retry.Size(int(cLen)).If(err == errRange) + }) + + if err != nil { + return nil, err + } + return snapList[:ret], nil +} + +// RollbackSnap rollbacks the object with key oID to the pool snapshot. +// The contents of the object will be the same as when the snapshot was taken. +// +// Implements: +// +// int rados_ioctx_snap_rollback(rados_ioctx_t io, const char *oid, const char *snapname); +func (ioctx *IOContext) RollbackSnap(oid, snapName string) error { + if err := ioctx.validate(); err != nil { + return err + } + + coid := C.CString(oid) + defer C.free(unsafe.Pointer(coid)) + cSnapName := C.CString(snapName) + defer C.free(unsafe.Pointer(cSnapName)) + + ret := C.rados_ioctx_snap_rollback(ioctx.ioctx, coid, cSnapName) + return getError(ret) +} + +// SnapHead is the representation of LIBRADOS_SNAP_HEAD from librados. +// SnapHead can be used to reset the IOContext to stop reading from a snapshot. +const SnapHead = SnapID(C.LIBRADOS_SNAP_HEAD) + +// SetReadSnap sets the snapshot from which reads are performed. +// Subsequent reads will return data as it was at the time of that snapshot. +// Pass SnapHead for no snapshot (i.e. normal operation). 
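Usage sketch (editorial, not part of the patch): a pool snapshot lifecycle built from the calls above. Assumes `fmt` and the vendored rados package are imported.

func snapshotAndRollback(ioctx *rados.IOContext, oid string) error {
	if err := ioctx.CreateSnap("before-upgrade"); err != nil {
		return err
	}
	snapID, err := ioctx.LookupSnap("before-upgrade")
	if err != nil {
		return err
	}
	when, err := ioctx.GetSnapStamp(snapID)
	if err != nil {
		return err
	}
	fmt.Println("snapshot taken at", when)
	// ... the object is modified here ...
	if err := ioctx.RollbackSnap(oid, "before-upgrade"); err != nil {
		return err
	}
	return ioctx.RemoveSnap("before-upgrade")
}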
+// +// Implements: +// +// void rados_ioctx_snap_set_read(rados_ioctx_t io, rados_snap_t snap); +func (ioctx *IOContext) SetReadSnap(snapID SnapID) error { + if err := ioctx.validate(); err != nil { + return err + } + + C.rados_ioctx_snap_set_read(ioctx.ioctx, (C.rados_snap_t)(snapID)) + return nil +} diff --git a/vendor/github.com/ceph/go-ceph/rados/watcher.go b/vendor/github.com/ceph/go-ceph/rados/watcher.go new file mode 100644 index 0000000000..7cd7e90f3d --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/watcher.go @@ -0,0 +1,375 @@ +package rados + +/* +#cgo LDFLAGS: -lrados +#include +#include +extern void watchNotifyCb(void*, uint64_t, uint64_t, uint64_t, void*, size_t); +extern void watchErrorCb(void*, uint64_t, int); +*/ +import "C" + +import ( + "encoding/binary" + "fmt" + "math" + "sync" + "time" + "unsafe" + + "github.com/ceph/go-ceph/internal/log" +) + +type ( + // WatcherID is the unique id of a Watcher. + WatcherID uint64 + // NotifyID is the unique id of a NotifyEvent. + NotifyID uint64 + // NotifierID is the unique id of a notifying client. + NotifierID uint64 +) + +// NotifyEvent is received by a watcher for each notification. +type NotifyEvent struct { + ID NotifyID + WatcherID WatcherID + NotifierID NotifierID + Data []byte +} + +// NotifyAck represents an acknowleged notification. +type NotifyAck struct { + WatcherID WatcherID + NotifierID NotifierID + Response []byte +} + +// NotifyTimeout represents an unacknowleged notification. +type NotifyTimeout struct { + WatcherID WatcherID + NotifierID NotifierID +} + +// Watcher receives all notifications for certain object. +type Watcher struct { + id WatcherID + oid string + ioctx *IOContext + events chan NotifyEvent + errors chan error + done chan struct{} +} + +var ( + watchers = map[WatcherID]*Watcher{} + watchersMtx sync.RWMutex +) + +// Watch creates a Watcher for the specified object. +// +// A Watcher receives all notifications that are sent to the object on which it +// has been created. It exposes two read-only channels: Events() receives all +// the NotifyEvents and Errors() receives all occuring errors. A typical code +// creating a Watcher could look like this: +// +// watcher, err := ioctx.Watch(oid) +// go func() { // event handler +// for ne := range watcher.Events() { +// ... +// ne.Ack([]byte("response data...")) +// ... +// } +// }() +// go func() { // error handler +// for err := range watcher.Errors() { +// ... handle err ... +// } +// }() +// +// CAUTION: the Watcher references the IOContext in which it has been created. +// Therefore all watchers must be deleted with the Delete() method before the +// IOContext is being destroyed. +// +// Implements: +// +// int rados_watch2(rados_ioctx_t io, const char* o, uint64_t* cookie, +// rados_watchcb2_t watchcb, rados_watcherrcb_t watcherrcb, void* arg) +func (ioctx *IOContext) Watch(obj string) (*Watcher, error) { + return ioctx.WatchWithTimeout(obj, 0) +} + +// WatchWithTimeout creates a watcher on an object. Same as Watcher(), but +// different timeout than the default can be specified. 
+// +// Implements: +// +// int rados_watch3(rados_ioctx_t io, const char *o, uint64_t *cookie, +// rados_watchcb2_t watchcb, rados_watcherrcb_t watcherrcb, uint32_t timeout, +// void *arg); +func (ioctx *IOContext) WatchWithTimeout(oid string, timeout time.Duration) (*Watcher, error) { + cObj := C.CString(oid) + defer C.free(unsafe.Pointer(cObj)) + var id C.uint64_t + watchersMtx.Lock() + defer watchersMtx.Unlock() + ret := C.rados_watch3( + ioctx.ioctx, + cObj, + &id, + (C.rados_watchcb2_t)(C.watchNotifyCb), + (C.rados_watcherrcb_t)(C.watchErrorCb), + C.uint32_t(timeout.Milliseconds()/1000), + nil, + ) + if err := getError(ret); err != nil { + return nil, err + } + evCh := make(chan NotifyEvent) + errCh := make(chan error) + w := &Watcher{ + id: WatcherID(id), + ioctx: ioctx, + oid: oid, + events: evCh, + errors: errCh, + done: make(chan struct{}), + } + watchers[WatcherID(id)] = w + return w, nil +} + +// ID returns the WatcherId of the Watcher +func (w *Watcher) ID() WatcherID { + return w.id +} + +// Events returns a read-only channel, that receives all notifications that are +// sent to the object of the Watcher. +func (w *Watcher) Events() <-chan NotifyEvent { + return w.events +} + +// Errors returns a read-only channel, that receives all errors for the Watcher. +func (w *Watcher) Errors() <-chan error { + return w.errors +} + +// Check on the status of a Watcher. +// +// Returns the time since it was last confirmed. If there is an error, the +// Watcher is no longer valid, and should be destroyed with the Delete() method. +// +// Implements: +// +// int rados_watch_check(rados_ioctx_t io, uint64_t cookie) +func (w *Watcher) Check() (time.Duration, error) { + ret := C.rados_watch_check(w.ioctx.ioctx, C.uint64_t(w.id)) + if ret < 0 { + return 0, getError(ret) + } + return time.Millisecond * time.Duration(ret), nil +} + +// Delete the watcher. This closes both the event and error channel. +// +// Implements: +// +// int rados_unwatch2(rados_ioctx_t io, uint64_t cookie) +func (w *Watcher) Delete() error { + watchersMtx.Lock() + _, ok := watchers[w.id] + if ok { + delete(watchers, w.id) + } + watchersMtx.Unlock() + if !ok { + return nil + } + ret := C.rados_unwatch2(w.ioctx.ioctx, C.uint64_t(w.id)) + if ret != 0 { + return getError(ret) + } + close(w.done) // unblock blocked callbacks + close(w.events) + close(w.errors) + return nil +} + +// Notify sends a notification with the provided data to all Watchers of the +// specified object. +// +// CAUTION: even if the error is not nil. the returned slices +// might still contain data. +func (ioctx *IOContext) Notify(obj string, data []byte) ([]NotifyAck, []NotifyTimeout, error) { + return ioctx.NotifyWithTimeout(obj, data, 0) +} + +// NotifyWithTimeout is like Notify() but with a different timeout than the +// default. 
+// +// Implements: +// +// int rados_notify2(rados_ioctx_t io, const char* o, const char* buf, int buf_len, +// uint64_t timeout_ms, char** reply_buffer, size_t* reply_buffer_len) +func (ioctx *IOContext) NotifyWithTimeout(obj string, data []byte, timeout time.Duration) ([]NotifyAck, + []NotifyTimeout, error) { + cObj := C.CString(obj) + defer C.free(unsafe.Pointer(cObj)) + var cResponse *C.char + defer C.rados_buffer_free(cResponse) + var responseLen C.size_t + var dataPtr *C.char + if len(data) > 0 { + dataPtr = (*C.char)(unsafe.Pointer(&data[0])) + } + ret := C.rados_notify2( + ioctx.ioctx, + cObj, + dataPtr, + C.int(len(data)), + C.uint64_t(timeout.Milliseconds()), + &cResponse, + &responseLen, + ) + // cResponse has been set even if an error is returned, so we decode it anyway + acks, timeouts := decodeNotifyResponse(cResponse, responseLen) + return acks, timeouts, getError(ret) +} + +// Ack sends an acknowledgement with the specified response data to the notfier +// of the NotifyEvent. If a notify is not ack'ed, the originating Notify() call +// blocks and eventiually times out. +// +// Implements: +// +// int rados_notify_ack(rados_ioctx_t io, const char *o, uint64_t notify_id, +// uint64_t cookie, const char *buf, int buf_len) +func (ne *NotifyEvent) Ack(response []byte) error { + watchersMtx.RLock() + w, ok := watchers[ne.WatcherID] + watchersMtx.RUnlock() + if !ok { + return fmt.Errorf("can't ack on deleted watcher %v", ne.WatcherID) + } + cOID := C.CString(w.oid) + defer C.free(unsafe.Pointer(cOID)) + var respPtr *C.char + if len(response) > 0 { + respPtr = (*C.char)(unsafe.Pointer(&response[0])) + } + ret := C.rados_notify_ack( + w.ioctx.ioctx, + cOID, + C.uint64_t(ne.ID), + C.uint64_t(ne.WatcherID), + respPtr, + C.int(len(response)), + ) + return getError(ret) +} + +// WatcherFlush flushes all pending notifications of the cluster. 
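Usage sketch (editorial, not part of the patch): one side watches an object and acknowledges each notification; the other side notifies and inspects the acks and timeouts. Assumes `fmt` and the vendored rados package are imported.

func watchAndNotify(ioctx *rados.IOContext, oid string) error {
	watcher, err := ioctx.Watch(oid)
	if err != nil {
		return err
	}
	defer watcher.Delete() // also closes the Events and Errors channels
	go func() {
		for ne := range watcher.Events() {
			_ = ne.Ack([]byte("seen: " + string(ne.Data)))
		}
	}()
	go func() {
		for werr := range watcher.Errors() {
			fmt.Println("watcher error:", werr)
		}
	}()

	acks, timeouts, err := ioctx.Notify(oid, []byte("reload"))
	if err != nil {
		return err
	}
	fmt.Printf("%d acks, %d timeouts\n", len(acks), len(timeouts))
	return nil
}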
+// +// Implements: +// +// int rados_watch_flush(rados_t cluster) +func (c *Conn) WatcherFlush() error { + if !c.connected { + return ErrNotConnected + } + ret := C.rados_watch_flush(c.cluster) + return getError(ret) +} + +// decoder for this notify response format: +// +// le32 num_acks +// { +// le64 gid global id for the client (for client.1234 that's 1234) +// le64 cookie cookie for the client +// le32 buflen length of reply message buffer +// u8 buflen payload +// } num_acks +// le32 num_timeouts +// { +// le64 gid global id for the client +// le64 cookie cookie for the client +// } num_timeouts +// +// NOTE: starting with pacific this is implemented as a C function and this can +// be replaced later +func decodeNotifyResponse(response *C.char, len C.size_t) ([]NotifyAck, []NotifyTimeout) { + if len == 0 || response == nil { + return nil, nil + } + b := (*[math.MaxInt32]byte)(unsafe.Pointer(response))[:len:len] + pos := 0 + + num := binary.LittleEndian.Uint32(b[pos:]) + pos += 4 + acks := make([]NotifyAck, num) + for i := range acks { + acks[i].NotifierID = NotifierID(binary.LittleEndian.Uint64(b[pos:])) + pos += 8 + acks[i].WatcherID = WatcherID(binary.LittleEndian.Uint64(b[pos:])) + pos += 8 + dataLen := binary.LittleEndian.Uint32(b[pos:]) + pos += 4 + if dataLen > 0 { + acks[i].Response = C.GoBytes(unsafe.Pointer(&b[pos]), C.int(dataLen)) + pos += int(dataLen) + } + } + + num = binary.LittleEndian.Uint32(b[pos:]) + pos += 4 + timeouts := make([]NotifyTimeout, num) + for i := range timeouts { + timeouts[i].NotifierID = NotifierID(binary.LittleEndian.Uint64(b[pos:])) + pos += 8 + timeouts[i].WatcherID = WatcherID(binary.LittleEndian.Uint64(b[pos:])) + pos += 8 + } + return acks, timeouts +} + +//export watchNotifyCb +func watchNotifyCb(_ unsafe.Pointer, notifyID C.uint64_t, id C.uint64_t, + notifierID C.uint64_t, cData unsafe.Pointer, dataLen C.size_t) { + ev := NotifyEvent{ + ID: NotifyID(notifyID), + WatcherID: WatcherID(id), + NotifierID: NotifierID(notifierID), + } + if dataLen > 0 { + ev.Data = C.GoBytes(cData, C.int(dataLen)) + } + watchersMtx.RLock() + w, ok := watchers[WatcherID(id)] + watchersMtx.RUnlock() + if !ok { + // usually this should not happen, but who knows + log.Warnf("received notification for unknown watcher ID: %#v", ev) + return + } + select { + case <-w.done: // unblock when deleted + case w.events <- ev: + } +} + +//export watchErrorCb +func watchErrorCb(_ unsafe.Pointer, id C.uint64_t, err C.int) { + watchersMtx.RLock() + w, ok := watchers[WatcherID(id)] + watchersMtx.RUnlock() + if !ok { + // usually this should not happen, but who knows + log.Warnf("received error for unknown watcher ID: id=%d err=%#v", id, err) + return + } + select { + case <-w.done: // unblock when deleted + case w.errors <- getError(err): + } +} diff --git a/vendor/github.com/ceph/go-ceph/rados/write_op.go b/vendor/github.com/ceph/go-ceph/rados/write_op.go new file mode 100644 index 0000000000..060537f402 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/write_op.go @@ -0,0 +1,199 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +// #include +// +import "C" + +import ( + "unsafe" + + "github.com/ceph/go-ceph/internal/cutil" + ts "github.com/ceph/go-ceph/internal/timespec" +) + +// Timespec is a public type for the internal C 'struct timespec' +type Timespec ts.Timespec + +// WriteOp manages a set of discrete actions that will be performed together +// atomically. 
+type WriteOp struct { + operation + op C.rados_write_op_t +} + +// CreateWriteOp returns a newly constructed write operation. +func CreateWriteOp() *WriteOp { + return &WriteOp{ + op: C.rados_create_write_op(), + } +} + +// Release the resources associated with this write operation. +func (w *WriteOp) Release() { + C.rados_release_write_op(w.op) + w.op = nil + w.free() +} + +func (w WriteOp) operate2( + ioctx *IOContext, oid string, mtime *Timespec, flags OperationFlags) error { + + if err := ioctx.validate(); err != nil { + return err + } + + cOid := C.CString(oid) + defer C.free(unsafe.Pointer(cOid)) + var cMtime *C.struct_timespec + if mtime != nil { + cMtime = &C.struct_timespec{} + ts.CopyToCStruct( + ts.Timespec(*mtime), + ts.CTimespecPtr(cMtime)) + } + + ret := C.rados_write_op_operate2( + w.op, ioctx.ioctx, cOid, cMtime, C.int(flags)) + return w.update(writeOp, ret) +} + +// Operate will perform the operation(s). +func (w *WriteOp) Operate(ioctx *IOContext, oid string, flags OperationFlags) error { + return w.operate2(ioctx, oid, nil, flags) +} + +// OperateWithMtime will perform the operation while setting the modification +// time stamp to the supplied value. +func (w *WriteOp) OperateWithMtime( + ioctx *IOContext, oid string, mtime Timespec, flags OperationFlags) error { + + return w.operate2(ioctx, oid, &mtime, flags) +} + +func (w *WriteOp) operateCompat(ioctx *IOContext, oid string) error { + switch err := w.Operate(ioctx, oid, OperationNoFlag).(type) { + case nil: + return nil + case OperationError: + return err.OpError + default: + return err + } +} + +// Create a rados object. +func (w *WriteOp) Create(exclusive CreateOption) { + // category, the 3rd param, is deprecated and has no effect so we do not + // implement it in go-ceph + C.rados_write_op_create(w.op, C.int(exclusive), nil) +} + +// SetOmap appends the map `pairs` to the omap `oid`. +func (w *WriteOp) SetOmap(pairs map[string][]byte) { + keys := make([]string, len(pairs)) + values := make([][]byte, len(pairs)) + idx := 0 + for k, v := range pairs { + keys[idx] = k + values[idx] = v + idx++ + } + + cKeys := cutil.NewBufferGroupStrings(keys) + cValues := cutil.NewBufferGroupBytes(values) + defer cKeys.Free() + defer cValues.Free() + + C.rados_write_op_omap_set2( + w.op, + (**C.char)(cKeys.BuffersPtr()), + (**C.char)(cValues.BuffersPtr()), + (*C.size_t)(cKeys.LengthsPtr()), + (*C.size_t)(cValues.LengthsPtr()), + (C.size_t)(len(pairs))) +} + +// RmOmapKeys removes the specified `keys` from the omap `oid`. +func (w *WriteOp) RmOmapKeys(keys []string) { + cKeys := cutil.NewBufferGroupStrings(keys) + defer cKeys.Free() + + C.rados_write_op_omap_rm_keys2( + w.op, + (**C.char)(cKeys.BuffersPtr()), + (*C.size_t)(cKeys.LengthsPtr()), + (C.size_t)(len(keys))) +} + +// CleanOmap clears the omap `oid`. +func (w *WriteOp) CleanOmap() { + C.rados_write_op_omap_clear(w.op) +} + +// AssertExists assures the object targeted by the write op exists. +// +// Implements: +// +// void rados_write_op_assert_exists(rados_write_op_t write_op); +func (w *WriteOp) AssertExists() { + C.rados_write_op_assert_exists(w.op) +} + +// Write a given byte slice at the supplied offset. 
+// +// Implements: +// +// void rados_write_op_write(rados_write_op_t write_op, +// const char *buffer, +// size_t len, +// uint64_t offset); +func (w *WriteOp) Write(b []byte, offset uint64) { + oe := newWriteStep(b, 0, offset) + w.steps = append(w.steps, oe) + C.rados_write_op_write( + w.op, + oe.cBuffer, + oe.cDataLen, + oe.cOffset) +} + +// WriteFull writes a given byte slice as the whole object, +// atomically replacing it. +// +// Implements: +// +// void rados_write_op_write_full(rados_write_op_t write_op, +// const char *buffer, +// size_t len); +func (w *WriteOp) WriteFull(b []byte) { + oe := newWriteStep(b, 0, 0) + w.steps = append(w.steps, oe) + C.rados_write_op_write_full( + w.op, + oe.cBuffer, + oe.cDataLen) +} + +// WriteSame write a given byte slice to the object multiple times, until +// writeLen is satisfied. +// +// Implements: +// +// void rados_write_op_writesame(rados_write_op_t write_op, +// const char *buffer, +// size_t data_len, +// size_t write_len, +// uint64_t offset); +func (w *WriteOp) WriteSame(b []byte, writeLen, offset uint64) { + oe := newWriteStep(b, writeLen, offset) + w.steps = append(w.steps, oe) + C.rados_write_op_writesame( + w.op, + oe.cBuffer, + oe.cDataLen, + oe.cWriteLen, + oe.cOffset) +} diff --git a/vendor/github.com/ceph/go-ceph/rados/write_op_cmpext.go b/vendor/github.com/ceph/go-ceph/rados/write_op_cmpext.go new file mode 100644 index 0000000000..24112452f3 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/write_op_cmpext.go @@ -0,0 +1,60 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +// +import "C" + +import ( + "unsafe" +) + +// WriteOpCmpExtStep holds result of the CmpExt write operation. +// Result is valid only after Operate() was called. +type WriteOpCmpExtStep struct { + // C returned data: + prval *C.int + + // Result of the CmpExt write operation. + Result int +} + +func (s *WriteOpCmpExtStep) update() error { + s.Result = int(*s.prval) + return nil +} + +func (s *WriteOpCmpExtStep) free() { + C.free(unsafe.Pointer(s.prval)) + s.prval = nil +} + +func newWriteOpCmpExtStep() *WriteOpCmpExtStep { + return &WriteOpCmpExtStep{ + prval: (*C.int)(C.malloc(C.sizeof_int)), + } +} + +// CmpExt ensures that given object range (extent) satisfies comparison. +// +// Implements: +// +// void rados_write_op_cmpext(rados_write_op_t write_op, +// const char * cmp_buf, +// size_t cmp_len, +// uint64_t off, +// int * prval); +func (w *WriteOp) CmpExt(b []byte, offset uint64) *WriteOpCmpExtStep { + oe := newWriteStep(b, 0, offset) + cmpExtStep := newWriteOpCmpExtStep() + w.steps = append(w.steps, oe, cmpExtStep) + C.rados_write_op_cmpext( + w.op, + oe.cBuffer, + oe.cDataLen, + oe.cOffset, + cmpExtStep.prval) + + return cmpExtStep +} diff --git a/vendor/github.com/ceph/go-ceph/rados/write_op_set_alloc_hint.go b/vendor/github.com/ceph/go-ceph/rados/write_op_set_alloc_hint.go new file mode 100644 index 0000000000..928aa4e27d --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/write_op_set_alloc_hint.go @@ -0,0 +1,26 @@ +package rados + +// #cgo LDFLAGS: -lrados +// #include +// #include +// +import "C" + +// SetAllocationHint sets allocation hint for an object. This is an advisory +// operation, it will always succeed (as if it was submitted with a +// LIBRADOS_OP_FLAG_FAILOK flag set) and is not guaranteed to do anything on +// the backend. 
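Usage sketch (editorial, not part of the patch): replacing an object's data and its omap metadata in one atomic WriteOp, using the actions defined above. The "sha256" key is only an illustrative name.

func writeWithMetadata(ioctx *rados.IOContext, oid string, data, digest []byte) error {
	op := rados.CreateWriteOp()
	defer op.Release()
	op.WriteFull(data) // replace the whole object body
	op.SetOmap(map[string][]byte{
		"sha256": digest, // hypothetical metadata key
	})
	// both actions succeed or fail together
	return op.Operate(ioctx, oid, rados.OperationNoFlag)
}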
+// +// Implements: +// +// void rados_write_op_set_alloc_hint2(rados_write_op_t write_op, +// uint64_t expected_object_size, +// uint64_t expected_write_size, +// uint32_t flags); +func (w *WriteOp) SetAllocationHint(expectedObjectSize uint64, expectedWriteSize uint64, flags AllocHintFlags) { + C.rados_write_op_set_alloc_hint2( + w.op, + C.uint64_t(expectedObjectSize), + C.uint64_t(expectedWriteSize), + C.uint32_t(flags)) +} diff --git a/vendor/github.com/ceph/go-ceph/rados/write_step.go b/vendor/github.com/ceph/go-ceph/rados/write_step.go new file mode 100644 index 0000000000..3774a6e9fa --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rados/write_step.go @@ -0,0 +1,33 @@ +package rados + +// #include +import "C" + +import ( + "unsafe" +) + +type writeStep struct { + withoutUpdate + withoutFree + // the c pointer utilizes the Go byteslice data and no free is needed + + // inputs: + b []byte + + // arguments: + cBuffer *C.char + cDataLen C.size_t + cWriteLen C.size_t + cOffset C.uint64_t +} + +func newWriteStep(b []byte, writeLen, offset uint64) *writeStep { + return &writeStep{ + b: b, + cBuffer: (*C.char)(unsafe.Pointer(&b[0])), // TODO: must be pinned + cDataLen: C.size_t(len(b)), + cWriteLen: C.size_t(writeLen), + cOffset: C.uint64_t(offset), + } +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/diff_iterate.go b/vendor/github.com/ceph/go-ceph/rbd/diff_iterate.go new file mode 100644 index 0000000000..bb203884d9 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/diff_iterate.go @@ -0,0 +1,131 @@ +package rbd + +/* +#cgo LDFLAGS: -lrbd +#undef _GNU_SOURCE +#include +#include +#include + +extern int diffIterateCallback(uint64_t, size_t, int, uintptr_t); + +// inline wrapper to cast uintptr_t to void* +static inline int wrap_rbd_diff_iterate2(rbd_image_t image, + const char *fromsnapname, uint64_t ofs, uint64_t len, uint8_t include_parent, + uint8_t whole_object, uintptr_t arg) { + return rbd_diff_iterate2(image, fromsnapname, ofs, len, include_parent, + whole_object, (void*)diffIterateCallback, (void*)arg); +}; +*/ +import "C" + +import ( + "unsafe" + + "github.com/ceph/go-ceph/internal/callbacks" +) + +var diffIterateCallbacks = callbacks.New() + +// DiffIncludeParent values control if the difference should include the parent +// image. +type DiffIncludeParent uint8 + +// DiffWholeObject values control if the diff extents should cover the whole +// object. +type DiffWholeObject uint8 + +// DiffIterateCallback defines the function signature needed for the +// DiffIterate callback. +// +// The function will be called with the arguments: offset, length, exists, and +// data. The offset and length correspond to the changed region of the image. +// The exists value is set to zero if the region is known to be zeros, +// otherwise it is set to 1. The data value is the extra data parameter that +// was set on the DiffIterateConfig and is meant to be used for passing +// arbitrary user-defined items to the callback function. +// +// The callback can trigger the iteration to terminate early by returning +// a non-zero error code. +type DiffIterateCallback func(uint64, uint64, int, interface{}) int + +// DiffIterateConfig is used to define the parameters of a DiffIterate call. +// Callback, Offset, and Length should always be specified when passed to +// DiffIterate. The other values are optional. 
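Usage sketch (editorial, not part of the patch): counting changed bytes with the DiffIterateConfig type documented above and the DiffIterate method defined further down in this file. Assumes an open *rbd.Image, its size obtained elsewhere, and the NoSnapshot constant from the rest of the vendored rbd package.

func changedBytes(image *rbd.Image, imageSize uint64) (uint64, error) {
	var total uint64
	err := image.DiffIterate(rbd.DiffIterateConfig{
		SnapName:      rbd.NoSnapshot, // diff from the beginning, not from a snapshot
		Offset:        0,
		Length:        imageSize,
		IncludeParent: rbd.IncludeParent,
		WholeObject:   rbd.DisableWholeObject,
		Callback: func(offset, length uint64, exists int, _ interface{}) int {
			if exists != 0 {
				total += length
			}
			return 0 // a non-zero return would stop the iteration early
		},
	})
	return total, err
}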
+type DiffIterateConfig struct { + SnapName string + Offset uint64 + Length uint64 + IncludeParent DiffIncludeParent + WholeObject DiffWholeObject + Callback DiffIterateCallback + Data interface{} +} + +const ( + // ExcludeParent will exclude the parent from the diff. + ExcludeParent = DiffIncludeParent(0) + // IncludeParent will include the parent in the diff. + IncludeParent = DiffIncludeParent(1) + + // DisableWholeObject will not use the whole object in the diff. + DisableWholeObject = DiffWholeObject(0) + // EnableWholeObject will use the whole object in the diff. + EnableWholeObject = DiffWholeObject(1) +) + +// DiffIterate calls a callback on changed extents of an image. +// +// Calling DiffIterate will cause the callback specified in the +// DiffIterateConfig to be called as many times as there are changed +// regions in the image (controlled by the parameters as passed to librbd). +// +// See the documentation of DiffIterateCallback for a description of the +// arguments to the callback and the return behavior. +// +// Implements: +// +// int rbd_diff_iterate2(rbd_image_t image, +// const char *fromsnapname, +// uint64_t ofs, uint64_t len, +// uint8_t include_parent, uint8_t whole_object, +// int (*cb)(uint64_t, size_t, int, void *), +// void *arg); +func (image *Image) DiffIterate(config DiffIterateConfig) error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + if config.Callback == nil { + return rbdError(C.EINVAL) + } + + var cSnapName *C.char + if config.SnapName != NoSnapshot { + cSnapName = C.CString(config.SnapName) + defer C.free(unsafe.Pointer(cSnapName)) + } + + cbIndex := diffIterateCallbacks.Add(config) + defer diffIterateCallbacks.Remove(cbIndex) + + ret := C.wrap_rbd_diff_iterate2( + image.image, + cSnapName, + C.uint64_t(config.Offset), + C.uint64_t(config.Length), + C.uint8_t(config.IncludeParent), + C.uint8_t(config.WholeObject), + C.uintptr_t(cbIndex)) + + return getError(ret) +} + +//export diffIterateCallback +func diffIterateCallback( + offset C.uint64_t, length C.size_t, exists C.int, index uintptr) C.int { + + v := diffIterateCallbacks.Lookup(index) + config := v.(DiffIterateConfig) + return C.int(config.Callback( + uint64(offset), uint64(length), int(exists), config.Data)) +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/doc.go b/vendor/github.com/ceph/go-ceph/rbd/doc.go new file mode 100644 index 0000000000..76643435ac --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/doc.go @@ -0,0 +1,4 @@ +/* +Package rbd contains a set of wrappers around Ceph's librbd API. 
+*/ +package rbd diff --git a/vendor/github.com/ceph/go-ceph/rbd/encryption.go b/vendor/github.com/ceph/go-ceph/rbd/encryption.go new file mode 100644 index 0000000000..a0ddfea9df --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/encryption.go @@ -0,0 +1,142 @@ +//go:build !octopus && !nautilus +// +build !octopus,!nautilus + +package rbd + +// #cgo LDFLAGS: -lrbd +// /* force XSI-complaint strerror_r() */ +// #define _POSIX_C_SOURCE 200112L +// #undef _GNU_SOURCE +// #include +// #include +// #include +// #include +import "C" + +import ( + "unsafe" +) + +// cEncryptionData contains the data needed by the encryption functions +type cEncryptionData struct { + format C.rbd_encryption_format_t + opts C.rbd_encryption_options_t + optsSize C.size_t + free func() +} + +// EncryptionAlgorithm is the encryption algorithm +type EncryptionAlgorithm C.rbd_encryption_algorithm_t + +// Possible values for EncryptionAlgorithm: +// EncryptionAlgorithmAES128: AES 128bits +// EncryptionAlgorithmAES256: AES 256bits +const ( + EncryptionAlgorithmAES128 = EncryptionAlgorithm(C.RBD_ENCRYPTION_ALGORITHM_AES128) + EncryptionAlgorithmAES256 = EncryptionAlgorithm(C.RBD_ENCRYPTION_ALGORITHM_AES256) +) + +// EncryptionOptionsLUKS1 and EncryptionOptionsLUKS2 are identical +// structures at the moment, just as they are in the librbd api. +// The purpose behind creating different identical structures, is to facilitate +// future modifications of one of the formats, while maintaining backwards +// compatibility with the other. + +// EncryptionOptionsLUKS1 options required for LUKS v1 +type EncryptionOptionsLUKS1 struct { + Alg EncryptionAlgorithm + Passphrase []byte +} + +// EncryptionOptionsLUKS2 options required for LUKS v2 +type EncryptionOptionsLUKS2 struct { + Alg EncryptionAlgorithm + Passphrase []byte +} + +// EncryptionOptions interface is used to encapsulate the different encryption +// formats options and enable converting them from go to C structures. 
+type EncryptionOptions interface { + allocateEncryptionOptions() cEncryptionData +} + +func (opts EncryptionOptionsLUKS1) allocateEncryptionOptions() cEncryptionData { + var cOpts C.rbd_encryption_luks1_format_options_t + var retData cEncryptionData + cOpts.alg = C.rbd_encryption_algorithm_t(opts.Alg) + //CBytes allocates memory which we'll free by calling cOptsFree() + cOpts.passphrase = (*C.char)(C.CBytes(opts.Passphrase)) + cOpts.passphrase_size = C.size_t(len(opts.Passphrase)) + retData.opts = C.rbd_encryption_options_t(&cOpts) + retData.optsSize = C.size_t(C.sizeof_rbd_encryption_luks1_format_options_t) + retData.free = func() { C.free(unsafe.Pointer(cOpts.passphrase)) } + retData.format = C.RBD_ENCRYPTION_FORMAT_LUKS1 + return retData +} + +func (opts EncryptionOptionsLUKS2) allocateEncryptionOptions() cEncryptionData { + var cOpts C.rbd_encryption_luks2_format_options_t + var retData cEncryptionData + cOpts.alg = C.rbd_encryption_algorithm_t(opts.Alg) + //CBytes allocates memory which we'll free by calling cOptsFree() + cOpts.passphrase = (*C.char)(C.CBytes(opts.Passphrase)) + cOpts.passphrase_size = C.size_t(len(opts.Passphrase)) + retData.opts = C.rbd_encryption_options_t(&cOpts) + retData.optsSize = C.size_t(C.sizeof_rbd_encryption_luks2_format_options_t) + retData.free = func() { C.free(unsafe.Pointer(cOpts.passphrase)) } + retData.format = C.RBD_ENCRYPTION_FORMAT_LUKS2 + return retData +} + +// EncryptionFormat creates an encryption format header +// +// Implements: +// +// int rbd_encryption_format(rbd_image_t image, +// rbd_encryption_format_t format, +// rbd_encryption_options_t opts, +// size_t opts_size); +// +// To issue an IO against the image, you need to mount the image +// with libvirt/qemu using the LUKS format, or make a call to +// rbd_encryption_load(). +func (image *Image) EncryptionFormat(opts EncryptionOptions) error { + if image.image == nil { + return ErrImageNotOpen + } + + encryptionOpts := opts.allocateEncryptionOptions() + defer encryptionOpts.free() + + ret := C.rbd_encryption_format( + image.image, + encryptionOpts.format, + encryptionOpts.opts, + encryptionOpts.optsSize) + + return getError(ret) +} + +// EncryptionLoad enables IO on an open encrypted image +// +// Implements: +// +// int rbd_encryption_load(rbd_image_t image, +// rbd_encryption_format_t format, +// rbd_encryption_options_t opts, +// size_t opts_size); +func (image *Image) EncryptionLoad(opts EncryptionOptions) error { + if image.image == nil { + return ErrImageNotOpen + } + + encryptionOpts := opts.allocateEncryptionOptions() + defer encryptionOpts.free() + + ret := C.rbd_encryption_load( + image.image, + encryptionOpts.format, + encryptionOpts.opts, + encryptionOpts.optsSize) + return getError(ret) +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/errors.go b/vendor/github.com/ceph/go-ceph/rbd/errors.go new file mode 100644 index 0000000000..34693333f5 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/errors.go @@ -0,0 +1,84 @@ +package rbd + +/* +#include +*/ +import "C" + +import ( + "errors" + + "github.com/ceph/go-ceph/internal/errutil" +) + +// rbdError represents an error condition returned from the librbd APIs. 
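A minimal sketch, not from the vendored sources, of how the LUKS2 options above might be used on an already opened, freshly created *rbd.Image; the helper name and the origin of the passphrase are assumptions.

package example

import "github.com/ceph/go-ceph/rbd"

// formatAndLoadLUKS2 writes a LUKS2 header to the image and then loads the
// encryption so that subsequent IO through this handle is encrypted.
func formatAndLoadLUKS2(image *rbd.Image, passphrase []byte) error {
	opts := rbd.EncryptionOptionsLUKS2{
		Alg:        rbd.EncryptionAlgorithmAES256,
		Passphrase: passphrase,
	}
	if err := image.EncryptionFormat(opts); err != nil {
		return err
	}
	return image.EncryptionLoad(opts)
}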
+type rbdError int + +func (e rbdError) Error() string { + return errutil.FormatErrorCode("rbd", int(e)) +} + +func (e rbdError) ErrorCode() int { + return int(e) +} + +func getError(err C.int) error { + if err != 0 { + if err == -C.ENOENT { + return ErrNotFound + } + return rbdError(err) + } + return nil +} + +// getErrorIfNegative converts a ceph return code to error if negative. +// This is useful for functions that return a usable positive value on +// success but a negative error number on error. +func getErrorIfNegative(ret C.int) error { + if ret >= 0 { + return nil + } + return getError(ret) +} + +// Public go errors: + +var ( + // ErrNoIOContext may be returned if an api call requires an IOContext and + // it is not provided. + ErrNoIOContext = errors.New("IOContext is missing") + // ErrNoName may be returned if an api call requires a name and it is + // not provided. + ErrNoName = errors.New("RBD image does not have a name") + // ErrSnapshotNoName may be returned if an api call requires a snapshot + // name and it is not provided. + ErrSnapshotNoName = errors.New("RBD snapshot does not have a name") + // ErrImageNotOpen may be returned if an api call requires an open image handle and one is not provided. + ErrImageNotOpen = errors.New("RBD image not open") + // ErrImageIsOpen may be returned if an api call requires a closed image handle and one is not provided. + ErrImageIsOpen = errors.New("RBD image is open") + // ErrNotFound may be returned from an api call when the requested item is + // missing. + ErrNotFound = errors.New("RBD image not found") + // ErrNoNamespaceName maye be returned if an api call requires a namespace + // name and it is not provided. + ErrNoNamespaceName = errors.New("Namespace value is missing") + + // revive:disable:exported for compatibility with old versions + RbdErrorImageNotOpen = ErrImageNotOpen + RbdErrorNotFound = ErrNotFound + // revive:enable:exported +) + +// Public general error +const ( + // ErrNotExist indicates a non-specific missing resource. 
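As a hedged illustration of how these sentinel errors are meant to be consumed, the sketch below maps ErrNotFound to a boolean. It is not part of the vendored code and assumes OpenImageReadOnly and NoSnapshot from the vendored rbd.go elsewhere in this patch; the helper name is illustrative.

package example

import (
	"errors"

	"github.com/ceph/go-ceph/rados"
	"github.com/ceph/go-ceph/rbd"
)

// imageExists reports whether an image with the given name exists in the
// pool behind ioctx, treating ErrNotFound as "no" rather than as a failure.
func imageExists(ioctx *rados.IOContext, name string) (bool, error) {
	img, err := rbd.OpenImageReadOnly(ioctx, name, rbd.NoSnapshot)
	if errors.Is(err, rbd.ErrNotFound) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	defer img.Close()
	return true, nil
}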
+ ErrNotExist = rbdError(-C.ENOENT) +) + +// Private errors: + +const ( + errRange = rbdError(-C.ERANGE) +) diff --git a/vendor/github.com/ceph/go-ceph/rbd/features.go b/vendor/github.com/ceph/go-ceph/rbd/features.go new file mode 100644 index 0000000000..990e84925f --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/features.go @@ -0,0 +1,187 @@ +package rbd + +// #cgo LDFLAGS: -lrbd +// #include +import "C" + +const ( + // RBD features, bit values + + // FeatureLayering is the representation of RBD_FEATURE_LAYERING from + // librbd + FeatureLayering = uint64(C.RBD_FEATURE_LAYERING) + + // FeatureStripingV2 is the representation of RBD_FEATURE_STRIPINGV2 + // from librbd + FeatureStripingV2 = uint64(C.RBD_FEATURE_STRIPINGV2) + + // FeatureExclusiveLock is the representation of + // RBD_FEATURE_EXCLUSIVE_LOCK from librbd + FeatureExclusiveLock = uint64(C.RBD_FEATURE_EXCLUSIVE_LOCK) + + // FeatureObjectMap is the representation of RBD_FEATURE_OBJECT_MAP + // from librbd + FeatureObjectMap = uint64(C.RBD_FEATURE_OBJECT_MAP) + + // FeatureFastDiff is the representation of RBD_FEATURE_FAST_DIFF from + // librbd + FeatureFastDiff = uint64(C.RBD_FEATURE_FAST_DIFF) + + // FeatureDeepFlatten is the representation of RBD_FEATURE_DEEP_FLATTEN + // from librbd + FeatureDeepFlatten = uint64(C.RBD_FEATURE_DEEP_FLATTEN) + + // FeatureJournaling is the representation of RBD_FEATURE_JOURNALING + // from librbd + FeatureJournaling = uint64(C.RBD_FEATURE_JOURNALING) + + // FeatureDataPool is the representation of RBD_FEATURE_DATA_POOL from + // librbd + FeatureDataPool = uint64(C.RBD_FEATURE_DATA_POOL) + + // FeatureOperations is the representation of RBD_FEATURE_OPERATIONS + // from librbd + FeatureOperations = uint64(C.RBD_FEATURE_OPERATIONS) + + // RBD features, strings + + // FeatureNameLayering is the representation of + // RBD_FEATURE_NAME_LAYERING from librbd + FeatureNameLayering = C.RBD_FEATURE_NAME_LAYERING + + // FeatureNameStripingV2 is the representation of + // RBD_FEATURE_NAME_STRIPINGV2 from librbd + FeatureNameStripingV2 = C.RBD_FEATURE_NAME_STRIPINGV2 + + // FeatureNameExclusiveLock is the representation of + // RBD_FEATURE_NAME_EXCLUSIVE_LOCK from librbd + FeatureNameExclusiveLock = C.RBD_FEATURE_NAME_EXCLUSIVE_LOCK + + // FeatureNameObjectMap is the representation of + // RBD_FEATURE_NAME_OBJECT_MAP from librbd + FeatureNameObjectMap = C.RBD_FEATURE_NAME_OBJECT_MAP + + // FeatureNameFastDiff is the representation of + // RBD_FEATURE_NAME_FAST_DIFF from librbd + FeatureNameFastDiff = C.RBD_FEATURE_NAME_FAST_DIFF + + // FeatureNameDeepFlatten is the representation of + // RBD_FEATURE_NAME_DEEP_FLATTEN from librbd + FeatureNameDeepFlatten = C.RBD_FEATURE_NAME_DEEP_FLATTEN + + // FeatureNameJournaling is the representation of + // RBD_FEATURE_NAME_JOURNALING from librbd + FeatureNameJournaling = C.RBD_FEATURE_NAME_JOURNALING + + // FeatureNameDataPool is the representation of + // RBD_FEATURE_NAME_DATA_POOL from librbd + FeatureNameDataPool = C.RBD_FEATURE_NAME_DATA_POOL + + // FeatureNameOperations is the representation of + // RBD_FEATURE_NAME_OPERATIONS from librbd + FeatureNameOperations = C.RBD_FEATURE_NAME_OPERATIONS + + // old names for backwards compatibility (unused?) 
+ + // RbdFeatureLayering deprecated alias for FeatureLayering + RbdFeatureLayering = FeatureLayering + // RbdFeatureStripingV2 deprecated alias for FeatureStripingV2 + RbdFeatureStripingV2 = FeatureStripingV2 + // RbdFeatureExclusiveLock deprecated alias for FeatureExclusiveLock + RbdFeatureExclusiveLock = FeatureExclusiveLock + // RbdFeatureObjectMap deprecated alias for FeatureObjectMap + RbdFeatureObjectMap = FeatureObjectMap + // RbdFeatureFastDiff deprecated alias for FeatureFastDiff + RbdFeatureFastDiff = FeatureFastDiff + // RbdFeatureDeepFlatten deprecated alias for FeatureDeepFlatten + RbdFeatureDeepFlatten = FeatureDeepFlatten + // RbdFeatureJournaling deprecated alias for FeatureJournaling + RbdFeatureJournaling = FeatureJournaling + // RbdFeatureDataPool deprecated alias for FeatureDataPool + RbdFeatureDataPool = FeatureDataPool + + // revive:disable:exported Maybe unused + // the following are probably really unused? + RbdFeaturesDefault = uint64(C.RBD_FEATURES_DEFAULT) + RbdFeaturesIncompatible = uint64(C.RBD_FEATURES_INCOMPATIBLE) + RbdFeaturesRwIncompatible = uint64(C.RBD_FEATURES_RW_INCOMPATIBLE) + RbdFeaturesMutable = uint64(C.RBD_FEATURES_MUTABLE) + RbdFeaturesSingleClient = uint64(C.RBD_FEATURES_SINGLE_CLIENT) + // revive:enable:exported +) + +// FeatureSet is a combination of the bit value for multiple features. +type FeatureSet uint64 + +var ( + featureNameToBit = map[string]uint64{ + FeatureNameLayering: FeatureLayering, + FeatureNameStripingV2: FeatureStripingV2, + FeatureNameExclusiveLock: FeatureExclusiveLock, + FeatureNameObjectMap: FeatureObjectMap, + FeatureNameFastDiff: FeatureFastDiff, + FeatureNameDeepFlatten: FeatureDeepFlatten, + FeatureNameJournaling: FeatureJournaling, + FeatureNameDataPool: FeatureDataPool, + FeatureNameOperations: FeatureOperations, + } +) + +// FeatureSetFromNames returns a FeatureSet built from flag bits corresponding +// to the provided feature names. +func FeatureSetFromNames(names []string) FeatureSet { + var fs uint64 + for _, name := range names { + fs |= featureNameToBit[name] + } + return FeatureSet(fs) +} + +// Names converts all of the enabled feature bits in the FeatureSet to +// a slice of strings corresponding to the names for each feature. +func (fs *FeatureSet) Names() []string { + names := []string{} + + for name, bit := range featureNameToBit { + if (uint64(*fs) & bit) == bit { + names = append(names, name) + } + } + + return names +} + +// GetFeatures returns the features bitmask for the rbd image. +// +// Implements: +// +// int rbd_get_features(rbd_image_t image, uint64_t *features); +func (image *Image) GetFeatures() (features uint64, err error) { + if err := image.validate(imageIsOpen); err != nil { + return 0, err + } + + if ret := C.rbd_get_features(image.image, (*C.uint64_t)(&features)); ret < 0 { + return 0, rbdError(ret) + } + + return features, nil +} + +// UpdateFeatures updates the features on the Image. 
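A short sketch, not part of the vendored sources, showing how the feature bits and the FeatureSet helpers above might be combined for an already opened image; the helper name is illustrative.

package example

import (
	"fmt"

	"github.com/ceph/go-ceph/rbd"
)

// reportFeatures prints the enabled feature names of an open image and
// returns whether exclusive-lock is among them.
func reportFeatures(image *rbd.Image) (bool, error) {
	features, err := image.GetFeatures()
	if err != nil {
		return false, err
	}
	fs := rbd.FeatureSet(features)
	fmt.Println("enabled features:", fs.Names())
	return features&rbd.FeatureExclusiveLock != 0, nil
}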
+// +// Implements: +// +// int rbd_update_features(rbd_image_t image, uint64_t features, +// uint8_t enabled); +func (image *Image) UpdateFeatures(features uint64, enabled bool) error { + if image.image == nil { + return RbdErrorImageNotOpen + } + + cEnabled := C.uint8_t(0) + if enabled { + cEnabled = 1 + } + return getError(C.rbd_update_features(image.image, C.uint64_t(features), cEnabled)) +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/features_nautilus.go b/vendor/github.com/ceph/go-ceph/rbd/features_nautilus.go new file mode 100644 index 0000000000..be12e497c3 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/features_nautilus.go @@ -0,0 +1,18 @@ +package rbd + +// #include +import "C" + +const ( + // FeatureMigrating is the representation of RBD_FEATURE_MIGRATING from + // librbd + FeatureMigrating = uint64(C.RBD_FEATURE_MIGRATING) + + // FeatureNameMigrating is the representation of + // RBD_FEATURE_NAME_MIGRATING from librbd + FeatureNameMigrating = C.RBD_FEATURE_NAME_MIGRATING +) + +func init() { + featureNameToBit[FeatureNameMigrating] = FeatureMigrating +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/group.go b/vendor/github.com/ceph/go-ceph/rbd/group.go new file mode 100644 index 0000000000..654d15e3e3 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/group.go @@ -0,0 +1,267 @@ +package rbd + +/* +#cgo LDFLAGS: -lrbd +#include +#include +*/ +import "C" + +import ( + "unsafe" + + "github.com/ceph/go-ceph/internal/cutil" + "github.com/ceph/go-ceph/internal/retry" + "github.com/ceph/go-ceph/rados" +) + +// GroupCreate is used to create an image group. +// +// Implements: +// +// int rbd_group_create(rados_ioctx_t p, const char *name); +func GroupCreate(ioctx *rados.IOContext, name string) error { + cName := C.CString(name) + defer C.free(unsafe.Pointer(cName)) + + ret := C.rbd_group_create(cephIoctx(ioctx), cName) + return getError(ret) +} + +// GroupRemove is used to remove an image group. +// +// Implements: +// +// int rbd_group_remove(rados_ioctx_t p, const char *name); +func GroupRemove(ioctx *rados.IOContext, name string) error { + cName := C.CString(name) + defer C.free(unsafe.Pointer(cName)) + + ret := C.rbd_group_remove(cephIoctx(ioctx), cName) + return getError(ret) +} + +// GroupRename will rename an existing image group. +// +// Implements: +// +// int rbd_group_rename(rados_ioctx_t p, const char *src_name, +// const char *dest_name); +func GroupRename(ioctx *rados.IOContext, src, dest string) error { + cSrc := C.CString(src) + defer C.free(unsafe.Pointer(cSrc)) + cDest := C.CString(dest) + defer C.free(unsafe.Pointer(cDest)) + + ret := C.rbd_group_rename(cephIoctx(ioctx), cSrc, cDest) + return getError(ret) +} + +// GroupList returns a slice of image group names. +// +// Implements: +// +// int rbd_group_list(rados_ioctx_t p, char *names, size_t *size); +func GroupList(ioctx *rados.IOContext) ([]string, error) { + var ( + buf []byte + err error + ret C.int + ) + retry.WithSizes(1024, 262144, func(size int) retry.Hint { + cSize := C.size_t(size) + buf = make([]byte, cSize) + ret = C.rbd_group_list( + cephIoctx(ioctx), + (*C.char)(unsafe.Pointer(&buf[0])), + &cSize) + err = getErrorIfNegative(ret) + return retry.Size(int(cSize)).If(err == errRange) + }) + + if err != nil { + return nil, err + } + + // cSize is not set to the expected size when it is sufficiently large + // but ret will be set to the size in a non-error condition. 
+ groups := cutil.SplitBuffer(buf[:ret]) + return groups, nil +} + +// GroupImageAdd will add the specified image to the named group. +// An io context must be supplied for both the group and image. +// +// Implements: +// +// int rbd_group_image_add(rados_ioctx_t group_p, +// const char *group_name, +// rados_ioctx_t image_p, +// const char *image_name); +func GroupImageAdd(groupIoctx *rados.IOContext, groupName string, + imageIoctx *rados.IOContext, imageName string) error { + + cGroupName := C.CString(groupName) + defer C.free(unsafe.Pointer(cGroupName)) + cImageName := C.CString(imageName) + defer C.free(unsafe.Pointer(cImageName)) + + ret := C.rbd_group_image_add( + cephIoctx(groupIoctx), + cGroupName, + cephIoctx(imageIoctx), + cImageName) + return getError(ret) +} + +// GroupImageRemove will remove the specified image from the named group. +// An io context must be supplied for both the group and image. +// +// Implements: +// +// int rbd_group_image_remove(rados_ioctx_t group_p, +// const char *group_name, +// rados_ioctx_t image_p, +// const char *image_name); +func GroupImageRemove(groupIoctx *rados.IOContext, groupName string, + imageIoctx *rados.IOContext, imageName string) error { + + cGroupName := C.CString(groupName) + defer C.free(unsafe.Pointer(cGroupName)) + cImageName := C.CString(imageName) + defer C.free(unsafe.Pointer(cImageName)) + + ret := C.rbd_group_image_remove( + cephIoctx(groupIoctx), + cGroupName, + cephIoctx(imageIoctx), + cImageName) + return getError(ret) +} + +// GroupImageRemoveByID will remove the specified image from the named group. +// An io context must be supplied for both the group and image. +// +// Implements: +// +// CEPH_RBD_API int rbd_group_image_remove_by_id(rados_ioctx_t group_p, +// const char *group_name, +// rados_ioctx_t image_p, +// const char *image_id); +func GroupImageRemoveByID(groupIoctx *rados.IOContext, groupName string, + imageIoctx *rados.IOContext, imageID string) error { + + cGroupName := C.CString(groupName) + defer C.free(unsafe.Pointer(cGroupName)) + cid := C.CString(imageID) + defer C.free(unsafe.Pointer(cid)) + + ret := C.rbd_group_image_remove_by_id( + cephIoctx(groupIoctx), + cGroupName, + cephIoctx(imageIoctx), + cid) + return getError(ret) +} + +// GroupImageState indicates an image's state in a group. +type GroupImageState int + +const ( + // GroupImageStateAttached is equivalent to RBD_GROUP_IMAGE_STATE_ATTACHED + GroupImageStateAttached = GroupImageState(C.RBD_GROUP_IMAGE_STATE_ATTACHED) + // GroupImageStateIncomplete is equivalent to RBD_GROUP_IMAGE_STATE_INCOMPLETE + GroupImageStateIncomplete = GroupImageState(C.RBD_GROUP_IMAGE_STATE_INCOMPLETE) +) + +// GroupImageInfo reports on images within a group. +type GroupImageInfo struct { + Name string + PoolID int64 + State GroupImageState +} + +// GroupImageList returns a slice of GroupImageInfo types based on the +// images that are part of the named group. 
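The group primitives above compose naturally. The following sketch, not from the vendored code, creates a group in the pool behind an open IOContext and attaches images that live in the same pool; helper and variable names are illustrative.

package example

import (
	"github.com/ceph/go-ceph/rados"
	"github.com/ceph/go-ceph/rbd"
)

// groupImages creates a group and adds the named images to it, returning the
// resulting list of groups in the pool.
func groupImages(ioctx *rados.IOContext, group string, images []string) ([]string, error) {
	if err := rbd.GroupCreate(ioctx, group); err != nil {
		return nil, err
	}
	for _, name := range images {
		// the group and the images live in the same pool here, so the same
		// ioctx is passed for both the group and the image arguments
		if err := rbd.GroupImageAdd(ioctx, group, ioctx, name); err != nil {
			return nil, err
		}
	}
	return rbd.GroupList(ioctx)
}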
+// +// Implements: +// +// int rbd_group_image_list(rados_ioctx_t group_p, +// const char *group_name, +// rbd_group_image_info_t *images, +// size_t group_image_info_size, +// size_t *num_entries); +func GroupImageList(ioctx *rados.IOContext, name string) ([]GroupImageInfo, error) { + cName := C.CString(name) + defer C.free(unsafe.Pointer(cName)) + + var ( + cImages []C.rbd_group_image_info_t + cSize C.size_t + err error + ) + retry.WithSizes(1024, 262144, func(size int) retry.Hint { + cSize = C.size_t(size) + cImages = make([]C.rbd_group_image_info_t, cSize) + ret := C.rbd_group_image_list( + cephIoctx(ioctx), + cName, + (*C.rbd_group_image_info_t)(unsafe.Pointer(&cImages[0])), + C.sizeof_rbd_group_image_info_t, + &cSize) + err = getErrorIfNegative(ret) + return retry.Size(int(cSize)).If(err == errRange) + }) + + if err != nil { + return nil, err + } + + images := make([]GroupImageInfo, cSize) + for i := range images { + images[i].Name = C.GoString(cImages[i].name) + images[i].PoolID = int64(cImages[i].pool) + images[i].State = GroupImageState(cImages[i].state) + } + + // free C memory allocated by C.rbd_group_image_list call + ret := C.rbd_group_image_list_cleanup( + (*C.rbd_group_image_info_t)(unsafe.Pointer(&cImages[0])), + C.sizeof_rbd_group_image_info_t, + cSize) + return images, getError(ret) +} + +// GroupInfo contains the name and pool id of a RBD group. +type GroupInfo struct { + Name string + PoolID int64 +} + +// GetGroup returns group info for the group this image is part of. +// +// Implements: +// +// int rbd_get_group(rbd_image_t image, rbd_group_info_t *group_info, +// size_t group_info_size); +func (image *Image) GetGroup() (GroupInfo, error) { + if err := image.validate(imageIsOpen); err != nil { + return GroupInfo{}, err + } + + var cgi C.rbd_group_info_t + ret := C.rbd_get_group( + image.image, + &cgi, + C.sizeof_rbd_group_info_t) + if err := getErrorIfNegative(ret); err != nil { + return GroupInfo{}, err + } + + gi := GroupInfo{ + Name: C.GoString(cgi.name), + PoolID: int64(cgi.pool), + } + ret = C.rbd_group_info_cleanup(&cgi, C.sizeof_rbd_group_info_t) + return gi, getError(ret) +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/group_snap.go b/vendor/github.com/ceph/go-ceph/rbd/group_snap.go new file mode 100644 index 0000000000..527e99fc56 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/group_snap.go @@ -0,0 +1,229 @@ +package rbd + +/* +#cgo LDFLAGS: -lrbd +#include +#include +#include + +extern int snapRollbackCallback(uint64_t, uint64_t, uintptr_t); + +// inline wrapper to cast uintptr_t to void* +static inline int wrap_rbd_group_snap_rollback_with_progress( + rados_ioctx_t group_p, const char *group_name, + const char *snap_name, uintptr_t arg) { + return rbd_group_snap_rollback_with_progress( + group_p, group_name, snap_name, (librbd_progress_fn_t)snapRollbackCallback, (void*)arg); +}; +*/ +import "C" + +import ( + "unsafe" + + "github.com/ceph/go-ceph/internal/callbacks" + "github.com/ceph/go-ceph/internal/retry" + "github.com/ceph/go-ceph/rados" +) + +// GroupSnapCreate will create a group snapshot. 
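Another illustrative, non-vendored sketch: listing the members of a group and asking an open image which group it belongs to.

package example

import (
	"fmt"

	"github.com/ceph/go-ceph/rados"
	"github.com/ceph/go-ceph/rbd"
)

// printGroupMembers lists the images attached to a group and shows which
// group an already open image belongs to.
func printGroupMembers(ioctx *rados.IOContext, group string, image *rbd.Image) error {
	infos, err := rbd.GroupImageList(ioctx, group)
	if err != nil {
		return err
	}
	for _, info := range infos {
		fmt.Printf("%s (pool %d, state %d)\n", info.Name, info.PoolID, info.State)
	}

	gi, err := image.GetGroup()
	if err != nil {
		return err
	}
	fmt.Println("image belongs to group:", gi.Name)
	return nil
}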
+// +// Implements: +// +// int rbd_group_snap_create(rados_ioctx_t group_p, +// const char *group_name, +// const char *snap_name); +func GroupSnapCreate(ioctx *rados.IOContext, group, snap string) error { + cGroupName := C.CString(group) + defer C.free(unsafe.Pointer(cGroupName)) + cSnapName := C.CString(snap) + defer C.free(unsafe.Pointer(cSnapName)) + + ret := C.rbd_group_snap_create(cephIoctx(ioctx), cGroupName, cSnapName) + return getError(ret) +} + +// GroupSnapRemove removes an existing group snapshot. +// +// Implements: +// +// int rbd_group_snap_remove(rados_ioctx_t group_p, +// const char *group_name, +// const char *snap_name); +func GroupSnapRemove(ioctx *rados.IOContext, group, snap string) error { + cGroupName := C.CString(group) + defer C.free(unsafe.Pointer(cGroupName)) + cSnapName := C.CString(snap) + defer C.free(unsafe.Pointer(cSnapName)) + + ret := C.rbd_group_snap_remove(cephIoctx(ioctx), cGroupName, cSnapName) + return getError(ret) +} + +// GroupSnapRename will rename an existing group snapshot. +// +// Implements: +// +// int rbd_group_snap_rename(rados_ioctx_t group_p, +// const char *group_name, +// const char *old_snap_name, +// const char *new_snap_name); +func GroupSnapRename(ioctx *rados.IOContext, group, src, dest string) error { + cGroupName := C.CString(group) + defer C.free(unsafe.Pointer(cGroupName)) + cOldSnapName := C.CString(src) + defer C.free(unsafe.Pointer(cOldSnapName)) + cNewSnapName := C.CString(dest) + defer C.free(unsafe.Pointer(cNewSnapName)) + + ret := C.rbd_group_snap_rename( + cephIoctx(ioctx), cGroupName, cOldSnapName, cNewSnapName) + return getError(ret) +} + +// GroupSnapState represents the state of a group snapshot in GroupSnapInfo. +type GroupSnapState int + +const ( + // GroupSnapStateIncomplete is equivalent to RBD_GROUP_SNAP_STATE_INCOMPLETE. + GroupSnapStateIncomplete = GroupSnapState(C.RBD_GROUP_SNAP_STATE_INCOMPLETE) + // GroupSnapStateComplete is equivalent to RBD_GROUP_SNAP_STATE_COMPLETE. + GroupSnapStateComplete = GroupSnapState(C.RBD_GROUP_SNAP_STATE_COMPLETE) +) + +// GroupSnapInfo values are returned by GroupSnapList, representing the +// snapshots that are part of an rbd group. +type GroupSnapInfo struct { + Name string + State GroupSnapState +} + +// GroupSnapList returns a slice of snapshots in a group. 
+// +// Implements: +// +// int rbd_group_snap_list(rados_ioctx_t group_p, +// const char *group_name, +// rbd_group_snap_info_t *snaps, +// size_t group_snap_info_size, +// size_t *num_entries); +func GroupSnapList(ioctx *rados.IOContext, group string) ([]GroupSnapInfo, error) { + cGroupName := C.CString(group) + defer C.free(unsafe.Pointer(cGroupName)) + + var ( + cSnaps []C.rbd_group_snap_info_t + cSize C.size_t + err error + ) + retry.WithSizes(1024, 262144, func(size int) retry.Hint { + cSize = C.size_t(size) + cSnaps = make([]C.rbd_group_snap_info_t, cSize) + ret := C.rbd_group_snap_list( + cephIoctx(ioctx), + cGroupName, + (*C.rbd_group_snap_info_t)(unsafe.Pointer(&cSnaps[0])), + C.sizeof_rbd_group_snap_info_t, + &cSize) + err = getErrorIfNegative(ret) + return retry.Size(int(cSize)).If(err == errRange) + }) + + if err != nil { + return nil, err + } + + snaps := make([]GroupSnapInfo, cSize) + for i := range snaps { + snaps[i].Name = C.GoString(cSnaps[i].name) + snaps[i].State = GroupSnapState(cSnaps[i].state) + } + + // free C memory allocated by C.rbd_group_snap_list call + ret := C.rbd_group_snap_list_cleanup( + (*C.rbd_group_snap_info_t)(unsafe.Pointer(&cSnaps[0])), + C.sizeof_rbd_group_snap_info_t, + cSize) + return snaps, getError(ret) +} + +// GroupSnapRollback will roll back the images in the group to that of the +// given snapshot. +// +// Implements: +// +// int rbd_group_snap_rollback(rados_ioctx_t group_p, +// const char *group_name, +// const char *snap_name); +func GroupSnapRollback(ioctx *rados.IOContext, group, snap string) error { + cGroupName := C.CString(group) + defer C.free(unsafe.Pointer(cGroupName)) + cSnapName := C.CString(snap) + defer C.free(unsafe.Pointer(cSnapName)) + + ret := C.rbd_group_snap_rollback(cephIoctx(ioctx), cGroupName, cSnapName) + return getError(ret) +} + +// GroupSnapRollbackCallback defines the function signature needed for the +// GroupSnapRollbackWithProgress callback. +// +// This callback will be called by GroupSnapRollbackWithProgress when it +// wishes to report progress rolling back a group snapshot. +type GroupSnapRollbackCallback func(uint64, uint64, interface{}) int + +var groupSnapRollbackCallbacks = callbacks.New() + +// GroupSnapRollbackWithProgress will roll back the images in the group +// to that of given snapshot. The given progress callback will be called +// to report on the progress of the snapshot rollback. 
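A non-vendored sketch tying the group snapshot calls together: create a snapshot, list the group's snapshots, then roll the group back with a progress callback. The progress output and helper name are assumptions.

package example

import (
	"fmt"

	"github.com/ceph/go-ceph/rados"
	"github.com/ceph/go-ceph/rbd"
)

// snapshotAndRollback creates a group snapshot, lists the snapshots of the
// group, and rolls the group back to the new snapshot, printing the progress
// values reported by librbd.
func snapshotAndRollback(ioctx *rados.IOContext, group, snap string) error {
	if err := rbd.GroupSnapCreate(ioctx, group, snap); err != nil {
		return err
	}

	snaps, err := rbd.GroupSnapList(ioctx, group)
	if err != nil {
		return err
	}
	for _, s := range snaps {
		fmt.Printf("snapshot %q complete=%v\n", s.Name, s.State == rbd.GroupSnapStateComplete)
	}

	progress := func(offset, total uint64, _ interface{}) int {
		fmt.Printf("rollback progress: %d/%d\n", offset, total)
		return 0 // non-zero would abort the rollback
	}
	return rbd.GroupSnapRollbackWithProgress(ioctx, group, snap, progress, nil)
}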
+//
+// Implements:
+//
+//	int rbd_group_snap_rollback_with_progress(rados_ioctx_t group_p,
+//	                                          const char *group_name,
+//	                                          const char *snap_name,
+//	                                          librbd_progress_fn_t cb,
+//	                                          void *cbdata);
+func GroupSnapRollbackWithProgress(
+	ioctx *rados.IOContext, group, snap string,
+	cb GroupSnapRollbackCallback, data interface{}) error {
+	// the provided callback must be a real function
+	if cb == nil {
+		return rbdError(C.EINVAL)
+	}
+
+	cGroupName := C.CString(group)
+	defer C.free(unsafe.Pointer(cGroupName))
+	cSnapName := C.CString(snap)
+	defer C.free(unsafe.Pointer(cSnapName))
+
+	ctx := gsnapRollbackCallbackCtx{
+		callback: cb,
+		data:     data,
+	}
+	cbIndex := groupSnapRollbackCallbacks.Add(ctx)
+	defer groupSnapRollbackCallbacks.Remove(cbIndex)
+
+	ret := C.wrap_rbd_group_snap_rollback_with_progress(
+		cephIoctx(ioctx),
+		cGroupName,
+		cSnapName,
+		C.uintptr_t(cbIndex))
+
+	return getError(ret)
+}
+
+type gsnapRollbackCallbackCtx struct {
+	callback GroupSnapRollbackCallback
+	data     interface{}
+}
+
+//export snapRollbackCallback
+func snapRollbackCallback(
+	offset, total C.uint64_t, index uintptr) C.int {
+
+	v := groupSnapRollbackCallbacks.Lookup(index)
+	ctx := v.(gsnapRollbackCallbackCtx)
+	return C.int(ctx.callback(uint64(offset), uint64(total), ctx.data))
+}
diff --git a/vendor/github.com/ceph/go-ceph/rbd/locks.go b/vendor/github.com/ceph/go-ceph/rbd/locks.go
new file mode 100644
index 0000000000..e5537c1578
--- /dev/null
+++ b/vendor/github.com/ceph/go-ceph/rbd/locks.go
@@ -0,0 +1,140 @@
+//go:build !nautilus
+// +build !nautilus
+
+package rbd
+
+// #cgo LDFLAGS: -lrbd
+// #include
+// #include
+// #include
+import "C"
+
+import (
+	"unsafe"
+)
+
+// LockMode represents a group of configurable lock modes.
+type LockMode C.rbd_lock_mode_t
+
+const (
+	// LockModeExclusive is the representation of RBD_LOCK_MODE_EXCLUSIVE from librbd.
+	LockModeExclusive = LockMode(C.RBD_LOCK_MODE_EXCLUSIVE)
+	// LockModeShared is the representation of RBD_LOCK_MODE_SHARED from librbd.
+	LockModeShared = LockMode(C.RBD_LOCK_MODE_SHARED)
+)
+
+// LockAcquire takes a lock on the given image as per the provided lock_mode.
+//
+// Implements:
+//
+//	int rbd_lock_acquire(rbd_image_t image, rbd_lock_mode_t lock_mode);
+func (image *Image) LockAcquire(lockMode LockMode) error {
+	if err := image.validate(imageIsOpen); err != nil {
+		return err
+	}
+
+	ret := C.rbd_lock_acquire(image.image, C.rbd_lock_mode_t(lockMode))
+
+	return getError(ret)
+}
+
+// LockBreak breaks the lock of lock_mode on the provided lock_owner.
+//
+// Implements:
+//
+//	int rbd_lock_break(rbd_image_t image, rbd_lock_mode_t lock_mode,
+//	                   const char *lock_owner);
+func (image *Image) LockBreak(lockMode LockMode, lockOwner string) error {
+	if err := image.validate(imageIsOpen); err != nil {
+		return err
+	}
+
+	cLockOwner := C.CString(lockOwner)
+	defer C.free(unsafe.Pointer(cLockOwner))
+
+	ret := C.rbd_lock_break(image.image, C.rbd_lock_mode_t(lockMode), cLockOwner)
+
+	return getError(ret)
+}
+
+// LockOwner represents information about a lock owner.
+type LockOwner struct {
+	Mode  LockMode
+	Owner string
+}
+
+// LockGetOwners fetches the list of lock owners.
+// +// Implements: +// +// int rbd_lock_get_owners(rbd_image_t image, rbd_lock_mode_t *lock_mode, +// char **lock_owners, size_t *max_lock_owners); +func (image *Image) LockGetOwners() ([]*LockOwner, error) { + if err := image.validate(imageIsOpen); err != nil { + return nil, err + } + + var ( + maxLockOwners = C.size_t(8) + cLockOwners = make([]*C.char, 8) + lockMode LockMode + lockOwnersList []*LockOwner + ) + + for { + ret := C.rbd_lock_get_owners(image.image, (*C.rbd_lock_mode_t)(&lockMode), &cLockOwners[0], &maxLockOwners) + if ret >= 0 { + break + } else if ret == -C.ENOENT { + return nil, nil + } else if ret != -C.ERANGE { + return nil, getError(ret) + } + } + + defer C.rbd_lock_get_owners_cleanup(&cLockOwners[0], maxLockOwners) + + for i := 0; i < int(maxLockOwners); i++ { + lockOwnersList = append(lockOwnersList, &LockOwner{ + Mode: LockMode(lockMode), + Owner: C.GoString(cLockOwners[i]), + }) + } + + return lockOwnersList, nil +} + +// LockIsExclusiveOwner gets the status of the image exclusive lock. +// +// Implements: +// +// int rbd_is_exclusive_lock_owner(rbd_image_t image, int *is_owner); +func (image *Image) LockIsExclusiveOwner() (bool, error) { + if err := image.validate(imageIsOpen); err != nil { + return false, err + } + + cIsOwner := C.int(0) + + ret := C.rbd_is_exclusive_lock_owner(image.image, &cIsOwner) + if ret != 0 { + return false, getError(ret) + } + + return cIsOwner == 1, nil +} + +// LockRelease releases a lock on the image. +// +// Implements: +// +// int rbd_lock_release(rbd_image_t image); +func (image *Image) LockRelease() error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + + ret := C.rbd_lock_release(image.image) + + return getError(ret) +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/metadata.go b/vendor/github.com/ceph/go-ceph/rbd/metadata.go new file mode 100644 index 0000000000..5419dc274e --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/metadata.go @@ -0,0 +1,153 @@ +package rbd + +// #cgo LDFLAGS: -lrbd +// #include +// #include +import "C" + +import ( + "unsafe" + + "github.com/ceph/go-ceph/internal/cutil" + "github.com/ceph/go-ceph/internal/retry" +) + +// GetMetadata returns the metadata string associated with the given key. +// +// Implements: +// +// int rbd_metadata_get(rbd_image_t image, const char *key, char *value, size_t *vallen) +func (image *Image) GetMetadata(key string) (string, error) { + if err := image.validate(imageIsOpen); err != nil { + return "", err + } + + cKey := C.CString(key) + defer C.free(unsafe.Pointer(cKey)) + + var ( + buf []byte + err error + ) + retry.WithSizes(4096, 262144, func(size int) retry.Hint { + csize := C.size_t(size) + buf = make([]byte, csize) + // rbd_metadata_get is a bit quirky and *does not* update the size + // value if the size passed in >= the needed size. + ret := C.rbd_metadata_get( + image.image, cKey, (*C.char)(unsafe.Pointer(&buf[0])), &csize) + err = getError(ret) + return retry.Size(int(csize)).If(err == errRange) + }) + if err != nil { + return "", err + } + return C.GoString((*C.char)(unsafe.Pointer(&buf[0]))), nil +} + +// SetMetadata updates the metadata string associated with the given key. 
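The lock calls above are typically used as a bracket around exclusive work. The following sketch is not part of the vendored sources; it assumes an already open image and the helper name is illustrative.

package example

import (
	"fmt"

	"github.com/ceph/go-ceph/rbd"
)

// withExclusiveLock acquires the exclusive lock on an open image, reports the
// current lock owners, and releases the lock again on return.
func withExclusiveLock(image *rbd.Image) error {
	if err := image.LockAcquire(rbd.LockModeExclusive); err != nil {
		return err
	}
	defer image.LockRelease()

	owner, err := image.LockIsExclusiveOwner()
	if err != nil {
		return err
	}
	fmt.Println("we own the exclusive lock:", owner)

	owners, err := image.LockGetOwners()
	if err != nil {
		return err
	}
	for _, o := range owners {
		fmt.Printf("lock owner %q (mode %v)\n", o.Owner, o.Mode)
	}
	return nil
}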
+// +// Implements: +// +// int rbd_metadata_set(rbd_image_t image, const char *key, const char *value) +func (image *Image) SetMetadata(key string, value string) error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + + cKey := C.CString(key) + cValue := C.CString(value) + defer C.free(unsafe.Pointer(cKey)) + defer C.free(unsafe.Pointer(cValue)) + + ret := C.rbd_metadata_set(image.image, cKey, cValue) + if ret < 0 { + return rbdError(ret) + } + + return nil +} + +// RemoveMetadata clears the metadata associated with the given key. +// +// Implements: +// +// int rbd_metadata_remove(rbd_image_t image, const char *key) +func (image *Image) RemoveMetadata(key string) error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + + cKey := C.CString(key) + defer C.free(unsafe.Pointer(cKey)) + + ret := C.rbd_metadata_remove(image.image, cKey) + if ret < 0 { + return rbdError(ret) + } + + return nil +} + +// ListMetadata returns a map containing all metadata assigned to the RBD image. +// +// Implements: +// +// int rbd_metadata_list(rbd_image_t image, const char *start, uint64_t max, +// char *keys, size_t *key_len, char *values, size_t *vals_len); +func (image *Image) ListMetadata() (map[string]string, error) { + if err := image.validate(imageIsOpen); err != nil { + return nil, err + } + + var ( + err error + keysbuf []byte + keysSize C.size_t + valsbuf []byte + valsSize C.size_t + ) + retry.WithSizes(4096, 262144, func(size int) retry.Hint { + keysbuf = make([]byte, size) + keysSize = C.size_t(size) + valsbuf = make([]byte, size) + valsSize = C.size_t(size) + // the rbd_metadata_list function can use a start point and a limit. + // we do not use it and prefer our retry helper and just allocating + // buffers large enough to take all the keys and values + ret := C.rbd_metadata_list( + image.image, + (*C.char)(unsafe.Pointer(&empty[0])), // always start at the beginning (no paging) + 0, // fetch all key-value pairs + (*C.char)(unsafe.Pointer(&keysbuf[0])), + &keysSize, + (*C.char)(unsafe.Pointer(&valsbuf[0])), + &valsSize) + + err = getError(ret) + nextSize := valsSize + if keysSize > nextSize { + nextSize = keysSize + } + return retry.Size(int(nextSize)).If(err == errRange) + }) + if err != nil { + return nil, err + } + + m := map[string]string{} + keys := cutil.SplitBuffer(keysbuf[:keysSize]) + vals := cutil.SplitBuffer(valsbuf[:valsSize]) + if len(keys) != len(vals) { + // this should not happen (famous last words) + return nil, errRange + } + for i := range keys { + m[keys[i]] = vals[i] + } + return m, nil +} + +// rather than allocate memory every time that ListMetadata is called, +// define a static byte slice to stand in for the C "empty string" +var empty = []byte{0} diff --git a/vendor/github.com/ceph/go-ceph/rbd/migration.go b/vendor/github.com/ceph/go-ceph/rbd/migration.go new file mode 100644 index 0000000000..5e567f3fbc --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/migration.go @@ -0,0 +1,219 @@ +//go:build !(octopus || nautilus) +// +build !octopus,!nautilus + +package rbd + +// #cgo LDFLAGS: -lrbd +// #include +// #include +// #include +import "C" + +import ( + "unsafe" + + "github.com/ceph/go-ceph/rados" +) + +// MigrationImageState denotes the current migration status of a given image. +type MigrationImageState int + +const ( + // MigrationImageUnknown is the representation of + // RBD_IMAGE_MIGRATION_STATE_UNKNOWN from librbd. 
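A small, non-vendored sketch of the metadata round trip on an open image (set a key, read it back, dump everything); names are illustrative.

package example

import (
	"fmt"

	"github.com/ceph/go-ceph/rbd"
)

// tagImage stores a key/value pair in the image metadata, reads it back, and
// prints all metadata currently attached to the image.
func tagImage(image *rbd.Image, key, value string) error {
	if err := image.SetMetadata(key, value); err != nil {
		return err
	}

	got, err := image.GetMetadata(key)
	if err != nil {
		return err
	}
	fmt.Printf("%s=%s\n", key, got)

	all, err := image.ListMetadata()
	if err != nil {
		return err
	}
	for k, v := range all {
		fmt.Printf("  %s: %s\n", k, v)
	}
	return nil
}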
+ MigrationImageUnknown = MigrationImageState(C.RBD_IMAGE_MIGRATION_STATE_UNKNOWN) + // MigrationImageError is the representation of + // RBD_IMAGE_MIGRATION_STATE_ERROR from librbd. + MigrationImageError = MigrationImageState(C.RBD_IMAGE_MIGRATION_STATE_ERROR) + // MigrationImagePreparing is the representation of + // RBD_IMAGE_MIGRATION_STATE_PREPARING from librbd. + MigrationImagePreparing = MigrationImageState(C.RBD_IMAGE_MIGRATION_STATE_PREPARING) + // MigrationImagePrepared is the representation of + // RBD_IMAGE_MIGRATION_STATE_PREPARED from librbd. + MigrationImagePrepared = MigrationImageState(C.RBD_IMAGE_MIGRATION_STATE_PREPARED) + // MigrationImageExecuting is the representation of + // RBD_IMAGE_MIGRATION_STATE_EXECUTING from librbd. + MigrationImageExecuting = MigrationImageState(C.RBD_IMAGE_MIGRATION_STATE_EXECUTING) + // MigrationImageExecuted is the representation of + // RBD_IMAGE_MIGRATION_STATE_EXECUTED from librbd. + MigrationImageExecuted = MigrationImageState(C.RBD_IMAGE_MIGRATION_STATE_EXECUTED) + // MigrationImageAborting is the representation of + // RBD_IMAGE_MIGRATION_STATE_ABORTING from librbd. + MigrationImageAborting = MigrationImageState(C.RBD_IMAGE_MIGRATION_STATE_ABORTING) +) + +// MigrationImageStatus provides information about the +// live migration progress of an image. +type MigrationImageStatus struct { + SourcePoolID int + SourcePoolNamespace string + SourceImageName string + SourceImageID string + DestPoolID int + DestPoolNamespace string + DestImageName string + DestImageID string + State MigrationImageState + StateDescription string +} + +// MigrationPrepare prepares a migration +// creating a target image with a link +// to source and making source read-only. +// +// Implements: +// +// int rbd_migration_prepare(rados_ioctx_t ioctx, +// const char *image_name, +// rados_ioctx_t dest_ioctx, +// const char *dest_image_name, +// rbd_image_options_t opts); +func MigrationPrepare(ioctx *rados.IOContext, sourceImageName string, destIoctx *rados.IOContext, destImageName string, rio *ImageOptions) error { + cSourceImageName := C.CString(sourceImageName) + cDestImageName := C.CString(destImageName) + defer func() { + C.free(unsafe.Pointer(cSourceImageName)) + C.free(unsafe.Pointer(cDestImageName)) + }() + + ret := C.rbd_migration_prepare( + cephIoctx(ioctx), + cSourceImageName, + cephIoctx(destIoctx), + cDestImageName, + C.rbd_image_options_t(rio.options)) + + return getError(ret) +} + +// MigrationPrepareImport prepares a migration for import +// from a specified source to a new target image. +// +// Implements: +// +// int rbd_migration_prepare_import(const char *source_spec, +// rados_ioctx_t dest_ioctx, +// const char *dest_image_name, +// rbd_image_options_t opts); +func MigrationPrepareImport(sourceSpec string, ioctx *rados.IOContext, destImageName string, rio *ImageOptions) error { + cSourceSpec := C.CString(sourceSpec) + cDestImageName := C.CString(destImageName) + defer func() { + C.free(unsafe.Pointer(cSourceSpec)) + C.free(unsafe.Pointer(cDestImageName)) + }() + + ret := C.rbd_migration_prepare_import( + cSourceSpec, + cephIoctx(ioctx), + cDestImageName, + C.rbd_image_options_t(rio.options)) + + return getError(ret) +} + +// MigrationExecute starts copying the image blocks +// from the source image to the target image. 
+// +// Implements: +// +// int rbd_migration_execute(rados_ioctx_t ioctx, +// const char *image_name); +func MigrationExecute(ioctx *rados.IOContext, name string) error { + cName := C.CString(name) + + defer func() { + C.free(unsafe.Pointer(cName)) + }() + + ret := C.rbd_migration_execute( + cephIoctx(ioctx), + cName) + return getError(ret) +} + +// MigrationCommit commits a migration after execution +// breaking the relationship of image to the source. +// +// Implements: +// +// int rbd_migration_commit(rados_ioctx_t ioctx, +// const char *image_name); +func MigrationCommit(ioctx *rados.IOContext, name string) error { + cName := C.CString(name) + + defer func() { + C.free(unsafe.Pointer(cName)) + }() + + ret := C.rbd_migration_commit( + cephIoctx(ioctx), + cName) + return getError(ret) +} + +// MigrationAbort aborts a migration in progress +// breaking the relationship of image to the source. +// +// Implements: +// +// int rbd_migration_abort(rados_ioctx_t ioctx, +// const char *image_name); +func MigrationAbort(ioctx *rados.IOContext, name string) error { + cName := C.CString(name) + + defer func() { + C.free(unsafe.Pointer(cName)) + }() + + ret := C.rbd_migration_abort( + cephIoctx(ioctx), + cName) + return getError(ret) +} + +// MigrationStatus retrieve status of a live migration +// for the specified image. +// +// Implements: +// +// int rbd_migration_status(rados_ioctx_t ioctx, +// const char *image_name, +// rbd_image_migration_status_t *status, +// size_t status_size); +func MigrationStatus(ioctx *rados.IOContext, name string) (*MigrationImageStatus, error) { + cName := C.CString(name) + + defer func() { + C.free(unsafe.Pointer(cName)) + }() + + var status C.rbd_image_migration_status_t + ret := C.rbd_migration_status( + cephIoctx(ioctx), + cName, + &status, + C.sizeof_rbd_image_migration_status_t) + + if ret != 0 { + return nil, getError(ret) + } + + defer func() { + C.rbd_migration_status_cleanup(&status) + }() + + return &MigrationImageStatus{ + SourcePoolID: int(status.source_pool_id), + SourcePoolNamespace: C.GoString(status.source_pool_namespace), + SourceImageName: C.GoString(status.source_image_name), + SourceImageID: C.GoString(status.source_image_id), + DestPoolID: int(status.dest_pool_id), + DestPoolNamespace: C.GoString(status.dest_pool_namespace), + DestImageName: C.GoString(status.dest_image_name), + DestImageID: C.GoString(status.dest_image_id), + State: MigrationImageState(status.state), + StateDescription: C.GoString(status.state_description), + }, nil + +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/mirror.go b/vendor/github.com/ceph/go-ceph/rbd/mirror.go new file mode 100644 index 0000000000..bd85536b0e --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/mirror.go @@ -0,0 +1,1058 @@ +//go:build !nautilus +// +build !nautilus + +package rbd + +// #cgo LDFLAGS: -lrbd +// #include +// #include +import "C" + +import ( + "fmt" + "unsafe" + + "github.com/ceph/go-ceph/internal/cutil" + "github.com/ceph/go-ceph/internal/retry" + "github.com/ceph/go-ceph/rados" +) + +// MirrorMode is used to indicate an approach used for RBD mirroring. +type MirrorMode int64 + +const ( + // MirrorModeDisabled disables mirroring. + MirrorModeDisabled = MirrorMode(C.RBD_MIRROR_MODE_DISABLED) + // MirrorModeImage enables mirroring on a per-image basis. + MirrorModeImage = MirrorMode(C.RBD_MIRROR_MODE_IMAGE) + // MirrorModePool enables mirroring on all journaled images. + MirrorModePool = MirrorMode(C.RBD_MIRROR_MODE_POOL) +) + +// String representation of MirrorMode. 
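The migration functions above form a prepare/execute/commit workflow. The sketch below is not part of the vendored sources; it assumes NewRbdImageOptions and Destroy from the vendored options.go, and all other names are illustrative.

package example

import (
	"fmt"

	"github.com/ceph/go-ceph/rados"
	"github.com/ceph/go-ceph/rbd"
)

// migrateImage runs one live-migration cycle for an image within or across
// pools: prepare, execute, verify the state, then commit. After prepare, the
// execute/status/commit calls all operate on the destination image.
func migrateImage(srcIoctx, dstIoctx *rados.IOContext, srcName, dstName string) error {
	opts := rbd.NewRbdImageOptions()
	defer opts.Destroy()

	if err := rbd.MigrationPrepare(srcIoctx, srcName, dstIoctx, dstName, opts); err != nil {
		return err
	}
	if err := rbd.MigrationExecute(dstIoctx, dstName); err != nil {
		return err
	}

	status, err := rbd.MigrationStatus(dstIoctx, dstName)
	if err != nil {
		return err
	}
	if status.State != rbd.MigrationImageExecuted {
		return fmt.Errorf("unexpected migration state: %v (%s)",
			status.State, status.StateDescription)
	}
	return rbd.MigrationCommit(dstIoctx, dstName)
}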
+func (m MirrorMode) String() string { + switch m { + case MirrorModeDisabled: + return "disabled" + case MirrorModeImage: + return "image" + case MirrorModePool: + return "pool" + default: + return "" + } +} + +// ImageMirrorMode is used to indicate the mirroring approach for an RBD image. +type ImageMirrorMode int64 + +// ImageMirrorModeFilter is a ImageMirrorMode or nil for no filtering +type ImageMirrorModeFilter interface { + mode() ImageMirrorMode +} + +// Mode returns the ImageMirrorMode +func (imm ImageMirrorMode) mode() ImageMirrorMode { + return imm +} + +const ( + // ImageMirrorModeJournal uses journaling to propagate RBD images between + // ceph clusters. + ImageMirrorModeJournal = ImageMirrorMode(C.RBD_MIRROR_IMAGE_MODE_JOURNAL) + // ImageMirrorModeSnapshot uses snapshots to propagate RBD images between + // ceph clusters. + ImageMirrorModeSnapshot = ImageMirrorMode(C.RBD_MIRROR_IMAGE_MODE_SNAPSHOT) +) + +// String representation of ImageMirrorMode. +func (imm ImageMirrorMode) String() string { + switch imm { + case ImageMirrorModeJournal: + return "journal" + case ImageMirrorModeSnapshot: + return "snapshot" + default: + return "" + } +} + +// GetMirrorUUID returns a string naming the mirroring uuid for the pool +// associated with the ioctx. +// +// Implements: +// +// int rbd_mirror_uuid_get(rados_ioctx_t io_ctx, char *uuid, size_t +// *max_len); +func GetMirrorUUID(ioctx *rados.IOContext) (string, error) { + var ( + err error + buf []byte + cSize C.size_t + ) + retry.WithSizes(1024, 1<<16, func(size int) retry.Hint { + cSize = C.size_t(size) + buf = make([]byte, cSize) + ret := C.rbd_mirror_uuid_get( + cephIoctx(ioctx), + (*C.char)(unsafe.Pointer(&buf[0])), + &cSize) + err = getErrorIfNegative(ret) + return retry.Size(int(cSize)).If(err == errRange) + }) + if err != nil { + return "", err + } + return string(buf[:cSize]), nil +} + +// SetMirrorMode is used to enable or disable pool level mirroring with either +// an automatic or per-image behavior. +// +// Implements: +// +// int rbd_mirror_mode_set(rados_ioctx_t io_ctx, +// rbd_mirror_mode_t mirror_mode); +func SetMirrorMode(ioctx *rados.IOContext, mode MirrorMode) error { + ret := C.rbd_mirror_mode_set( + cephIoctx(ioctx), + C.rbd_mirror_mode_t(mode)) + return getError(ret) +} + +// GetMirrorMode is used to fetch the current mirroring mode for a pool. +// +// Implements: +// +// int rbd_mirror_mode_get(rados_ioctx_t io_ctx, +// rbd_mirror_mode_t *mirror_mode); +func GetMirrorMode(ioctx *rados.IOContext) (MirrorMode, error) { + var mode C.rbd_mirror_mode_t + + ret := C.rbd_mirror_mode_get( + cephIoctx(ioctx), + &mode) + if err := getError(ret); err != nil { + return MirrorModeDisabled, err + } + return MirrorMode(mode), nil +} + +// MirrorEnable will enable mirroring for an image using the specified mode. +// +// Implements: +// +// int rbd_mirror_image_enable2(rbd_image_t image, +// rbd_mirror_image_mode_t mode); +func (image *Image) MirrorEnable(mode ImageMirrorMode) error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + ret := C.rbd_mirror_image_enable2(image.image, C.rbd_mirror_image_mode_t(mode)) + return getError(ret) +} + +// MirrorDisable will disable mirroring for the image. 
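A minimal, non-vendored sketch of switching a pool to per-image mirroring and enabling snapshot-based mirroring on one open image; the helper name is illustrative.

package example

import (
	"github.com/ceph/go-ceph/rados"
	"github.com/ceph/go-ceph/rbd"
)

// enableSnapshotMirroring sets the pool behind ioctx to per-image mirroring
// and enables snapshot-based mirroring for the given open image.
func enableSnapshotMirroring(ioctx *rados.IOContext, image *rbd.Image) error {
	if err := rbd.SetMirrorMode(ioctx, rbd.MirrorModeImage); err != nil {
		return err
	}
	return image.MirrorEnable(rbd.ImageMirrorModeSnapshot)
}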
+// +// Implements: +// +// int rbd_mirror_image_disable(rbd_image_t image, bool force); +func (image *Image) MirrorDisable(force bool) error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + ret := C.rbd_mirror_image_disable(image.image, C.bool(force)) + return getError(ret) +} + +// MirrorPromote will promote the image to primary status. +// +// Implements: +// +// int rbd_mirror_image_promote(rbd_image_t image, bool force); +func (image *Image) MirrorPromote(force bool) error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + ret := C.rbd_mirror_image_promote(image.image, C.bool(force)) + return getError(ret) +} + +// MirrorDemote will demote the image to secondary status. +// +// Implements: +// +// int rbd_mirror_image_demote(rbd_image_t image); +func (image *Image) MirrorDemote() error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + ret := C.rbd_mirror_image_demote(image.image) + return getError(ret) +} + +// MirrorResync is used to manually resolve split-brain status by triggering +// resynchronization. +// +// Implements: +// +// int rbd_mirror_image_resync(rbd_image_t image); +func (image *Image) MirrorResync() error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + ret := C.rbd_mirror_image_resync(image.image) + return getError(ret) +} + +// MirrorInstanceID returns a string naming the instance id for the image. +// +// Implements: +// +// int rbd_mirror_image_get_instance_id(rbd_image_t image, +// char *instance_id, +// size_t *id_max_length); +func (image *Image) MirrorInstanceID() (string, error) { + if err := image.validate(imageIsOpen); err != nil { + return "", err + } + var ( + err error + buf []byte + cSize C.size_t + ) + retry.WithSizes(1024, 1<<16, func(size int) retry.Hint { + cSize = C.size_t(size) + buf = make([]byte, cSize) + ret := C.rbd_mirror_image_get_instance_id( + image.image, + (*C.char)(unsafe.Pointer(&buf[0])), + &cSize) + err = getErrorIfNegative(ret) + return retry.Size(int(cSize)).If(err == errRange) + }) + if err != nil { + return "", err + } + return string(buf[:cSize]), nil +} + +// MirrorImageState represents the mirroring state of a RBD image. +type MirrorImageState C.rbd_mirror_image_state_t + +const ( + // MirrorImageDisabling is the representation of + // RBD_MIRROR_IMAGE_DISABLING from librbd. + MirrorImageDisabling = MirrorImageState(C.RBD_MIRROR_IMAGE_DISABLING) + // MirrorImageEnabled is the representation of + // RBD_MIRROR_IMAGE_ENABLED from librbd. + MirrorImageEnabled = MirrorImageState(C.RBD_MIRROR_IMAGE_ENABLED) + // MirrorImageDisabled is the representation of + // RBD_MIRROR_IMAGE_DISABLED from librbd. + MirrorImageDisabled = MirrorImageState(C.RBD_MIRROR_IMAGE_DISABLED) +) + +// String representation of MirrorImageState. +func (mis MirrorImageState) String() string { + switch mis { + case MirrorImageDisabling: + return "disabling" + case MirrorImageEnabled: + return "enabled" + case MirrorImageDisabled: + return "disabled" + default: + return "" + } +} + +// MirrorImageInfo represents the mirroring status information of a RBD image. +type MirrorImageInfo struct { + GlobalID string + State MirrorImageState + Primary bool +} + +func convertMirrorImageInfo(cInfo *C.rbd_mirror_image_info_t) MirrorImageInfo { + return MirrorImageInfo{ + GlobalID: C.GoString(cInfo.global_id), + State: MirrorImageState(cInfo.state), + Primary: bool(cInfo.primary), + } +} + +// GetMirrorImageInfo fetches the mirroring status information of a RBD image. 
+// +// Implements: +// +// int rbd_mirror_image_get_info(rbd_image_t image, +// rbd_mirror_image_info_t *mirror_image_info, +// size_t info_size) +func (image *Image) GetMirrorImageInfo() (*MirrorImageInfo, error) { + if err := image.validate(imageIsOpen); err != nil { + return nil, err + } + + var cInfo C.rbd_mirror_image_info_t + + ret := C.rbd_mirror_image_get_info( + image.image, + &cInfo, + C.sizeof_rbd_mirror_image_info_t) + if ret < 0 { + return nil, getError(ret) + } + + mii := convertMirrorImageInfo(&cInfo) + + // free C memory allocated by C.rbd_mirror_image_get_info call + C.rbd_mirror_image_get_info_cleanup(&cInfo) + return &mii, nil +} + +// GetImageMirrorMode fetches the mirroring approach for an RBD image. +// +// Implements: +// +// int rbd_mirror_image_get_mode(rbd_image_t image, rbd_mirror_image_mode_t *mode); +func (image *Image) GetImageMirrorMode() (ImageMirrorMode, error) { + var mode C.rbd_mirror_image_mode_t + if err := image.validate(imageIsOpen); err != nil { + return ImageMirrorMode(mode), err + } + + ret := C.rbd_mirror_image_get_mode(image.image, &mode) + return ImageMirrorMode(mode), getError(ret) +} + +// MirrorImageStatusState is used to indicate the state of a mirrored image +// within the site status info. +type MirrorImageStatusState int64 + +const ( + // MirrorImageStatusStateUnknown is equivalent to MIRROR_IMAGE_STATUS_STATE_UNKNOWN + MirrorImageStatusStateUnknown = MirrorImageStatusState(C.MIRROR_IMAGE_STATUS_STATE_UNKNOWN) + // MirrorImageStatusStateError is equivalent to MIRROR_IMAGE_STATUS_STATE_ERROR + MirrorImageStatusStateError = MirrorImageStatusState(C.MIRROR_IMAGE_STATUS_STATE_ERROR) + // MirrorImageStatusStateSyncing is equivalent to MIRROR_IMAGE_STATUS_STATE_SYNCING + MirrorImageStatusStateSyncing = MirrorImageStatusState(C.MIRROR_IMAGE_STATUS_STATE_SYNCING) + // MirrorImageStatusStateStartingReplay is equivalent to MIRROR_IMAGE_STATUS_STATE_STARTING_REPLAY + MirrorImageStatusStateStartingReplay = MirrorImageStatusState(C.MIRROR_IMAGE_STATUS_STATE_STARTING_REPLAY) + // MirrorImageStatusStateReplaying is equivalent to MIRROR_IMAGE_STATUS_STATE_REPLAYING + MirrorImageStatusStateReplaying = MirrorImageStatusState(C.MIRROR_IMAGE_STATUS_STATE_REPLAYING) + // MirrorImageStatusStateStoppingReplay is equivalent to MIRROR_IMAGE_STATUS_STATE_STOPPING_REPLAY + MirrorImageStatusStateStoppingReplay = MirrorImageStatusState(C.MIRROR_IMAGE_STATUS_STATE_STOPPING_REPLAY) + // MirrorImageStatusStateStopped is equivalent to MIRROR_IMAGE_STATUS_STATE_STOPPED + MirrorImageStatusStateStopped = MirrorImageStatusState(C.MIRROR_IMAGE_STATUS_STATE_STOPPED) +) + +// String represents the MirrorImageStatusState as a short string. +func (state MirrorImageStatusState) String() (s string) { + switch state { + case MirrorImageStatusStateUnknown: + s = "unknown" + case MirrorImageStatusStateError: + s = "error" + case MirrorImageStatusStateSyncing: + s = "syncing" + case MirrorImageStatusStateStartingReplay: + s = "starting_replay" + case MirrorImageStatusStateReplaying: + s = "replaying" + case MirrorImageStatusStateStoppingReplay: + s = "stopping_replay" + case MirrorImageStatusStateStopped: + s = "stopped" + default: + s = fmt.Sprintf("unknown(%d)", state) + } + return s +} + +// SiteMirrorImageStatus contains information pertaining to the status of +// a mirrored image within a site. 
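As an illustration only, not vendored code, the sketch below inspects the mirror info of an open image and force-promotes it when it is not primary, roughly what a failover to a secondary site would do; the helper name is an assumption.

package example

import (
	"fmt"

	"github.com/ceph/go-ceph/rbd"
)

// promoteIfSecondary prints the mirroring info of an open image and promotes
// the local copy to primary when it is currently a secondary.
func promoteIfSecondary(image *rbd.Image) error {
	info, err := image.GetMirrorImageInfo()
	if err != nil {
		return err
	}
	fmt.Printf("global id %s, state %s, primary %v\n",
		info.GlobalID, info.State, info.Primary)

	if info.Primary {
		return nil
	}
	return image.MirrorPromote(true)
}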
+type SiteMirrorImageStatus struct { + MirrorUUID string + State MirrorImageStatusState + Description string + LastUpdate int64 + Up bool +} + +// GlobalMirrorImageStatus contains information pertaining to the global +// status of a mirrored image. It contains general information as well +// as per-site information stored in the SiteStatuses slice. +type GlobalMirrorImageStatus struct { + Name string + Info MirrorImageInfo + SiteStatuses []SiteMirrorImageStatus +} + +// LocalStatus returns one SiteMirrorImageStatus item from the SiteStatuses +// slice that corresponds to the local site's status. If the local status +// is not found than the error ErrNotExist will be returned. +func (gmis GlobalMirrorImageStatus) LocalStatus() (SiteMirrorImageStatus, error) { + var ( + ss SiteMirrorImageStatus + err error = ErrNotExist + ) + for i := range gmis.SiteStatuses { + // I couldn't find it explicitly documented, but a site mirror uuid + // of an empty string indicates that this is the local site. + // This pattern occurs in both the pybind code and ceph c++. + if gmis.SiteStatuses[i].MirrorUUID == "" { + ss = gmis.SiteStatuses[i] + err = nil + break + } + } + return ss, err +} + +type siteArray [cutil.MaxIdx]C.rbd_mirror_image_site_status_t + +// GetGlobalMirrorStatus returns status information pertaining to the state +// of the images's mirroring. +// +// Implements: +// +// int rbd_mirror_image_get_global_status( +// rbd_image_t image, +// rbd_mirror_image_global_status_t *mirror_image_global_status, +// size_t status_size); +func (image *Image) GetGlobalMirrorStatus() (GlobalMirrorImageStatus, error) { + if err := image.validate(imageIsOpen); err != nil { + return GlobalMirrorImageStatus{}, err + } + + s := C.rbd_mirror_image_global_status_t{} + ret := C.rbd_mirror_image_get_global_status( + image.image, + &s, + C.sizeof_rbd_mirror_image_global_status_t) + if err := getError(ret); err != nil { + return GlobalMirrorImageStatus{}, err + } + defer C.rbd_mirror_image_global_status_cleanup(&s) + + status := newGlobalMirrorImageStatus(&s) + return status, nil +} + +func newGlobalMirrorImageStatus( + s *C.rbd_mirror_image_global_status_t) GlobalMirrorImageStatus { + + status := GlobalMirrorImageStatus{ + Name: C.GoString(s.name), + Info: convertMirrorImageInfo(&s.info), + SiteStatuses: make([]SiteMirrorImageStatus, s.site_statuses_count), + } + // use the "Sven Technique" to treat the C pointer as a go slice temporarily + sscs := (*siteArray)(unsafe.Pointer(s.site_statuses))[:s.site_statuses_count:s.site_statuses_count] + for i := C.uint32_t(0); i < s.site_statuses_count; i++ { + ss := sscs[i] + status.SiteStatuses[i] = SiteMirrorImageStatus{ + MirrorUUID: C.GoString(ss.mirror_uuid), + State: MirrorImageStatusState(ss.state), + Description: C.GoString(ss.description), + LastUpdate: int64(ss.last_update), + Up: bool(ss.up), + } + } + return status +} + +// CreateMirrorSnapshot creates a snapshot for image propagation to mirrors. +// +// Implements: +// +// int rbd_mirror_image_create_snapshot(rbd_image_t image, +// uint64_t *snap_id); +func (image *Image) CreateMirrorSnapshot() (uint64, error) { + var snapID C.uint64_t + ret := C.rbd_mirror_image_create_snapshot( + image.image, + &snapID) + return uint64(snapID), getError(ret) +} + +// MirrorImageStatusSummary returns a map of images statuses and the count +// of images with said status. 
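A non-vendored sketch showing how GetGlobalMirrorStatus and LocalStatus combine to report the health of the local replica of an open image; the helper name is illustrative.

package example

import (
	"fmt"

	"github.com/ceph/go-ceph/rbd"
)

// reportMirrorHealth fetches the global mirroring status of an open image and
// prints the replication state reported for the local site.
func reportMirrorHealth(image *rbd.Image) error {
	status, err := image.GetGlobalMirrorStatus()
	if err != nil {
		return err
	}

	local, err := status.LocalStatus()
	if err != nil {
		return fmt.Errorf("no local site status for %s: %w", status.Name, err)
	}
	fmt.Printf("%s: %s (up=%v) %s\n",
		status.Name, local.State, local.Up, local.Description)
	return nil
}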
+// +// Implements: +// +// int rbd_mirror_image_status_summary( +// rados_ioctx_t io_ctx, rbd_mirror_image_status_state_t *states, int *counts, +// size_t *maxlen); +func MirrorImageStatusSummary( + ioctx *rados.IOContext) (map[MirrorImageStatusState]uint, error) { + // ideally, we already know the size of the arrays - they should be + // the size of all the values of the rbd_mirror_image_status_state_t + // enum. But the C api doesn't enforce this so we give a little + // wiggle room in case the server returns values outside the enum + // we know about. This is the only case I can think of that we'd + // be able to get -ERANGE. + var ( + cioctx = cephIoctx(ioctx) + err error + cStates []C.rbd_mirror_image_status_state_t + cCounts []C.int + cSize C.size_t + ) + retry.WithSizes(16, 1<<16, func(size int) retry.Hint { + cSize = C.size_t(size) + cStates = make([]C.rbd_mirror_image_status_state_t, cSize) + cCounts = make([]C.int, cSize) + ret := C.rbd_mirror_image_status_summary( + cioctx, + (*C.rbd_mirror_image_status_state_t)(&cStates[0]), + (*C.int)(&cCounts[0]), + &cSize) + err = getErrorIfNegative(ret) + return retry.Size(int(cSize)).If(err == errRange) + }) + if err != nil { + return nil, err + } + + m := map[MirrorImageStatusState]uint{} + for i := 0; i < int(cSize); i++ { + s := MirrorImageStatusState(cStates[i]) + m[s] = uint(cCounts[i]) + } + return m, nil +} + +// SetMirrorSiteName sets the site name, used for rbd mirroring, for the ceph +// cluster associated with the provided rados connection. +// +// Implements: +// +// int rbd_mirror_site_name_set(rados_t cluster, +// const char *name); +func SetMirrorSiteName(conn *rados.Conn, name string) error { + cName := C.CString(name) + defer C.free(unsafe.Pointer(cName)) + + ret := C.rbd_mirror_site_name_set( + C.rados_t(conn.Cluster()), + cName) + return getError(ret) +} + +// GetMirrorSiteName gets the site name, used for rbd mirroring, for the ceph +// cluster associated with the provided rados connection. +// +// Implements: +// int rbd_mirror_site_name_get(rados_t cluster, +// +// char *name, size_t *max_len); +func GetMirrorSiteName(conn *rados.Conn) (string, error) { + + var ( + cluster = C.rados_t(conn.Cluster()) + err error + buf []byte + cSize C.size_t + ) + retry.WithSizes(1024, 1<<16, func(size int) retry.Hint { + cSize = C.size_t(size) + buf = make([]byte, cSize) + ret := C.rbd_mirror_site_name_get( + cluster, + (*C.char)(unsafe.Pointer(&buf[0])), + &cSize) + err = getErrorIfNegative(ret) + return retry.Size(int(cSize)).If(err == errRange) + }) + if err != nil { + return "", err + } + // the C code sets the size including null byte + return string(buf[:cSize-1]), nil +} + +// CreateMirrorPeerBootstrapToken returns a token value, representing the +// cluster and pool associated with the given IO context, that can be provided +// to ImportMirrorPeerBootstrapToken in order to set up mirroring between +// pools. 
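+//
+// Minimal two-cluster sketch (the IO context variable names are assumptions,
+// not part of the API); the token created on the primary site is handed to
+// ImportMirrorPeerBootstrapToken on the other site:
+//
+//	// on the cluster holding the primary images
+//	token, err := CreateMirrorPeerBootstrapToken(primaryIOCtx)
+//	if err != nil {
+//		// handle error
+//	}
+//	// on the peer cluster
+//	err = ImportMirrorPeerBootstrapToken(peerIOCtx, MirrorPeerDirectionRxTx, token)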
+// +// Implements: +// +// int rbd_mirror_peer_bootstrap_create( +// rados_ioctx_t io_ctx, char *token, size_t *max_len); +func CreateMirrorPeerBootstrapToken(ioctx *rados.IOContext) (string, error) { + var ( + cioctx = cephIoctx(ioctx) + err error + buf []byte + cSize C.size_t + ) + retry.WithSizes(1024, 1<<16, func(size int) retry.Hint { + cSize = C.size_t(size) + buf = make([]byte, cSize) + ret := C.rbd_mirror_peer_bootstrap_create( + cioctx, + (*C.char)(unsafe.Pointer(&buf[0])), + &cSize) + err = getErrorIfNegative(ret) + return retry.Size(int(cSize)).If(err == errRange) + }) + if err != nil { + return "", err + } + // the C code sets the size including null byte + return string(buf[:cSize-1]), nil +} + +// MirrorPeerDirection is used to indicate what direction data is mirrored. +type MirrorPeerDirection int + +const ( + // MirrorPeerDirectionRx is equivalent to RBD_MIRROR_PEER_DIRECTION_RX + MirrorPeerDirectionRx = MirrorPeerDirection(C.RBD_MIRROR_PEER_DIRECTION_RX) + // MirrorPeerDirectionTx is equivalent to RBD_MIRROR_PEER_DIRECTION_TX + MirrorPeerDirectionTx = MirrorPeerDirection(C.RBD_MIRROR_PEER_DIRECTION_TX) + // MirrorPeerDirectionRxTx is equivalent to RBD_MIRROR_PEER_DIRECTION_RX_TX + MirrorPeerDirectionRxTx = MirrorPeerDirection(C.RBD_MIRROR_PEER_DIRECTION_RX_TX) +) + +// ImportMirrorPeerBootstrapToken applies the provided bootstrap token to the +// pool associated with the IO context to create a mirroring relationship +// between pools. The direction parameter controls if data in the pool is a +// source, destination, or both. +// +// Implements: +// +// int rbd_mirror_peer_bootstrap_import( +// rados_ioctx_t io_ctx, rbd_mirror_peer_direction_t direction, +// const char *token); +func ImportMirrorPeerBootstrapToken( + ioctx *rados.IOContext, direction MirrorPeerDirection, token string) error { + // instead of taking a length, rbd_mirror_peer_bootstrap_import assumes a + // null terminated "c string". We don't use CString because we don't use + // Go's string type as we don't want to treat the token as something users + // should interpret. If we were doing CString we'd be doing a copy anyway. + cToken := C.CString(token) + defer C.free(unsafe.Pointer(cToken)) + + ret := C.rbd_mirror_peer_bootstrap_import( + cephIoctx(ioctx), + C.rbd_mirror_peer_direction_t(direction), + cToken) + return getError(ret) +} + +// GlobalMirrorImageIDAndStatus values contain an ID string for a RBD image +// and that image's GlobalMirrorImageStatus. +type GlobalMirrorImageIDAndStatus struct { + ID string + Status GlobalMirrorImageStatus +} + +// iterBufSize is intentionally not a constant. The unit tests alter +// this value in order to get more code coverage w/o needing to create +// very many images. +var iterBufSize = 64 + +// MirrorImageGlobalStatusList returns a slice of GlobalMirrorImageIDAndStatus. +// If the length of the returned slice equals max, the next chunk of the list +// can be obtained by setting start to the ID of the last item of the returned +// slice. If max is 0 a slice of all items is returned. 
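+//
+// Hedged paging sketch following the description above (the chunk size of 100
+// is an arbitrary example value):
+//
+//	start := ""
+//	for {
+//		chunk, err := MirrorImageGlobalStatusList(ioctx, start, 100)
+//		if err != nil {
+//			// handle error
+//		}
+//		// ... inspect chunk ...
+//		if len(chunk) < 100 {
+//			break
+//		}
+//		start = chunk[len(chunk)-1].ID
+//	}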
+// +// Implements: +// int rbd_mirror_image_status_list(rados_ioctx_t p, +// +// const char *start_id, size_t max, char **image_ids, +// rbd_mirror_image_status_t *images, size_t *len) +func MirrorImageGlobalStatusList( + ioctx *rados.IOContext, start string, max int) ([]GlobalMirrorImageIDAndStatus, error) { + var ( + result []GlobalMirrorImageIDAndStatus + fetchAll bool + ) + if max <= 0 { + max = iterBufSize + fetchAll = true + } + chunk := make([]GlobalMirrorImageIDAndStatus, max) + for { + length, err := mirrorImageGlobalStatusList(ioctx, start, chunk) + if err != nil { + return nil, err + } + result = append(result, chunk[:length]...) + if !fetchAll || length < max { + break + } + start = chunk[length-1].ID + } + return result, nil +} + +func mirrorImageGlobalStatusList( + ioctx *rados.IOContext, start string, + results []GlobalMirrorImageIDAndStatus) (int, error) { + // this C function is treated like a "batch" iterator. Based on it's + // design it appears expected to call it multiple times to get + // the entire result. + cStart := C.CString(start) + defer C.free(unsafe.Pointer(cStart)) + + var ( + max = C.size_t(len(results)) + length = C.size_t(0) + ids = make([]*C.char, len(results)) + images = make([]C.rbd_mirror_image_global_status_t, len(results)) + ) + ret := C.rbd_mirror_image_global_status_list( + cephIoctx(ioctx), + cStart, + max, + &ids[0], + &images[0], + &length) + if err := getError(ret); err != nil { + return 0, err + } + for i := 0; i < int(length); i++ { + results[i].ID = C.GoString(ids[i]) + results[i].Status = newGlobalMirrorImageStatus(&images[i]) + } + C.rbd_mirror_image_global_status_list_cleanup( + &ids[0], + &images[0], + length) + return int(length), getError(ret) +} + +// MirrorImageGlobalStatusIter provide methods for iterating over all +// the GlobalMirrorImageIdAndStatus values in a pool. +type MirrorImageGlobalStatusIter struct { + ioctx *rados.IOContext + + buf []GlobalMirrorImageIDAndStatus + lastID string +} + +// NewMirrorImageGlobalStatusIter creates a new iterator type ready for use. +func NewMirrorImageGlobalStatusIter(ioctx *rados.IOContext) *MirrorImageGlobalStatusIter { + return &MirrorImageGlobalStatusIter{ + ioctx: ioctx, + } +} + +// Next fetches one GlobalMirrorImageIDAndStatus value or a nil value if +// iteration is exhausted. The error return will be non-nil if an underlying +// error fetching more values occurred. +func (iter *MirrorImageGlobalStatusIter) Next() (*GlobalMirrorImageIDAndStatus, error) { + if len(iter.buf) == 0 { + if err := iter.fetch(); err != nil { + return nil, err + } + } + if len(iter.buf) == 0 { + return nil, nil + } + item := iter.buf[0] + iter.lastID = item.ID + iter.buf = iter.buf[1:] + return &item, nil +} + +// Close terminates iteration regardless if iteration was completed and +// frees any associated resources. +// +// Deprecated: not required +func (*MirrorImageGlobalStatusIter) Close() error { + return nil +} + +func (iter *MirrorImageGlobalStatusIter) fetch() error { + iter.buf = nil + items := make([]GlobalMirrorImageIDAndStatus, iterBufSize) + n, err := mirrorImageGlobalStatusList( + iter.ioctx, + iter.lastID, + items) + if err != nil { + return err + } + if n > 0 { + iter.buf = items[:n] + } + return nil +} + +// MirrorImageInfoItem contains an ID string for a RBD image and that image's +// ImageMirrorMode and MirrorImageInfo. +type MirrorImageInfoItem struct { + ID string + Mode ImageMirrorMode + Info MirrorImageInfo +} + +// MirrorImageInfoList returns a slice of MirrorImageInfoItem. 
If the length of +// the returned slice equals max, the next chunk of the list can be obtained by +// setting start to the ID of the last item of the returned slice. The returned +// items are filtered by the mirror mode specified with modeFilter. If max is 0 +// a slice of all items is returned. +// +// Implements: +// int rbd_mirror_image_info_list( +// +// rados_ioctx_t p, rbd_mirror_image_mode_t *mode_filter, +// const char *start_id, size_t max, char **image_ids, +// rbd_mirror_image_mode_t *mode_entries, +// rbd_mirror_image_info_t *info_entries, size_t *num_entries) +func MirrorImageInfoList( + ioctx *rados.IOContext, modeFilter ImageMirrorModeFilter, start string, + max int) ([]MirrorImageInfoItem, error) { + var ( + result []MirrorImageInfoItem + fetchAll bool + ) + if max <= 0 { + max = iterBufSize + fetchAll = true + } + chunk := make([]MirrorImageInfoItem, max) + for { + length, err := mirrorImageInfoList(ioctx, start, modeFilter, chunk) + if err != nil { + return nil, err + } + result = append(result, chunk[:length]...) + if !fetchAll || length < max { + break + } + start = chunk[length-1].ID + } + return result, nil +} + +func mirrorImageInfoList(ioctx *rados.IOContext, start string, + modeFilter ImageMirrorModeFilter, results []MirrorImageInfoItem) (int, error) { + + cStart := C.CString(start) + defer C.free(unsafe.Pointer(cStart)) + + var ( + max = C.size_t(len(results)) + length = C.size_t(0) + ids = make([]*C.char, len(results)) + modes = make([]C.rbd_mirror_image_mode_t, len(results)) + infos = make([]C.rbd_mirror_image_info_t, len(results)) + modeFilterPtr *C.rbd_mirror_image_mode_t + ) + if modeFilter != nil { + cMode := C.rbd_mirror_image_mode_t(modeFilter.mode()) + modeFilterPtr = &cMode + } + ret := C.rbd_mirror_image_info_list( + cephIoctx(ioctx), + modeFilterPtr, + cStart, + max, + &ids[0], + &modes[0], + &infos[0], + &length, + ) + if err := getError(ret); err != nil { + return 0, err + } + for i := 0; i < int(length); i++ { + results[i].ID = C.GoString(ids[i]) + results[i].Mode = ImageMirrorMode(modes[i]) + results[i].Info = convertMirrorImageInfo(&infos[i]) + } + C.rbd_mirror_image_info_list_cleanup( + &ids[0], + &infos[0], + length) + return int(length), getError(ret) +} + +// MirrorImageInfoIter provide methods for iterating over all +// the MirrorImageInfoItem values in a pool. +type MirrorImageInfoIter struct { + ioctx *rados.IOContext + + modeFilter ImageMirrorModeFilter + buf []MirrorImageInfoItem + lastID string +} + +// NewMirrorImageInfoIter creates a new iterator ready for use. +func NewMirrorImageInfoIter(ioctx *rados.IOContext, modeFilter ImageMirrorModeFilter) *MirrorImageInfoIter { + return &MirrorImageInfoIter{ + ioctx: ioctx, + modeFilter: modeFilter, + } +} + +// Next fetches one MirrorImageInfoItem value or a nil value if iteration is +// exhausted. The error return will be non-nil if an underlying error fetching +// more values occurred. 
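+//
+// Typical iteration sketch (assumes an open rados.IOContext named ioctx; a nil
+// modeFilter is used here to mean "no filtering", matching the list call):
+//
+//	iter := NewMirrorImageInfoIter(ioctx, nil)
+//	for {
+//		item, err := iter.Next()
+//		if err != nil {
+//			// handle error
+//		}
+//		if item == nil {
+//			break // iteration exhausted
+//		}
+//		// use item.ID, item.Mode, item.Info
+//	}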
+func (iter *MirrorImageInfoIter) Next() (*MirrorImageInfoItem, error) { + if len(iter.buf) == 0 { + if err := iter.fetch(); err != nil { + return nil, err + } + if len(iter.buf) == 0 { + return nil, nil + } + iter.lastID = iter.buf[len(iter.buf)-1].ID + } + item := iter.buf[0] + iter.buf = iter.buf[1:] + return &item, nil +} + +func (iter *MirrorImageInfoIter) fetch() error { + iter.buf = nil + items := make([]MirrorImageInfoItem, iterBufSize) + n, err := mirrorImageInfoList( + iter.ioctx, + iter.lastID, + iter.modeFilter, + items) + if err != nil { + return err + } + if n > 0 { + iter.buf = items[:n] + } + return nil +} + +// MirrorImageInstanceIDItem contains an ID string for a RBD image and +// its corresponding mirrored image's Instance ID. +type MirrorImageInstanceIDItem struct { + ID string + InstanceID string +} + +// MirrorImageInstanceIDList returns a slice of MirrorImageInstanceIDItem. If +// the length of the returned slice equals max, the next chunk of the list can +// be obtained by setting start to the ID of the last item of the returned slice. +// If max is 0 a slice of all items is returned. +// +// Implements: +// int rbd_mirror_image_instance_id_list( +// +// rados_ioctx_t io_ctx, +// const char *start_id, +// size_t max, char **image_ids, +// char **instance_ids, +// size_t *len) +func MirrorImageInstanceIDList( + ioctx *rados.IOContext, start string, + max int) ([]MirrorImageInstanceIDItem, error) { + var ( + result []MirrorImageInstanceIDItem + fetchAll bool + ) + if max <= 0 { + max = iterBufSize + fetchAll = true + } + chunk := make([]MirrorImageInstanceIDItem, max) + for { + length, err := mirrorImageInstanceIDList(ioctx, start, chunk) + if err != nil { + return nil, err + } + result = append(result, chunk[:length]...) + if !fetchAll || length < max { + break + } + start = chunk[length-1].ID + } + return result, nil +} + +func mirrorImageInstanceIDList(ioctx *rados.IOContext, start string, + results []MirrorImageInstanceIDItem) (int, error) { + + cStart := C.CString(start) + defer C.free(unsafe.Pointer(cStart)) + + var ( + max = C.size_t(len(results)) + length = C.size_t(0) + ids = make([]*C.char, len(results)) + instanceIDs = make([]*C.char, len(results)) + ) + ret := C.rbd_mirror_image_instance_id_list( + cephIoctx(ioctx), + cStart, + max, + &ids[0], + &instanceIDs[0], + &length, + ) + if err := getError(ret); err != nil { + return 0, err + } + for i := 0; i < int(length); i++ { + results[i].ID = C.GoString(ids[i]) + results[i].InstanceID = C.GoString(instanceIDs[i]) + } + C.rbd_mirror_image_instance_id_list_cleanup( + &ids[0], + &instanceIDs[0], + length) + return int(length), getError(ret) +} + +// MirrorImageInstanceIDIter provide methods for iterating over all +// the MirrorImageInstanceIDItem values in a pool. +type MirrorImageInstanceIDIter struct { + ioctx *rados.IOContext + + buf []MirrorImageInstanceIDItem + lastID string +} + +// NewMirrorImageInstanceIDIter creates a new iterator ready for use. +func NewMirrorImageInstanceIDIter(ioctx *rados.IOContext) *MirrorImageInstanceIDIter { + return &MirrorImageInstanceIDIter{ + ioctx: ioctx, + } +} + +// Next fetches one MirrorImageInstanceIDItem value or a nil value if iteration is +// exhausted. The error return will be non-nil if an underlying error fetching +// more values occurred. 
+func (iter *MirrorImageInstanceIDIter) Next() (*MirrorImageInstanceIDItem, error) { + if len(iter.buf) == 0 { + if err := iter.fetch(); err != nil { + return nil, err + } + if len(iter.buf) == 0 { + return nil, nil + } + iter.lastID = iter.buf[len(iter.buf)-1].ID + } + item := iter.buf[0] + iter.buf = iter.buf[1:] + return &item, nil +} + +func (iter *MirrorImageInstanceIDIter) fetch() error { + iter.buf = nil + items := make([]MirrorImageInstanceIDItem, iterBufSize) + n, err := mirrorImageInstanceIDList( + iter.ioctx, + iter.lastID, + items) + if err != nil { + return err + } + if n > 0 { + iter.buf = items[:n] + } + return nil +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/mirror_desc_status.go b/vendor/github.com/ceph/go-ceph/rbd/mirror_desc_status.go new file mode 100644 index 0000000000..58bd4b3900 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/mirror_desc_status.go @@ -0,0 +1,72 @@ +//go:build !nautilus +// +build !nautilus + +package rbd + +// #cgo LDFLAGS: -lrbd +// #include +// #include +import "C" + +import ( + "encoding/json" + "strings" +) + +// MirrorDescriptionReplayStatus contains information pertaining to the status +// of snapshot based RBD image mirroring. +type MirrorDescriptionReplayStatus struct { + ReplayState string `json:"replay_state"` + RemoteSnapshotTimestamp int64 `json:"remote_snapshot_timestamp"` + LocalSnapshotTimestamp int64 `json:"local_snapshot_timestamp"` + SyncingSnapshotTimestamp int64 `json:"syncing_snapshot_timestamp"` + SyncingPercent int `json:"syncing_percent"` + BytesPerSecond float64 `json:"bytes_per_second"` + BytesPerSnapshot float64 `json:"bytes_per_snapshot"` + LastSnapshotSyncSeconds int64 `json:"last_snapshot_sync_seconds"` + LastSnapshotBytes int64 `json:"last_snapshot_bytes"` +} + +// extractDescriptionJSON will extract one string containing a JSON object from +// the description if one can be found. +func (s *SiteMirrorImageStatus) extractDescriptionJSON() (string, error) { + start := strings.Index(s.Description, "{") + if start == -1 { + return "", ErrNotExist + } + end := strings.LastIndex(s.Description, "}") + if end == -1 { + return "", ErrNotExist + } + if start >= end { + return "", ErrNotExist + } + return s.Description[start : end+1], nil +} + +// UnmarshalDescriptionJSON parses an embedded JSON string that may be found in +// the description of the SiteMirrorImageStatus. It will store the result in +// the value pointed to by v. If no embedded JSON string is found an +// ErrNotExist error is returned. An error may also be returned if the contents +// can not be parsed. +func (s *SiteMirrorImageStatus) UnmarshalDescriptionJSON(v interface{}) error { + desc, err := s.extractDescriptionJSON() + if err != nil { + return err + } + return json.Unmarshal([]byte(desc), v) +} + +// DescriptionReplayStatus parses a MirrorDescriptionReplayStatus result out of +// the image status description field if available. If the embedded status JSON +// is not found or fails to parse and error will be returned. 
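+//
+// Illustrative sketch, assuming gmis is a GlobalMirrorImageStatus previously
+// returned by GetGlobalMirrorStatus:
+//
+//	site, err := gmis.LocalStatus()
+//	if err != nil {
+//		// handle error
+//	}
+//	if rs, err := site.DescriptionReplayStatus(); err == nil {
+//		// inspect rs.ReplayState, rs.SyncingPercent, ...
+//	}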
+func (s *SiteMirrorImageStatus) DescriptionReplayStatus() ( + *MirrorDescriptionReplayStatus, error, +) { + // --- + mdrs := MirrorDescriptionReplayStatus{} + if err := s.UnmarshalDescriptionJSON(&mdrs); err != nil { + return nil, err + } + return &mdrs, nil +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/mirror_nautilus.go b/vendor/github.com/ceph/go-ceph/rbd/mirror_nautilus.go new file mode 100644 index 0000000000..5fe1550948 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/mirror_nautilus.go @@ -0,0 +1,420 @@ +//go:build nautilus +// +build nautilus + +package rbd + +// #cgo LDFLAGS: -lrbd +// #include +// #include +// #include +// #include +import "C" +import ( + "unsafe" + + "github.com/ceph/go-ceph/rados" +) + +// MirrorMode indicates the current mode of mirroring that is applied onto a +// pool. A pool that doesn't have an explicit mirroring mode applied to it is +// said to be disabled - that's the default. +type MirrorMode int + +const ( + // MirrorModeDisabled disables mirroring. + MirrorModeDisabled = MirrorMode(C.RBD_MIRROR_MODE_DISABLED) + // MirrorModeImage enables mirroring on a per-image basis. + MirrorModeImage = MirrorMode(C.RBD_MIRROR_MODE_IMAGE) + // MirrorModePool enables mirroring on all journaled images. + MirrorModePool = MirrorMode(C.RBD_MIRROR_MODE_POOL) +) + +// MirrorModeGet returns the mode of mirroring currently applied to a pool. +// +// Note: this can only be used if go-ceph is compiled with the `nautilus` build +// tag. +// +// Implements: +// +// int rbd_mirror_mode_get(rados_ioctx_t p, rbd_mirror_mode_t *mirror_mode) +func MirrorModeGet(ioctx *rados.IOContext) (MirrorMode, error) { + var rmm C.rbd_mirror_mode_t + + ret := C.rbd_mirror_mode_get(cephIoctx(ioctx), &rmm) + if ret != 0 { + return -1, getError(ret) + } + + return MirrorMode(rmm), nil +} + +// MirrorModeSet sets the mirror mode for a pool. +// +// Note: this can only be used if go-ceph is compiled with the `nautilus` build +// tag. +// +// Implements: +// +// rbd_mirror_mode_set(rados_ioctx_t p, rbd_mirror_mode_t mirror_mode) +func MirrorModeSet(ioctx *rados.IOContext, mode MirrorMode) error { + cMode := C.rbd_mirror_mode_t(mode) + + ret := C.rbd_mirror_mode_set(cephIoctx(ioctx), cMode) + + return getError(ret) +} + +// MirrorPeerAdd configures a peering relationship with another cluster. Note +// that it does not transfer over that cluster's config or keyrings, which must +// already be available to the rbd-mirror daemon(s). +// +// Note: this can only be used if go-ceph is compiled with the `nautilus` build +// tag. +// +// Implements: +// +// int rbd_mirror_peer_add(rados_ioctx_t p, char *uuid, +// size_t uuid_max_length, +// const char *cluster_name, +// const char *client_name) +func MirrorPeerAdd(ioctx *rados.IOContext, clusterName, clientName string) (string, error) { + // librbd uses 36-byte UUIDs with a trailing null. rbd_mirror_add_peer will + // return -E2BIG if we pass a UUID buffer smaller than 37 bytes. + const cUUIDMaxLen = C.size_t(37) + cUUID := make([]C.char, cUUIDMaxLen) + + cClusterName := C.CString(clusterName) + defer C.free(unsafe.Pointer(cClusterName)) + + cClientName := C.CString(clientName) + defer C.free(unsafe.Pointer(cClientName)) + + ret := C.rbd_mirror_peer_add(cephIoctx(ioctx), &cUUID[0], cUUIDMaxLen, + cClusterName, cClientName) + + return C.GoString(&cUUID[0]), getError(ret) +} + +// MirrorPeerRemove tears down a peering relationship. +// +// Note: this can only be used if go-ceph is compiled with the `nautilus` build +// tag. 
+// +// Implements: +// +// int rbd_mirror_peer_remove(rados_ioctx_t io_ctx, const char *uuid) +func MirrorPeerRemove(ioctx *rados.IOContext, uuid string) error { + cUUID := C.CString(uuid) + defer C.free(unsafe.Pointer(cUUID)) + + ret := C.rbd_mirror_peer_remove(cephIoctx(ioctx), cUUID) + + return getError(ret) +} + +// MirrorPeerInfo contains information about a configured mirroring peer. +type MirrorPeerInfo struct { + UUID string + ClusterName string + ClientName string +} + +// MirrorPeerList returns a list of configured mirroring peers. +// +// Note: this can only be used if go-ceph is compiled with the `nautilus` build +// tag. +// +// Implements: +// +// int rbd_mirror_peer_list(rados_ioctx_t io_ctx, +// rbd_mirror_peer_list_t *peers, +// int *max_peers); +func MirrorPeerList(ioctx *rados.IOContext) ([]*MirrorPeerInfo, error) { + var mpi []*MirrorPeerInfo + cMaxPeers := C.int(5) + + var cPeers []C.rbd_mirror_peer_t + for { + cPeers = make([]C.rbd_mirror_peer_t, cMaxPeers) + ret := C.rbd_mirror_peer_list(cephIoctx(ioctx), &cPeers[0], &cMaxPeers) + if ret == -C.ERANGE { + // There are too many peers to fit in the list, and the number of peers has been + // returned in cMaxPeers. Try again with the returned value. + continue + } + if ret != 0 { + return nil, getError(ret) + } + + // ret == 0 + break + } + defer C.rbd_mirror_peer_list_cleanup(&cPeers[0], cMaxPeers) + cPeers = cPeers[:cMaxPeers] + + for _, cPeer := range cPeers { + mpi = append(mpi, &MirrorPeerInfo{ + UUID: C.GoString(cPeer.uuid), + ClusterName: C.GoString(cPeer.cluster_name), + ClientName: C.GoString(cPeer.client_name), + }) + } + + return mpi, nil +} + +// MirrorImageState indicates whether mirroring is enabled or disabled on an +// image. +// +// A mirrored image might not immediately change its status to disabled if it has +// offsets left to sync with its peers - this is denoted by 'disabling' state. +// +// It is important to note that mirroring cannot be enabled on an image without +// first flipping on the 'journaling' image feature for it. +type MirrorImageState int + +const ( + // MirrorImageDisabling is the representation of + // RBD_MIRROR_IMAGE_DISABLING from librbd. + MirrorImageDisabling = MirrorImageState(C.RBD_MIRROR_IMAGE_DISABLING) + // MirrorImageEnabled is the representation of + // RBD_MIRROR_IMAGE_ENABLED from librbd. + MirrorImageEnabled = MirrorImageState(C.RBD_MIRROR_IMAGE_ENABLED) + // MirrorImageDisabled is the representation of + // RBD_MIRROR_IMAGE_DISABLED from librbd. + MirrorImageDisabled = MirrorImageState(C.RBD_MIRROR_IMAGE_DISABLED) +) + +// MirrorImageStatusState denotes the current replication status of a given +// image. +type MirrorImageStatusState int + +const ( + // MirrorImageStatusStateUnknown is equivalent to MIRROR_IMAGE_STATUS_STATE_UNKNOWN. + MirrorImageStatusStateUnknown = MirrorImageStatusState(C.MIRROR_IMAGE_STATUS_STATE_UNKNOWN) + // MirrorImageStatusStateError is equivalent to MIRROR_IMAGE_STATUS_STATE_ERROR. + MirrorImageStatusStateError = MirrorImageStatusState(C.MIRROR_IMAGE_STATUS_STATE_ERROR) + // MirrorImageStatusStateSyncing is equivalent to MIRROR_IMAGE_STATUS_STATE_SYNCING. + MirrorImageStatusStateSyncing = MirrorImageStatusState(C.MIRROR_IMAGE_STATUS_STATE_SYNCING) + // MirrorImageStatusStateStartingReplay is equivalent to MIRROR_IMAGE_STATUS_STATE_STARTING_REPLAY. 
+ MirrorImageStatusStateStartingReplay = MirrorImageStatusState(C.MIRROR_IMAGE_STATUS_STATE_STARTING_REPLAY) + // MirrorImageStatusStateReplaying is equivalent to MIRROR_IMAGE_STATUS_STATE_REPLAYING. + MirrorImageStatusStateReplaying = MirrorImageStatusState(C.MIRROR_IMAGE_STATUS_STATE_REPLAYING) + // MirrorImageStatusStateStoppingReplay is equivalent to MIRROR_IMAGE_STATUS_STATE_STOPPING_REPLAY. + MirrorImageStatusStateStoppingReplay = MirrorImageStatusState(C.MIRROR_IMAGE_STATUS_STATE_STOPPING_REPLAY) + // MirrorImageStatusStateStopped is equivalent to MIRROR_IMAGE_STATUS_STATE_STOPPED. + MirrorImageStatusStateStopped = MirrorImageStatusState(C.MIRROR_IMAGE_STATUS_STATE_STOPPED) +) + +// MirrorImageInfo provides information about the mirroring progress of an image. +type MirrorImageInfo struct { + Name string + Description string + State MirrorImageState + StatusState MirrorImageStatusState + GlobalID string + IsPrimary bool + IsUp bool +} + +// MirrorGetImage returns the MirrorImageInfo for an image. +// +// Note: this can only be used if go-ceph is compiled with the `nautilus` build +// tag. +// +// Implements: +// +// rbd_mirror_image_get_info(rbd_image_t image, +// rbd_mirror_image_info_t *mirror_image_info, +// size_t info_size) +func (image *Image) MirrorGetImage() (*MirrorImageInfo, error) { + err := image.validate(imageIsOpen) + if err != nil { + return nil, err + } + + var status C.rbd_mirror_image_status_t + ret := C.rbd_mirror_image_get_status(image.image, &status, C.sizeof_rbd_mirror_image_status_t) + if ret != 0 { + return nil, getError(ret) + } + + return &MirrorImageInfo{ + Name: C.GoString(status.name), + Description: C.GoString(status.description), + State: MirrorImageState(status.info.state), + StatusState: MirrorImageStatusState(status.state), + GlobalID: C.GoString(status.info.global_id), + IsPrimary: bool(status.info.primary), + IsUp: bool(status.up), + }, nil +} + +// MirrorImageList returns a MirrorImageInfo for each mirrored image. +// +// Note: this can only be used if go-ceph is compiled with the `nautilus` build +// tag. +// +// Implements: +// +// int rbd_mirror_image_status_list(rados_ioctx_t io_ctx, +// const char *start_id, size_t max, +// char **image_ids, +// rbd_mirror_image_status_t *images, +// size_t *len) +func MirrorImageList(ioctx *rados.IOContext) ([]*MirrorImageInfo, error) { + imageInfos := make([]*MirrorImageInfo, 0) + const cMaxIter C.size_t = 100 + var startID string + + for { + // We need to wrap all the actions within the for loop in a function + // in order to ensure that we correctly reclaim all allocated memory + // from C at the end of every iteration. + ret, done := iterateImageList(ioctx, &imageInfos, &startID, cMaxIter) + if ret != 0 { + return imageInfos, getError(ret) + } + + if done { + break + } + } + return imageInfos, nil +} + +func iterateImageList(ioctx *rados.IOContext, imageInfos *[]*MirrorImageInfo, startID *string, cMaxIter C.size_t) (C.int, bool) { + cImageIDs := make([]*C.char, cMaxIter) + cImageStatus := make([]C.rbd_mirror_image_status_t, cMaxIter) + done := false + + var cLen C.size_t + ret := C.rbd_mirror_image_status_list(cephIoctx(ioctx), C.CString(*startID), + cMaxIter, &cImageIDs[0], &cImageStatus[0], &cLen) + if ret != 0 { + return ret, done + } + + // If the list length is 0 or less than the max size + // specified we know we are on the last page of the list, + // and we don't need to continue iterating. 
+ if cLen < cMaxIter { + done = true + } + + if cLen == 0 { + return C.int(0), done + } + + defer func() { + C.rbd_mirror_image_status_list_cleanup(&cImageIDs[0], &cImageStatus[0], cLen) + }() + + for i := 0; i < int(cLen); i++ { + mi := &MirrorImageInfo{ + Name: C.GoString(cImageStatus[i].name), + Description: C.GoString(cImageStatus[i].description), + State: MirrorImageState(cImageStatus[i].info.state), + StatusState: MirrorImageStatusState(cImageStatus[i].state), + GlobalID: C.GoString(cImageStatus[i].info.global_id), + IsPrimary: bool(cImageStatus[i].info.primary), + IsUp: bool(cImageStatus[i].up), + } + + *imageInfos = append(*imageInfos, mi) + } + + *startID = C.GoString(cImageIDs[cLen-1]) + return C.int(0), done +} + +// MirrorEnable will enable mirroring for an image. +// +// Note: this can only be used if go-ceph is compiled with the `nautilus` build +// tag. +// +// Implements: +// +// int rbd_mirror_image_enable(rbd_image_t image) +func (image *Image) MirrorEnable() error { + err := image.validate(imageIsOpen) + if err != nil { + return err + } + + ret := C.rbd_mirror_image_enable(image.image) + return getError(ret) +} + +// MirrorDisable will disable mirroring for an image. +// +// Note: this can only be used if go-ceph is compiled with the `nautilus` build +// tag. +// +// Implements: +// +// int rbd_mirror_image_disable(rbd_image_t image, bool force) +func (image *Image) MirrorDisable(force bool) error { + err := image.validate(imageIsOpen) + if err != nil { + return err + } + + ret := C.rbd_mirror_image_disable(image.image, C.bool(force)) + return getError(ret) +} + +// MirrorPromote will promote an image to primary status. +// +// Note: this can only be used if go-ceph is compiled with the `nautilus` build +// tag. +// +// Implements: +// +// int rbd_mirror_image_promote(rbd_image_t image, bool force) +func (image *Image) MirrorPromote(force bool) error { + err := image.validate(imageIsOpen) + if err != nil { + return err + } + + ret := C.rbd_mirror_image_promote(image.image, C.bool(force)) + return getError(ret) +} + +// MirrorDemote will demote an image to secondary status. +// +// Note: this can only be used if go-ceph is compiled with the `nautilus` build +// tag. +// +// Implements: +// +// int rbd_mirror_image_demote(rbd_image_t image) +func (image *Image) MirrorDemote() error { + err := image.validate(imageIsOpen) + if err != nil { + return err + } + + ret := C.rbd_mirror_image_demote(image.image) + return getError(ret) +} + +// MirrorResync is used to manually resolve split-brain status by triggering +// resynchronization. +// +// Note: this can only be used if go-ceph is compiled with the `nautilus` build +// tag. 
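+//
+// One common recovery sequence (a sketch, not prescriptive guidance): on the
+// cluster whose copy has diverged, demote the image and then request a resync.
+// Here img is assumed to be an open, mirror-enabled Image:
+//
+//	if err := img.MirrorDemote(); err != nil {
+//		// handle error
+//	}
+//	if err := img.MirrorResync(); err != nil {
+//		// handle error
+//	}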
+// +// Implements: +// +// int rbd_mirror_image_resync(rbd_image_t image) +func (image *Image) MirrorResync() error { + err := image.validate(imageIsOpen) + if err != nil { + return err + } + + ret := C.rbd_mirror_image_resync(image.image) + return getError(ret) +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/mirror_peer_site.go b/vendor/github.com/ceph/go-ceph/rbd/mirror_peer_site.go new file mode 100644 index 0000000000..6fa44a5ec3 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/mirror_peer_site.go @@ -0,0 +1,255 @@ +//go:build !nautilus +// +build !nautilus + +package rbd + +// #cgo LDFLAGS: -lrbd +// #include +// #include +import "C" + +import ( + "bytes" + "unsafe" + + "github.com/ceph/go-ceph/internal/cutil" + "github.com/ceph/go-ceph/internal/retry" + "github.com/ceph/go-ceph/rados" +) + +// AddMirrorPeerSite adds a peer site to the list of existing sites +// +// Implements: +// +// int rbd_mirror_peer_site_add(rados_ioctx_t p, char *uuid, size_t uuid_max_length, +// rbd_mirror_peer_direction_t direction, +// const char *site_name, +// const char *client_name); +func AddMirrorPeerSite(ioctx *rados.IOContext, siteName string, clientName string, + direction MirrorPeerDirection, +) (string, error) { + var ( + err error + buf []byte + cSize C.size_t + ) + + cSiteName := C.CString(siteName) + defer C.free(unsafe.Pointer(cSiteName)) + cClientName := C.CString(clientName) + defer C.free(unsafe.Pointer(cClientName)) + + retry.WithSizes(512, 1<<16, func(size int) retry.Hint { + cSize = C.size_t(size) + buf = make([]byte, cSize) + ret := C.rbd_mirror_peer_site_add( + cephIoctx(ioctx), + (*C.char)(unsafe.Pointer(&buf[0])), + cSize, C.rbd_mirror_peer_direction_t(direction), + cSiteName, cClientName) + err = getError(ret) + return retry.Size(int(cSize)).If(err != nil) + }) + if err != nil { + return "", err + } + return string(bytes.Trim(buf[:cSize], "\x00")), nil +} + +// RemoveMirrorPeerSite removes the site with the provided uuid +// +// Implements: +// +// int rbd_mirror_peer_site_remove(rados_ioctx_t p, const char *uuid) +func RemoveMirrorPeerSite(ioctx *rados.IOContext, uuid string) error { + cUUID := C.CString(uuid) + defer C.free(unsafe.Pointer(cUUID)) + + ret := C.rbd_mirror_peer_site_remove(cephIoctx(ioctx), cUUID) + + return getError(ret) +} + +// GetAttributesMirrorPeerSite fetches the list of key,value pair of attributes of a peer site +// +// Implements: +// +// int rbd_mirror_peer_site_get_attributes(rados_ioctx_t p, const char *uuid, char *keys, +// size_t *max_key_len, char *values, size_t *max_val_len, +// size_t *key_value_count); +func GetAttributesMirrorPeerSite(ioctx *rados.IOContext, uuid string) (map[string]string, error) { + var ( + err error + keys []byte + vals []byte + keySize C.size_t + valSize C.size_t + count = C.size_t(0) + ) + + cUUID := C.CString(uuid) + defer C.free(unsafe.Pointer(cUUID)) + + retry.WithSizes(1024, 1<<16, func(size int) retry.Hint { + keySize = C.size_t(size) + valSize = C.size_t(size) + keys = make([]byte, keySize) + vals = make([]byte, valSize) + ret := C.rbd_mirror_peer_site_get_attributes( + cephIoctx(ioctx), cUUID, (*C.char)(unsafe.Pointer(&keys[0])), + &keySize, (*C.char)(unsafe.Pointer(&vals[0])), &valSize, + &count) + err = getErrorIfNegative(ret) + return retry.Size(int(keySize)).If(err == errRange) + }) + if err != nil { + return nil, err + } + + keyList := cutil.SplitBuffer(keys[:keySize]) + valList := cutil.SplitBuffer(vals[:valSize]) + attributes := map[string]string{} + for i := 0; i < int(len(keyList)); i++ { + 
attributes[keyList[i]] = valList[i] + } + return attributes, nil +} + +// SetAttributesMirrorPeerSite sets the attributes for the site with the given uuid +// +// Implements: +// +// int rbd_mirror_peer_site_set_attributes(rados_ioctx_t p, const char *uuid, +// const char *keys, const char *values, +// size_t count) ; +func SetAttributesMirrorPeerSite(ioctx *rados.IOContext, uuid string, attributes map[string]string) error { + cUUID := C.CString(uuid) + defer C.free(unsafe.Pointer(cUUID)) + + var ( + key string + val string + count = C.size_t(len(attributes)) + ) + + for k, v := range attributes { + key += k + "\000" + val += v + "\000" + } + + cKey := C.CString(key) + defer C.free(unsafe.Pointer(cKey)) + + cVal := C.CString(val) + defer C.free(unsafe.Pointer(cVal)) + + ret := C.rbd_mirror_peer_site_set_attributes(cephIoctx(ioctx), cUUID, cKey, cVal, count) + + return getError(ret) +} + +// MirrorPeerSite is go equivalent of rbd_mirror_peer_site_t struct and contains information +// about a mirroring peer site. Here, we are ignoring the "last_seen" as this property is redundant +// and not updated in the ceph API. Related Ceph issue: https://tracker.ceph.com/issues/59581 +type MirrorPeerSite struct { + UUID string + Direction MirrorPeerDirection + SiteName string + MirrorUUID string + ClientName string +} + +// ListMirrorPeerSite returns the list of peer sites +// +// Implements: +// +// int rbd_mirror_peer_site_list(rados_ioctx_t p, rbd_mirror_peer_site_t *peers, int *max_peers) +func ListMirrorPeerSite(ioctx *rados.IOContext) ([]*MirrorPeerSite, error) { + var mps []*MirrorPeerSite + cMaxPeers := C.int(10) + + var cSites []C.rbd_mirror_peer_site_t + for { + cSites = make([]C.rbd_mirror_peer_site_t, cMaxPeers) + ret := C.rbd_mirror_peer_site_list(cephIoctx(ioctx), &cSites[0], &cMaxPeers) + err := getError(ret) + if err == errRange { + // There are too many peer sites to fit in the list, and the number of peer sites has been + // returned in cMaxPeers. Try again with the returned value. 
+ continue + } + if err != nil { + return nil, err + } + + // ret == 0 + break + } + + defer C.rbd_mirror_peer_site_list_cleanup(&cSites[0], cMaxPeers) + cSites = cSites[:cMaxPeers] + + for _, cSite := range cSites { + mps = append(mps, &MirrorPeerSite{ + UUID: C.GoString(cSite.uuid), + Direction: MirrorPeerDirection(cSite.direction), + SiteName: C.GoString(cSite.site_name), + MirrorUUID: C.GoString(cSite.mirror_uuid), + ClientName: C.GoString(cSite.client_name), + }) + } + + return mps, nil +} + +// SetMirrorPeerSiteClientName sets the client name for a mirror peer site +// +// Implements: +// +// int rbd_mirror_peer_site_set_client_name(rados_ioctx_t p, const char *uuid, +// const char *client_name); +func SetMirrorPeerSiteClientName(ioctx *rados.IOContext, uuid string, clientName string) error { + cUUID := C.CString(uuid) + defer C.free(unsafe.Pointer(cUUID)) + + cClientName := C.CString(clientName) + defer C.free(unsafe.Pointer(cClientName)) + + ret := C.rbd_mirror_peer_site_set_client_name(cephIoctx(ioctx), cUUID, cClientName) + + return getError(ret) +} + +// SetMirrorPeerSiteDirection sets the direction of a mirror peer site +// +// Implements: +// +// int rbd_mirror_peer_site_set_direction(rados_ioctx_t p, const char *uuid, +// rbd_mirror_peer_direction_t direction); +func SetMirrorPeerSiteDirection(ioctx *rados.IOContext, uuid string, direction MirrorPeerDirection) error { + cUUID := C.CString(uuid) + defer C.free(unsafe.Pointer(cUUID)) + + ret := C.rbd_mirror_peer_site_set_direction(cephIoctx(ioctx), cUUID, + C.rbd_mirror_peer_direction_t(direction)) + + return getError(ret) +} + +// SetMirrorPeerSiteName sets the name of a mirror peer site +// +// Implements: +// +// int rbd_mirror_peer_site_set_name(rados_ioctx_t p, const char *uuid, +// const char *site_name); +func SetMirrorPeerSiteName(ioctx *rados.IOContext, uuid string, siteName string) error { + cUUID := C.CString(uuid) + defer C.free(unsafe.Pointer(cUUID)) + + cSiteName := C.CString(siteName) + defer C.free(unsafe.Pointer(cSiteName)) + + ret := C.rbd_mirror_peer_site_set_name(cephIoctx(ioctx), cUUID, cSiteName) + + return getError(ret) +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/namespace_nautilus.go b/vendor/github.com/ceph/go-ceph/rbd/namespace_nautilus.go new file mode 100644 index 0000000000..7e54ac4f43 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/namespace_nautilus.go @@ -0,0 +1,109 @@ +// +// Ceph Nautilus is the first release that includes rbd_namespace_create(), +// rbd_namespace_remove(), rbd_namespace_exists() and rbd_namespace_list(). + +package rbd + +// #cgo LDFLAGS: -lrbd +// #include +// #include +// #include +// #include +import "C" + +import ( + "unsafe" + + "github.com/ceph/go-ceph/internal/cutil" + "github.com/ceph/go-ceph/internal/retry" + "github.com/ceph/go-ceph/rados" +) + +// NamespaceCreate creates the namespace for a given Rados IOContext. +// +// Implements: +// +// int rbd_namespace_create(rados_ioctx_t io, const char *namespace_name); +func NamespaceCreate(ioctx *rados.IOContext, namespaceName string) error { + if ioctx == nil { + return ErrNoIOContext + } + if namespaceName == "" { + return ErrNoNamespaceName + } + cNamespaceName := C.CString(namespaceName) + defer C.free(unsafe.Pointer(cNamespaceName)) + + ret := C.rbd_namespace_create(cephIoctx(ioctx), cNamespaceName) + return getError(ret) +} + +// NamespaceRemove removes a given namespace. 
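+//
+// Minimal lifecycle sketch (the namespace name "project-a" is only an example;
+// ioctx is an open rados.IOContext):
+//
+//	if err := NamespaceCreate(ioctx, "project-a"); err != nil {
+//		// handle error
+//	}
+//	if ok, err := NamespaceExists(ioctx, "project-a"); err == nil && ok {
+//		// ... use the namespace, then clean up ...
+//		_ = NamespaceRemove(ioctx, "project-a")
+//	}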
+// +// Implements: +// +// int rbd_namespace_remove(rados_ioctx_t io, const char *namespace_name); +func NamespaceRemove(ioctx *rados.IOContext, namespaceName string) error { + if ioctx == nil { + return ErrNoIOContext + } + if namespaceName == "" { + return ErrNoNamespaceName + } + cNamespaceName := C.CString(namespaceName) + defer C.free(unsafe.Pointer(cNamespaceName)) + + ret := C.rbd_namespace_remove(cephIoctx(ioctx), cNamespaceName) + return getError(ret) +} + +// NamespaceExists checks whether a given namespace exists or not. +// +// Implements: +// +// int rbd_namespace_exists(rados_ioctx_t io, const char *namespace_name, bool *exists); +func NamespaceExists(ioctx *rados.IOContext, namespaceName string) (bool, error) { + if ioctx == nil { + return false, ErrNoIOContext + } + if namespaceName == "" { + return false, ErrNoNamespaceName + } + cNamespaceName := C.CString(namespaceName) + defer C.free(unsafe.Pointer(cNamespaceName)) + + var exists C.bool + ret := C.rbd_namespace_exists(cephIoctx(ioctx), cNamespaceName, &exists) + return bool(exists), getErrorIfNegative(ret) +} + +// NamespaceList returns a slice containing the names of existing rbd namespaces. +// +// Implements: +// +// int rbd_namespace_list(rados_ioctx_t io, char *namespace_names, size_t *size); +func NamespaceList(ioctx *rados.IOContext) (names []string, err error) { + if ioctx == nil { + return nil, ErrNoIOContext + } + var ( + buf []byte + cSize C.size_t + ) + retry.WithSizes(4096, 262144, func(size int) retry.Hint { + cSize = C.size_t(size) + buf = make([]byte, cSize) + ret := C.rbd_namespace_list(cephIoctx(ioctx), + (*C.char)(unsafe.Pointer(&buf[0])), + &cSize) + err = getErrorIfNegative(ret) + return retry.Size(int(cSize)).If(err == errRange) + }) + + if err != nil { + return nil, err + } + + names = cutil.SplitSparseBuffer(buf[:cSize]) + return names, nil +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/options.go b/vendor/github.com/ceph/go-ceph/rbd/options.go new file mode 100644 index 0000000000..0d1b78e6a8 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/options.go @@ -0,0 +1,241 @@ +package rbd + +// #cgo LDFLAGS: -lrbd +// #include +// #include +import "C" + +import ( + "fmt" + "unsafe" +) + +const ( + // RBD image options. 
+ + // ImageOptionFormat is the representation of RBD_IMAGE_OPTION_FORMAT from + // librbd + ImageOptionFormat = C.RBD_IMAGE_OPTION_FORMAT + // ImageOptionFeatures is the representation of RBD_IMAGE_OPTION_FEATURES + // from librbd + ImageOptionFeatures = C.RBD_IMAGE_OPTION_FEATURES + // ImageOptionOrder is the representation of RBD_IMAGE_OPTION_ORDER from + // librbd + ImageOptionOrder = C.RBD_IMAGE_OPTION_ORDER + // ImageOptionStripeUnit is the representation of + // RBD_IMAGE_OPTION_STRIPE_UNIT from librbd + ImageOptionStripeUnit = C.RBD_IMAGE_OPTION_STRIPE_UNIT + // ImageOptionStripeCount is the representation of + // RBD_IMAGE_OPTION_STRIPE_COUNT from librbd + ImageOptionStripeCount = C.RBD_IMAGE_OPTION_STRIPE_COUNT + // ImageOptionJournalOrder is the representation of + // RBD_IMAGE_OPTION_JOURNAL_ORDER from librbd + ImageOptionJournalOrder = C.RBD_IMAGE_OPTION_JOURNAL_ORDER + // ImageOptionJournalSplayWidth is the representation of + // RBD_IMAGE_OPTION_JOURNAL_SPLAY_WIDTH from librbd + ImageOptionJournalSplayWidth = C.RBD_IMAGE_OPTION_JOURNAL_SPLAY_WIDTH + // ImageOptionJournalPool is the representation of + // RBD_IMAGE_OPTION_JOURNAL_POOL from librbd + ImageOptionJournalPool = C.RBD_IMAGE_OPTION_JOURNAL_POOL + // ImageOptionFeaturesSet is the representation of + // RBD_IMAGE_OPTION_FEATURES_SET from librbd + ImageOptionFeaturesSet = C.RBD_IMAGE_OPTION_FEATURES_SET + // ImageOptionFeaturesClear is the representation of + // RBD_IMAGE_OPTION_FEATURES_CLEAR from librbd + ImageOptionFeaturesClear = C.RBD_IMAGE_OPTION_FEATURES_CLEAR + // ImageOptionDataPool is the representation of RBD_IMAGE_OPTION_DATA_POOL + // from librbd + ImageOptionDataPool = C.RBD_IMAGE_OPTION_DATA_POOL + // ImageOptionFlatten is the representation of RBD_IMAGE_OPTION_FLATTEN + // from librbd + ImageOptionFlatten = C.RBD_IMAGE_OPTION_FLATTEN + // ImageOptionCloneFormat is the representation of + // RBD_IMAGE_OPTION_CLONE_FORMAT from librbd + ImageOptionCloneFormat = C.RBD_IMAGE_OPTION_CLONE_FORMAT + + // RbdImageOptionFormat deprecated alias for ImageOptionFormat + RbdImageOptionFormat = ImageOptionFormat + // RbdImageOptionFeatures deprecated alias for ImageOptionFeatures + RbdImageOptionFeatures = ImageOptionFeatures + // RbdImageOptionOrder deprecated alias for ImageOptionOrder + RbdImageOptionOrder = ImageOptionOrder + // RbdImageOptionStripeUnit deprecated alias for ImageOptionStripeUnit + RbdImageOptionStripeUnit = ImageOptionStripeUnit + // RbdImageOptionStripeCount deprecated alias for ImageOptionStripeCount + RbdImageOptionStripeCount = ImageOptionStripeCount + // RbdImageOptionJournalOrder deprecated alias for ImageOptionJournalOrder + RbdImageOptionJournalOrder = ImageOptionJournalOrder + // RbdImageOptionJournalSplayWidth deprecated alias for + RbdImageOptionJournalSplayWidth = ImageOptionJournalSplayWidth + // RbdImageOptionJournalPool deprecated alias for ImageOptionJournalPool + RbdImageOptionJournalPool = ImageOptionJournalPool + // RbdImageOptionFeaturesSet deprecated alias for ImageOptionFeaturesSet + RbdImageOptionFeaturesSet = ImageOptionFeaturesSet + // RbdImageOptionFeaturesClear deprecated alias for ImageOptionFeaturesClear + RbdImageOptionFeaturesClear = ImageOptionFeaturesClear + // RbdImageOptionDataPool deprecated alias for ImageOptionDataPool + RbdImageOptionDataPool = ImageOptionDataPool +) + +// ImageOptions represents a group of configurable image options. 
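+//
+// Typical usage sketch (the option values below are arbitrary examples and the
+// data pool name is hypothetical):
+//
+//	opts := NewRbdImageOptions()
+//	defer opts.Destroy()
+//	if err := opts.SetUint64(ImageOptionOrder, 22); err != nil {
+//		// handle error
+//	}
+//	if err := opts.SetString(ImageOptionDataPool, "fast-pool"); err != nil {
+//		// handle error
+//	}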
+type ImageOptions struct { + options C.rbd_image_options_t +} + +// ImageOption values are unique keys for configurable options. +type ImageOption C.int + +// revive:disable:exported Deprecated aliases + +// RbdImageOptions deprecated alias for ImageOptions +type RbdImageOptions = ImageOptions + +// RbdImageOption is a deprecated alias for ImageOption +type RbdImageOption = ImageOption + +//revive:enable:exported + +// NewRbdImageOptions creates a new RbdImageOptions struct. Call +// RbdImageOptions.Destroy() to free the resources. +// +// Implements: +// +// void rbd_image_options_create(rbd_image_options_t* opts) +func NewRbdImageOptions() *ImageOptions { + rio := &ImageOptions{} + C.rbd_image_options_create(&rio.options) + return rio +} + +// Destroy a RbdImageOptions struct and free the associated resources. +// +// Implements: +// +// void rbd_image_options_destroy(rbd_image_options_t opts); +func (rio *ImageOptions) Destroy() { + C.rbd_image_options_destroy(rio.options) +} + +// SetString sets the value of the RbdImageOption to the given string. +// +// Implements: +// +// int rbd_image_options_set_string(rbd_image_options_t opts, int optname, +// const char* optval); +func (rio *ImageOptions) SetString(option ImageOption, value string) error { + cValue := C.CString(value) + defer C.free(unsafe.Pointer(cValue)) + + ret := C.rbd_image_options_set_string(rio.options, C.int(option), cValue) + if ret != 0 { + return fmt.Errorf("%v, could not set option %v to \"%v\"", + getError(ret), option, value) + } + + return nil +} + +// GetString returns the string value of the RbdImageOption. +// +// Implements: +// +// int rbd_image_options_get_string(rbd_image_options_t opts, int optname, +// char* optval, size_t maxlen); +func (rio *ImageOptions) GetString(option ImageOption) (string, error) { + value := make([]byte, 4096) + + ret := C.rbd_image_options_get_string(rio.options, C.int(option), + (*C.char)(unsafe.Pointer(&value[0])), + C.size_t(len(value))) + if ret != 0 { + return "", fmt.Errorf("%v, could not get option %v", getError(ret), option) + } + + return C.GoString((*C.char)(unsafe.Pointer(&value[0]))), nil +} + +// SetUint64 sets the value of the RbdImageOption to the given uint64. +// +// Implements: +// +// int rbd_image_options_set_uint64(rbd_image_options_t opts, int optname, +// const uint64_t optval); +func (rio *ImageOptions) SetUint64(option ImageOption, value uint64) error { + cValue := C.uint64_t(value) + + ret := C.rbd_image_options_set_uint64(rio.options, C.int(option), cValue) + if ret != 0 { + return fmt.Errorf("%v, could not set option %v to \"%v\"", + getError(ret), option, value) + } + + return nil +} + +// GetUint64 returns the uint64 value of the RbdImageOption. +// +// Implements: +// +// int rbd_image_options_get_uint64(rbd_image_options_t opts, int optname, +// uint64_t* optval); +func (rio *ImageOptions) GetUint64(option ImageOption) (uint64, error) { + var cValue C.uint64_t + + ret := C.rbd_image_options_get_uint64(rio.options, C.int(option), &cValue) + if ret != 0 { + return 0, fmt.Errorf("%v, could not get option %v", getError(ret), option) + } + + return uint64(cValue), nil +} + +// IsSet returns a true if the RbdImageOption is set, false otherwise. 
+// +// Implements: +// +// int rbd_image_options_is_set(rbd_image_options_t opts, int optname, +// bool* is_set); +func (rio *ImageOptions) IsSet(option ImageOption) (bool, error) { + var cSet C.bool + + ret := C.rbd_image_options_is_set(rio.options, C.int(option), &cSet) + if ret != 0 { + return false, fmt.Errorf("%v, could not check option %v", getError(ret), option) + } + + return bool(cSet), nil +} + +// Unset a given RbdImageOption. +// +// Implements: +// +// int rbd_image_options_unset(rbd_image_options_t opts, int optname) +func (rio *ImageOptions) Unset(option ImageOption) error { + ret := C.rbd_image_options_unset(rio.options, C.int(option)) + if ret != 0 { + return fmt.Errorf("%v, could not unset option %v", getError(ret), option) + } + + return nil +} + +// Clear all options in the RbdImageOptions. +// +// Implements: +// +// void rbd_image_options_clear(rbd_image_options_t opts) +func (rio *ImageOptions) Clear() { + C.rbd_image_options_clear(rio.options) +} + +// IsEmpty returns true if there are no options set in the RbdImageOptions, +// false otherwise. +// +// Implements: +// +// int rbd_image_options_is_empty(rbd_image_options_t opts) +func (rio *ImageOptions) IsEmpty() bool { + ret := C.rbd_image_options_is_empty(rio.options) + return ret != 0 +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/options_octopus.go b/vendor/github.com/ceph/go-ceph/rbd/options_octopus.go new file mode 100644 index 0000000000..75537784be --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/options_octopus.go @@ -0,0 +1,13 @@ +//go:build !nautilus +// +build !nautilus + +package rbd + +// #include +import "C" + +const ( + // ImageOptionMirrorImageMode is the representation of + // RBD_IMAGE_OPTION_MIRROR_IMAGE_MODE from librbd + ImageOptionMirrorImageMode = C.RBD_IMAGE_OPTION_MIRROR_IMAGE_MODE +) diff --git a/vendor/github.com/ceph/go-ceph/rbd/pool_nautilus.go b/vendor/github.com/ceph/go-ceph/rbd/pool_nautilus.go new file mode 100644 index 0000000000..d860c5aa76 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/pool_nautilus.go @@ -0,0 +1,224 @@ +// +// Ceph Nautilus is the first release that includes rbd_pool_metadata_get(), +// rbd_pool_metadata_set() and rbd_pool_metadata_remove(). + +package rbd + +// #cgo LDFLAGS: -lrbd +// #include +// #include +// #include +import "C" + +import ( + "unsafe" + + "github.com/ceph/go-ceph/internal/retry" + "github.com/ceph/go-ceph/rados" +) + +// GetPoolMetadata returns pool metadata associated with the given key. +// +// Implements: +// +// int rbd_pool_metadata_get(rados_ioctx_t io_ctx, const char *key, char *value, size_t *val_len); +func GetPoolMetadata(ioctx *rados.IOContext, key string) (string, error) { + if ioctx == nil { + return "", ErrNoIOContext + } + + cKey := C.CString(key) + defer C.free(unsafe.Pointer(cKey)) + + var ( + buf []byte + err error + ) + retry.WithSizes(4096, 262144, func(size int) retry.Hint { + cSize := C.size_t(size) + buf = make([]byte, cSize) + ret := C.rbd_pool_metadata_get(cephIoctx(ioctx), + cKey, + (*C.char)(unsafe.Pointer(&buf[0])), + &cSize) + err = getError(ret) + return retry.Size(int(cSize)).If(err == errRange) + }) + + if err != nil { + return "", err + } + return C.GoString((*C.char)(unsafe.Pointer(&buf[0]))), nil +} + +// SetPoolMetadata updates the pool metadata string associated with the given key. 
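+//
+// Round-trip sketch (the key and value strings are arbitrary examples):
+//
+//	if err := SetPoolMetadata(ioctx, "mykey", "myvalue"); err != nil {
+//		// handle error
+//	}
+//	v, err := GetPoolMetadata(ioctx, "mykey")
+//	// ... use v ...
+//	err = RemovePoolMetadata(ioctx, "mykey")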
+// +// Implements: +// +// int rbd_pool_metadata_set(rados_ioctx_t io_ctx, const char *key, const char *value); +func SetPoolMetadata(ioctx *rados.IOContext, key, value string) error { + if ioctx == nil { + return ErrNoIOContext + } + + cKey := C.CString(key) + defer C.free(unsafe.Pointer(cKey)) + cValue := C.CString(value) + defer C.free(unsafe.Pointer(cValue)) + + ret := C.rbd_pool_metadata_set(cephIoctx(ioctx), cKey, cValue) + return getError(ret) +} + +// RemovePoolMetadata removes the pool metadata value for a given pool metadata key. +// +// Implements: +// +// int rbd_pool_metadata_remove(rados_ioctx_t io_ctx, const char *key) +func RemovePoolMetadata(ioctx *rados.IOContext, key string) error { + if ioctx == nil { + return ErrNoIOContext + } + + cKey := C.CString(key) + defer C.free(unsafe.Pointer(cKey)) + + ret := C.rbd_pool_metadata_remove(cephIoctx(ioctx), cKey) + return getError(ret) +} + +// PoolInit initializes a pool for use by rbd. +// This function does not create new pools, rather it prepares the pool +// to host rbd images. +// +// Implements: +// +// int rbd_pool_init(rados_ioctx_t io, bool force) +func PoolInit(ioctx *rados.IOContext, force bool) error { + if ioctx == nil { + return ErrNoIOContext + } + + ret := C.rbd_pool_init(cephIoctx(ioctx), C.bool(force)) + return getError(ret) +} + +// poolStats represents RBD pool stats variable. +type poolStats struct { + stats C.rbd_pool_stats_t +} + +// poolStatsCreate creates a new poolStats struct. +// +// Implements: +// +// void rbd_pool_stats_create(rbd_pool_stats_t *stats) +func poolStatsCreate() *poolStats { + poolstats := &poolStats{} + C.rbd_pool_stats_create(&poolstats.stats) + return poolstats +} + +// destroy a poolStats struct and free the associated resources. +// +// Implements: +// +// void rbd_pool_stats_destroy(rbd_pool_stats_t stats) +func (poolstats *poolStats) destroy() { + C.rbd_pool_stats_destroy(poolstats.stats) + + if poolstats.stats != nil { + poolstats.stats = nil + } +} + +// PoolStatOption represents a group of configurable pool stat options. +type PoolStatOption C.rbd_pool_stat_option_t + +const ( + // PoolStatOptionImages is the representation of + // RBD_POOL_STAT_OPTION_IMAGES from librbd. + PoolStatOptionImages = PoolStatOption(C.RBD_POOL_STAT_OPTION_IMAGES) + // PoolStatOptionImageProvisionedBytes is the representation of + // RBD_POOL_STAT_OPTION_IMAGE_PROVISIONED_BYTES from librbd. + PoolStatOptionImageProvisionedBytes = PoolStatOption(C.RBD_POOL_STAT_OPTION_IMAGE_PROVISIONED_BYTES) + // PoolStatOptionImageMaxProvisionedBytes is the representation of + // RBD_POOL_STAT_OPTION_IMAGE_MAX_PROVISIONED_BYTES from librbd. + PoolStatOptionImageMaxProvisionedBytes = PoolStatOption(C.RBD_POOL_STAT_OPTION_IMAGE_MAX_PROVISIONED_BYTES) + // PoolStatOptionImageSnapshots is the representation of + // RBD_POOL_STAT_OPTION_IMAGE_SNAPSHOTS from librbd. + PoolStatOptionImageSnapshots = PoolStatOption(C.RBD_POOL_STAT_OPTION_IMAGE_SNAPSHOTS) + // PoolStatOptionTrashImages is the representation of + // RBD_POOL_STAT_OPTION_TRASH_IMAGES from librbd. + PoolStatOptionTrashImages = PoolStatOption(C.RBD_POOL_STAT_OPTION_TRASH_IMAGES) + // PoolStatOptionTrashProvisionedBytes is the representation of + // RBD_POOL_STAT_OPTION_TRASH_PROVISIONED_BYTES from librbd. + PoolStatOptionTrashProvisionedBytes = PoolStatOption(C.RBD_POOL_STAT_OPTION_TRASH_PROVISIONED_BYTES) + // PoolStatOptionTrashMaxProvisionedBytes is the representation of + // RBD_POOL_STAT_OPTION_TRASH_MAX_PROVISIONED_BYTES from librbd. 
+ PoolStatOptionTrashMaxProvisionedBytes = PoolStatOption(C.RBD_POOL_STAT_OPTION_TRASH_MAX_PROVISIONED_BYTES) + // PoolStatOptionTrashSnapshots is the representation of + // RBD_POOL_STAT_OPTION_TRASH_SNAPSHOTS from librbd. + PoolStatOptionTrashSnapshots = PoolStatOption(C.RBD_POOL_STAT_OPTION_TRASH_SNAPSHOTS) +) + +// addPoolStatOption adds the given PoolStatOption to PoolStats. +// +// Implements: +// +// int rbd_pool_stats_option_add_uint64(rbd_pool_stats_t stats, int stat_option, uint64_t* stat_val) +func (poolstats *poolStats) addPoolStatOption(option PoolStatOption, val *uint64) error { + ret := C.rbd_pool_stats_option_add_uint64( + poolstats.stats, + C.int(option), + (*C.uint64_t)(val)) + return getError(ret) +} + +// GetAllPoolStats returns a map of all PoolStatOption(s) to their respective values. +// +// Implements: +// +// int rbd_pool_stats_get(rados_ioctx_t io, rbd_pool_stats_t stats); +func GetAllPoolStats(ioctx *rados.IOContext) (map[PoolStatOption]uint64, error) { + var omap = make(map[PoolStatOption]uint64) + if ioctx == nil { + return omap, ErrNoIOContext + } + + poolstats := poolStatsCreate() + defer func() { + poolstats.destroy() + }() + + var keys [8]PoolStatOption + + keys[0] = PoolStatOptionImages + keys[1] = PoolStatOptionImageProvisionedBytes + keys[2] = PoolStatOptionImageMaxProvisionedBytes + keys[3] = PoolStatOptionImageSnapshots + keys[4] = PoolStatOptionTrashImages + keys[5] = PoolStatOptionTrashProvisionedBytes + keys[6] = PoolStatOptionTrashMaxProvisionedBytes + keys[7] = PoolStatOptionTrashSnapshots + + ovalArray := make([]uint64, len(keys)) + + // add option with the address where the respective value would be stored. + for i, key := range keys { + err := poolstats.addPoolStatOption(key, &ovalArray[i]) + if err != nil { + return omap, err + } + } + + ret := C.rbd_pool_stats_get(cephIoctx(ioctx), poolstats.stats) + if ret < 0 { + return omap, getError(ret) + } + + for j, key := range keys { + omap[key] = ovalArray[j] + } + return omap, nil +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/rbd.go b/vendor/github.com/ceph/go-ceph/rbd/rbd.go new file mode 100644 index 0000000000..b05b14397c --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/rbd.go @@ -0,0 +1,1325 @@ +package rbd + +// #cgo LDFLAGS: -lrbd +// /* force XSI-complaint strerror_r() */ +// #define _POSIX_C_SOURCE 200112L +// #undef _GNU_SOURCE +// #include +// #include +// #include +// #include +import "C" + +import ( + "errors" + "io" + "time" + "unsafe" + + "github.com/ceph/go-ceph/internal/cutil" + "github.com/ceph/go-ceph/internal/retry" + ts "github.com/ceph/go-ceph/internal/timespec" + "github.com/ceph/go-ceph/rados" +) + +const ( + // Image.Seek() constants: + + // SeekSet is used with Seek to absolutely position the file. + SeekSet = int(C.SEEK_SET) + // SeekCur is used with Seek to position the file relatively to the current + // position. + SeekCur = int(C.SEEK_CUR) + // SeekEnd is used with Seek to position the file relatively to the end. + SeekEnd = int(C.SEEK_END) +) + +// bits for Image.validate() and Snapshot.validate() +const ( + imageNeedsName uint32 = 1 << iota + imageNeedsIOContext + imageIsOpen + imageIsNotOpen + snapshotNeedsName + + // NoSnapshot indicates that no snapshot name is in use (see OpenImage) + NoSnapshot = "" +) + +// Timespec is a public type for the internal C 'struct timespec' +type Timespec ts.Timespec + +// revive:disable:var-naming old-yet-exported public api + +// ImageInfo represents the status information for an image. 
+type ImageInfo struct { + Size uint64 + Obj_size uint64 + Num_objs uint64 + Order int + Block_name_prefix string +} + +// revive:enable:var-naming + +// SnapInfo represents the status information for a snapshot. +type SnapInfo struct { + Id uint64 + Size uint64 + Name string +} + +// Locker provides info about a client that is locking an image. +type Locker struct { + Client string + Cookie string + Addr string +} + +// Image is a handle for an RBD image. +type Image struct { + io.Reader + io.Writer + io.Seeker + io.ReaderAt + io.WriterAt + name string + offset int64 + ioctx *rados.IOContext + image C.rbd_image_t +} + +// TrashInfo contains information about trashed RBDs. +type TrashInfo struct { + Id string // Id string, required to remove / restore trashed RBDs. + Name string // Original name of trashed RBD. + DeletionTime time.Time // Date / time at which the RBD was moved to the trash. + DefermentEndTime time.Time // Date / time after which the trashed RBD may be permanently deleted. +} + +// cephIoctx returns a ceph rados_ioctx_t given a go-ceph rados IOContext. +func cephIoctx(radosIoctx *rados.IOContext) C.rados_ioctx_t { + p := radosIoctx.Pointer() + if p == nil { + panic("invalid IOContext pointer") + } + return C.rados_ioctx_t(p) +} + +// test if a bit is set in the given value +func hasBit(value, bit uint32) bool { + return (value & bit) == bit +} + +// validate the attributes listed in the req bitmask, and return an error in +// case the attribute is not set +func (image *Image) validate(req uint32) error { + if hasBit(req, imageNeedsName) && image.name == "" { + return ErrNoName + } else if hasBit(req, imageNeedsIOContext) && image.ioctx == nil { + return ErrNoIOContext + } else if hasBit(req, imageIsOpen) && image.image == nil { + return ErrImageNotOpen + } else if hasBit(req, imageIsNotOpen) && image.image != nil { + return ErrImageIsOpen + } + + return nil +} + +// Version returns the major, minor, and patch level of the librbd library. +func Version() (int, int, int) { + var cMajor, cMinor, cPatch C.int + C.rbd_version(&cMajor, &cMinor, &cPatch) + return int(cMajor), int(cMinor), int(cPatch) +} + +// GetImage gets a reference to a previously created rbd image. +func GetImage(ioctx *rados.IOContext, name string) *Image { + return &Image{ + ioctx: ioctx, + name: name, + } +} + +// Create a new rbd image. +// +// Implements: +// +// int rbd_create(rados_ioctx_t io, const char *name, uint64_t size, int *order); +// +// Also implements (for backward compatibility): +// +// int rbd_create2(rados_ioctx_t io, const char *name, uint64_t size, +// uint64_t features, int *order); +// int rbd_create3(rados_ioctx_t io, const char *name, uint64_t size, +// uint64_t features, int *order, +// uint64_t stripe_unit, uint64_t stripe_count); +func Create(ioctx *rados.IOContext, name string, size uint64, order int, + args ...uint64) (image *Image, err error) { + var ret C.int + + switch len(args) { + case 3: + return Create3(ioctx, name, size, args[0], order, args[1], + args[2]) + case 1: + return Create2(ioctx, name, size, args[0], order) + case 0: + cOrder := C.int(order) + cName := C.CString(name) + + defer C.free(unsafe.Pointer(cName)) + + ret = C.rbd_create(cephIoctx(ioctx), + cName, C.uint64_t(size), &cOrder) + default: + return nil, errors.New("Wrong number of argument") + } + + if ret < 0 { + return nil, rbdError(ret) + } + + return &Image{ + ioctx: ioctx, + name: name, + }, nil +} + +// Create2 creates a new rbd image using provided features. 
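+//
+// A minimal sketch of creating an image with explicit features (ioctx is an
+// assumed, already-opened *rados.IOContext; FeatureLayering is one of the
+// feature constants from this package's features.go):
+//
+//	_, err := rbd.Create2(ioctx, "img1", uint64(1<<30), rbd.FeatureLayering, 22)
+//	if err != nil {
+//		return err
+//	}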
+// +// Implements: +// +// int rbd_create2(rados_ioctx_t io, const char *name, uint64_t size, +// uint64_t features, int *order); +func Create2(ioctx *rados.IOContext, name string, size uint64, features uint64, + order int) (image *Image, err error) { + var ret C.int + + cOrder := C.int(order) + cName := C.CString(name) + + defer C.free(unsafe.Pointer(cName)) + + ret = C.rbd_create2(cephIoctx(ioctx), cName, + C.uint64_t(size), C.uint64_t(features), &cOrder) + if ret < 0 { + return nil, rbdError(ret) + } + + return &Image{ + ioctx: ioctx, + name: name, + }, nil +} + +// Create3 creates a new rbd image using provided features and stripe +// parameters. +// +// Implements: +// +// int rbd_create3(rados_ioctx_t io, const char *name, uint64_t size, +// uint64_t features, int *order, +// uint64_t stripe_unit, uint64_t stripe_count); +func Create3(ioctx *rados.IOContext, name string, size uint64, features uint64, + order int, stripeUnit uint64, stripeCount uint64) (image *Image, err error) { + var ret C.int + + cOrder := C.int(order) + cName := C.CString(name) + + defer C.free(unsafe.Pointer(cName)) + + ret = C.rbd_create3(cephIoctx(ioctx), cName, + C.uint64_t(size), C.uint64_t(features), &cOrder, + C.uint64_t(stripeUnit), C.uint64_t(stripeCount)) + if ret < 0 { + return nil, rbdError(ret) + } + + return &Image{ + ioctx: ioctx, + name: name, + }, nil +} + +// Clone a new rbd image from a snapshot. +// +// Implements: +// +// int rbd_clone(rados_ioctx_t p_ioctx, const char *p_name, +// const char *p_snapname, rados_ioctx_t c_ioctx, +// const char *c_name, uint64_t features, int *c_order); +func (image *Image) Clone(snapname string, cIoctx *rados.IOContext, cName string, features uint64, order int) (*Image, error) { + if err := image.validate(imageNeedsIOContext); err != nil { + return nil, err + } + + cOrder := C.int(order) + cParentName := C.CString(image.name) + cParentSnapName := C.CString(snapname) + cCloneName := C.CString(cName) + + defer C.free(unsafe.Pointer(cParentName)) + defer C.free(unsafe.Pointer(cParentSnapName)) + defer C.free(unsafe.Pointer(cCloneName)) + + ret := C.rbd_clone( + cephIoctx(image.ioctx), + cParentName, + cParentSnapName, + cephIoctx(cIoctx), + cCloneName, + C.uint64_t(features), + &cOrder) + if ret < 0 { + return nil, rbdError(ret) + } + + return &Image{ + ioctx: cIoctx, + name: cName, + }, nil +} + +// Remove the specified rbd image. +// +// Implements: +// +// int rbd_remove(rados_ioctx_t io, const char *name); +func (image *Image) Remove() error { + if err := image.validate(imageNeedsIOContext | imageNeedsName | imageIsNotOpen); err != nil { + return err + } + return RemoveImage(image.ioctx, image.name) +} + +// Trash will move an image into the RBD trash, where it will be protected (i.e., salvageable) for +// at least the specified delay. +func (image *Image) Trash(delay time.Duration) error { + if err := image.validate(imageNeedsIOContext | imageNeedsName); err != nil { + return err + } + + cName := C.CString(image.name) + defer C.free(unsafe.Pointer(cName)) + + return getError(C.rbd_trash_move(cephIoctx(image.ioctx), cName, + C.uint64_t(delay.Seconds()))) +} + +// Rename an rbd image. 
+// +// Implements: +// +// int rbd_rename(rados_ioctx_t src_io_ctx, const char *srcname, const char *destname); +func (image *Image) Rename(destname string) error { + if err := image.validate(imageNeedsIOContext | imageNeedsName); err != nil { + return err + } + + cSrcName := C.CString(image.name) + cDestName := C.CString(destname) + + defer C.free(unsafe.Pointer(cSrcName)) + defer C.free(unsafe.Pointer(cDestName)) + + err := rbdError(C.rbd_rename(cephIoctx(image.ioctx), + cSrcName, cDestName)) + if err == 0 { + image.name = destname + return nil + } + return err +} + +// Open the rbd image. +// +// Deprecated: use OpenImage and OpenImageReadOnly instead +func (image *Image) Open(args ...interface{}) error { + if err := image.validate(imageNeedsIOContext | imageNeedsName); err != nil { + return err + } + + var ( + snapName string + readOnly bool + ) + for _, arg := range args { + switch t := arg.(type) { + case string: + snapName = t + case bool: + readOnly = t + default: + return errors.New("Unexpected argument") + } + } + + var ( + tmp *Image + err error + ) + if readOnly { + tmp, err = OpenImageReadOnly(image.ioctx, image.name, snapName) + } else { + tmp, err = OpenImage(image.ioctx, image.name, snapName) + } + if err != nil { + return err + } + + image.image = tmp.image + return nil +} + +// Close an open rbd image. +// +// Implements: +// +// int rbd_close(rbd_image_t image); +func (image *Image) Close() error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + + if ret := C.rbd_close(image.image); ret != 0 { + return rbdError(ret) + } + + image.image = nil + return nil +} + +// Resize an rbd image. +// +// Implements: +// +// int rbd_resize(rbd_image_t image, uint64_t size); +func (image *Image) Resize(size uint64) error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + + return getError(C.rbd_resize(image.image, C.uint64_t(size))) +} + +// Stat an rbd image. +// +// Implements: +// +// int rbd_stat(rbd_image_t image, rbd_image_info_t *info, size_t infosize); +func (image *Image) Stat() (info *ImageInfo, err error) { + if err := image.validate(imageIsOpen); err != nil { + return nil, err + } + + var cStat C.rbd_image_info_t + + if ret := C.rbd_stat(image.image, &cStat, C.size_t(unsafe.Sizeof(info))); ret < 0 { + return info, rbdError(ret) + } + + return &ImageInfo{ + Size: uint64(cStat.size), + Obj_size: uint64(cStat.obj_size), + Num_objs: uint64(cStat.num_objs), + Order: int(cStat.order), + Block_name_prefix: C.GoString((*C.char)(&cStat.block_name_prefix[0]))}, nil +} + +// IsOldFormat returns true if the rbd image uses the old format. +// +// Implements: +// +// int rbd_get_old_format(rbd_image_t image, uint8_t *old); +func (image *Image) IsOldFormat() (bool, error) { + if err := image.validate(imageIsOpen); err != nil { + return false, err + } + + var cOldFormat C.uint8_t + ret := C.rbd_get_old_format(image.image, + &cOldFormat) + if ret < 0 { + return false, rbdError(ret) + } + + return cOldFormat != 0, nil +} + +// GetSize returns the size of the rbd image. +// +// Implements: +// +// int rbd_size(rbd_image_t image, uint64_t *size); +func (image *Image) GetSize() (size uint64, err error) { + if err := image.validate(imageIsOpen); err != nil { + return 0, err + } + + if ret := C.rbd_get_size(image.image, (*C.uint64_t)(&size)); ret < 0 { + return 0, rbdError(ret) + } + + return size, nil +} + +// GetStripeUnit returns the stripe-unit value for the rbd image. 
+// +// Implements: +// +// int rbd_get_stripe_unit(rbd_image_t image, uint64_t *stripe_unit); +func (image *Image) GetStripeUnit() (uint64, error) { + if err := image.validate(imageIsOpen); err != nil { + return 0, err + } + + var stripeUnit uint64 + if ret := C.rbd_get_stripe_unit(image.image, (*C.uint64_t)(&stripeUnit)); ret < 0 { + return 0, rbdError(ret) + } + + return stripeUnit, nil +} + +// GetStripeCount returns the stripe-count value for the rbd image. +// +// Implements: +// +// int rbd_get_stripe_count(rbd_image_t image, uint64_t *stripe_count); +func (image *Image) GetStripeCount() (uint64, error) { + if err := image.validate(imageIsOpen); err != nil { + return 0, err + } + + var stripeCount uint64 + if ret := C.rbd_get_stripe_count(image.image, (*C.uint64_t)(&stripeCount)); ret < 0 { + return 0, rbdError(ret) + } + + return stripeCount, nil +} + +// GetOverlap returns the overlapping bytes between the rbd image and its +// parent. +// +// Implements: +// +// int rbd_get_overlap(rbd_image_t image, uint64_t *overlap); +func (image *Image) GetOverlap() (overlap uint64, err error) { + if err := image.validate(imageIsOpen); err != nil { + return 0, err + } + + if ret := C.rbd_get_overlap(image.image, (*C.uint64_t)(&overlap)); ret < 0 { + return overlap, rbdError(ret) + } + + return overlap, nil +} + +// Copy one rbd image to another. +// +// Implements: +// +// int rbd_copy(rbd_image_t image, rados_ioctx_t dest_io_ctx, const char *destname); +func (image *Image) Copy(ioctx *rados.IOContext, destname string) error { + if err := image.validate(imageIsOpen); err != nil { + return err + } else if ioctx == nil { + return ErrNoIOContext + } else if len(destname) == 0 { + return ErrNoName + } + + cDestName := C.CString(destname) + defer C.free(unsafe.Pointer(cDestName)) + + return getError(C.rbd_copy(image.image, + cephIoctx(ioctx), cDestName)) +} + +// Copy2 copies one rbd image to another, using an image handle. +// +// Implements: +// +// int rbd_copy2(rbd_image_t src, rbd_image_t dest); +func (image *Image) Copy2(dest *Image) error { + if err := image.validate(imageIsOpen); err != nil { + return err + } else if err := dest.validate(imageIsOpen); err != nil { + return err + } + + return getError(C.rbd_copy2(image.image, dest.image)) +} + +// DeepCopy an rbd image to a new image with specific options. +// +// Implements: +// +// int rbd_deep_copy(rbd_image_t src, rados_ioctx_t dest_io_ctx, +// const char *destname, rbd_image_options_t dest_opts); +func (image *Image) DeepCopy(ioctx *rados.IOContext, destname string, rio *ImageOptions) error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + if ioctx == nil { + return ErrNoIOContext + } + if destname == "" { + return ErrNoName + } + if rio == nil { + return rbdError(C.EINVAL) + } + + cDestname := C.CString(destname) + defer C.free(unsafe.Pointer(cDestname)) + + ret := C.rbd_deep_copy(image.image, cephIoctx(ioctx), cDestname, + C.rbd_image_options_t(rio.options)) + return getError(ret) +} + +// Flatten removes snapshot references from the image. +// +// Implements: +// +// int rbd_flatten(rbd_image_t image); +func (image *Image) Flatten() error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + + return getError(C.rbd_flatten(image.image)) +} + +// ListLockers returns a list of clients that have locks on the image. 
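+//
+// A short usage sketch (img is an assumed, already-open *rbd.Image):
+//
+//	tag, lockers, err := img.ListLockers()
+//	if err != nil {
+//		return err
+//	}
+//	for _, l := range lockers {
+//		fmt.Printf("tag=%q client=%s cookie=%s addr=%s\n", tag, l.Client, l.Cookie, l.Addr)
+//	}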
+// +// Impelemnts: +// +// ssize_t rbd_list_lockers(rbd_image_t image, int *exclusive, +// char *tag, size_t *tag_len, +// char *clients, size_t *clients_len, +// char *cookies, size_t *cookies_len, +// char *addrs, size_t *addrs_len); +func (image *Image) ListLockers() (tag string, lockers []Locker, err error) { + if err := image.validate(imageIsOpen); err != nil { + return "", nil, err + } + + var cExclusive C.int + var cTagLen, cClientsLen, cCookiesLen, cAddrsLen C.size_t + var cLockerCount C.ssize_t + + C.rbd_list_lockers(image.image, &cExclusive, + nil, (*C.size_t)(&cTagLen), + nil, (*C.size_t)(&cClientsLen), + nil, (*C.size_t)(&cCookiesLen), + nil, (*C.size_t)(&cAddrsLen)) + + // no locker held on rbd image when either c_clients_len, + // c_cookies_len or c_addrs_len is *0*, so just quickly returned + if int(cClientsLen) == 0 || int(cCookiesLen) == 0 || + int(cAddrsLen) == 0 { + lockers = make([]Locker, 0) + return "", lockers, nil + } + + tagBuf := make([]byte, cTagLen) + clientsBuf := make([]byte, cClientsLen) + cookiesBuf := make([]byte, cCookiesLen) + addrsBuf := make([]byte, cAddrsLen) + + cLockerCount = C.rbd_list_lockers(image.image, &cExclusive, + (*C.char)(unsafe.Pointer(&tagBuf[0])), (*C.size_t)(&cTagLen), + (*C.char)(unsafe.Pointer(&clientsBuf[0])), (*C.size_t)(&cClientsLen), + (*C.char)(unsafe.Pointer(&cookiesBuf[0])), (*C.size_t)(&cCookiesLen), + (*C.char)(unsafe.Pointer(&addrsBuf[0])), (*C.size_t)(&cAddrsLen)) + + // rbd_list_lockers returns negative value for errors + // and *0* means no locker held on rbd image. + // but *0* is unexpected here because first rbd_list_lockers already + // dealt with no locker case + if int(cLockerCount) <= 0 { + return "", nil, rbdError(cLockerCount) + } + + clients := cutil.SplitSparseBuffer(clientsBuf) + cookies := cutil.SplitSparseBuffer(cookiesBuf) + addrs := cutil.SplitSparseBuffer(addrsBuf) + + lockers = make([]Locker, cLockerCount) + for i := 0; i < int(cLockerCount); i++ { + lockers[i] = Locker{Client: clients[i], + Cookie: cookies[i], + Addr: addrs[i]} + } + + return string(tagBuf), lockers, nil +} + +// LockExclusive acquires an exclusive lock on the rbd image. +// +// Implements: +// +// int rbd_lock_exclusive(rbd_image_t image, const char *cookie); +func (image *Image) LockExclusive(cookie string) error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + + cCookie := C.CString(cookie) + defer C.free(unsafe.Pointer(cCookie)) + + return getError(C.rbd_lock_exclusive(image.image, cCookie)) +} + +// LockShared acquires a shared lock on the rbd image. +// +// Implements: +// +// int rbd_lock_shared(rbd_image_t image, const char *cookie, const char *tag); +func (image *Image) LockShared(cookie string, tag string) error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + + cCookie := C.CString(cookie) + cTag := C.CString(tag) + defer C.free(unsafe.Pointer(cCookie)) + defer C.free(unsafe.Pointer(cTag)) + + return getError(C.rbd_lock_shared(image.image, cCookie, cTag)) +} + +// Unlock releases a lock on the image. +// +// Implements: +// +// int rbd_lock_shared(rbd_image_t image, const char *cookie, const char *tag); +func (image *Image) Unlock(cookie string) error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + + cCookie := C.CString(cookie) + defer C.free(unsafe.Pointer(cCookie)) + + return getError(C.rbd_unlock(image.image, cCookie)) +} + +// BreakLock forces the release of a lock held by another client. 
+// +// Implements: +// +// int rbd_break_lock(rbd_image_t image, const char *client, const char *cookie); +func (image *Image) BreakLock(client string, cookie string) error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + + cClient := C.CString(client) + cCookie := C.CString(cookie) + defer C.free(unsafe.Pointer(cClient)) + defer C.free(unsafe.Pointer(cCookie)) + + return getError(C.rbd_break_lock(image.image, cClient, cCookie)) +} + +// Read data from the image. The length of the read is determined by the length +// of the buffer slice. The position of the read is determined by an internal +// offset which is not safe in concurrent code. Prefer ReadAt when possible. +// +// Implements: +// +// ssize_t rbd_read(rbd_image_t image, uint64_t ofs, size_t len, +// char *buf); +func (image *Image) Read(data []byte) (int, error) { + if err := image.validate(imageIsOpen); err != nil { + return 0, err + } + + if len(data) == 0 { + return 0, nil + } + + ret := int(C.rbd_read( + image.image, + (C.uint64_t)(image.offset), + (C.size_t)(len(data)), + (*C.char)(unsafe.Pointer(&data[0])))) + + if ret < 0 { + return 0, rbdError(ret) + } + + image.offset += int64(ret) + if ret < len(data) { + return ret, io.EOF + } + + return ret, nil +} + +// Write data to an image. The length of the write is determined by the length of +// the buffer slice. The position of the write is determined by an internal +// offset which is not safe in concurrent code. Prefer WriteAt when possible. +// +// Implements: +// +// ssize_t rbd_write(rbd_image_t image, uint64_t ofs, size_t len, +// const char *buf); +func (image *Image) Write(data []byte) (n int, err error) { + if err := image.validate(imageIsOpen); err != nil { + return 0, err + } + + ret := int(C.rbd_write(image.image, C.uint64_t(image.offset), + C.size_t(len(data)), (*C.char)(unsafe.Pointer(&data[0])))) + + if ret >= 0 { + image.offset += int64(ret) + } + + if ret != len(data) { + err = rbdError(-C.EPERM) + } + + return ret, err +} + +// Seek updates the internal file position for the current image. +func (image *Image) Seek(offset int64, whence int) (int64, error) { + switch whence { + case SeekSet: + image.offset = offset + case SeekCur: + image.offset += offset + case SeekEnd: + stats, err := image.Stat() + if err != nil { + return 0, err + } + image.offset = int64(stats.Size) - offset + default: + return 0, errors.New("Wrong value for whence") + } + return image.offset, nil +} + +// Discard the supplied range from the image. The supplied range will be read +// as zeros once Discard returns. The discarded range will no longer take up +// space. +// +// Implements: +// +// int rbd_discard(rbd_image_t image, uint64_t ofs, uint64_t len); +func (image *Image) Discard(ofs uint64, length uint64) (int, error) { + if err := image.validate(imageIsOpen); err != nil { + return 0, err + } + + ret := C.rbd_discard(image.image, C.uint64_t(ofs), C.uint64_t(length)) + if ret < 0 { + return 0, rbdError(ret) + } + + return int(ret), nil +} + +// ReadAt copies data from the image into the supplied buffer. 
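+//
+// Unlike Read, ReadAt does not consult or advance the image's internal offset,
+// which makes it the safer choice for concurrent readers. A brief sketch (img
+// is an assumed open *rbd.Image; handle is a placeholder for caller code):
+//
+//	buf := make([]byte, 4096)
+//	n, err := img.ReadAt(buf, 0)
+//	if err != nil && err != io.EOF {
+//		return err
+//	}
+//	handle(buf[:n])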
+func (image *Image) ReadAt(data []byte, off int64) (int, error) { + if err := image.validate(imageIsOpen); err != nil { + return 0, err + } + + if len(data) == 0 { + return 0, nil + } + + ret := int(C.rbd_read( + image.image, + (C.uint64_t)(off), + (C.size_t)(len(data)), + (*C.char)(unsafe.Pointer(&data[0])))) + + if ret < 0 { + return 0, rbdError(ret) + } + + if ret < len(data) { + return ret, io.EOF + } + + return ret, nil +} + +// WriteAt copies data from the supplied buffer to the image. +func (image *Image) WriteAt(data []byte, off int64) (n int, err error) { + if err := image.validate(imageIsOpen); err != nil { + return 0, err + } + + if len(data) == 0 { + return 0, nil + } + + ret := int(C.rbd_write(image.image, C.uint64_t(off), + C.size_t(len(data)), (*C.char)(unsafe.Pointer(&data[0])))) + + if ret != len(data) { + err = rbdError(-C.EPERM) + } + + return ret, err +} + +// WriteSame repeats writing data from starting point ofs until n bytes have +// been written. +// +// Implements: +// +// ssize_t rbd_writesame(rbd_image_t image, uint64_t ofs, size_t len, +// const char *buf, size_t data_len, int op_flags); +func (image *Image) WriteSame(ofs, n uint64, data []byte, flags rados.OpFlags) (int64, error) { + var err error + + if err = image.validate(imageIsOpen); err != nil { + return 0, err + } + + if len(data) == 0 { + return 0, nil + } + + ret := C.rbd_writesame(image.image, + C.uint64_t(ofs), + C.size_t(n), + (*C.char)(unsafe.Pointer(&data[0])), + C.size_t(len(data)), + C.int(flags)) + if ret < 0 { + err = getError(C.int(ret)) + } + + return int64(ret), err +} + +// Flush all cached writes to storage. +// +// Implements: +// +// int rbd_flush(rbd_image_t image); +func (image *Image) Flush() error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + + return getError(C.rbd_flush(image.image)) +} + +// GetSnapshotNames returns more than just the names of snapshots +// associated with the rbd image. +// +// Implements: +// +// int rbd_snap_list(rbd_image_t image, rbd_snap_info_t *snaps, int *max_snaps); +func (image *Image) GetSnapshotNames() (snaps []SnapInfo, err error) { + if err := image.validate(imageIsOpen); err != nil { + return nil, err + } + + var cMaxSnaps C.int + + ret := C.rbd_snap_list(image.image, nil, &cMaxSnaps) + + cSnaps := make([]C.rbd_snap_info_t, cMaxSnaps) + snaps = make([]SnapInfo, cMaxSnaps) + + ret = C.rbd_snap_list(image.image, + &cSnaps[0], &cMaxSnaps) + if ret < 0 { + return nil, rbdError(ret) + } + + for i, s := range cSnaps { + snaps[i] = SnapInfo{Id: uint64(s.id), + Size: uint64(s.size), + Name: C.GoString(s.name)} + } + + C.rbd_snap_list_end(&cSnaps[0]) + return snaps[:len(snaps)-1], nil +} + +// GetId returns the internal image ID string. +// +// Implements: +// +// int rbd_get_id(rbd_image_t image, char *id, size_t id_len); +func (image *Image) GetId() (string, error) { + if err := image.validate(imageIsOpen); err != nil { + return "", err + } + var ( + err error + buf []byte + ) + retry.WithSizes(1, 8192, func(size int) retry.Hint { + buf = make([]byte, size) + ret := C.rbd_get_id( + image.image, + (*C.char)(unsafe.Pointer(&buf[0])), + C.size_t(size)) + err = getErrorIfNegative(ret) + return retry.DoubleSize.If(err == errRange) + }) + if err != nil { + return "", err + } + id := C.GoString((*C.char)(unsafe.Pointer(&buf[0]))) + return id, nil + +} + +// GetName returns the image name. 
+func (image *Image) GetName() string { + return image.name +} + +// SetSnapshot updates the rbd image (not the Snapshot) such that the snapshot +// is the source of readable data. +// +// Implements: +// +// int rbd_snap_set(rbd_image_t image, const char *snapname); +func (image *Image) SetSnapshot(snapname string) error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + + cSnapName := C.CString(snapname) + defer C.free(unsafe.Pointer(cSnapName)) + + return getError(C.rbd_snap_set(image.image, cSnapName)) +} + +// GetTrashList returns a slice of TrashInfo structs, containing information about all RBD images +// currently residing in the trash. +func GetTrashList(ioctx *rados.IOContext) ([]TrashInfo, error) { + var ( + err error + count C.size_t + entries []C.rbd_trash_image_info_t + ) + retry.WithSizes(32, 10240, func(size int) retry.Hint { + count = C.size_t(size) + entries = make([]C.rbd_trash_image_info_t, count) + ret := C.rbd_trash_list(cephIoctx(ioctx), &entries[0], &count) + err = getErrorIfNegative(ret) + return retry.Size(int(count)).If(err == errRange) + }) + if err != nil { + return nil, err + } + // Free rbd_trash_image_info_t pointers + defer C.rbd_trash_list_cleanup(&entries[0], count) + + trashList := make([]TrashInfo, count) + for i, ti := range entries[:count] { + trashList[i] = TrashInfo{ + Id: C.GoString(ti.id), + Name: C.GoString(ti.name), + DeletionTime: time.Unix(int64(ti.deletion_time), 0), + DefermentEndTime: time.Unix(int64(ti.deferment_end_time), 0), + } + } + return trashList, nil +} + +// TrashRemove permanently deletes the trashed RBD with the specified id. +func TrashRemove(ioctx *rados.IOContext, id string, force bool) error { + cid := C.CString(id) + defer C.free(unsafe.Pointer(cid)) + + return getError(C.rbd_trash_remove(cephIoctx(ioctx), cid, C.bool(force))) +} + +// TrashRestore restores the trashed RBD with the specified id back to the pool from whence it +// came, with the specified new name. +func TrashRestore(ioctx *rados.IOContext, id, name string) error { + cid := C.CString(id) + cName := C.CString(name) + defer C.free(unsafe.Pointer(cid)) + defer C.free(unsafe.Pointer(cName)) + + return getError(C.rbd_trash_restore(cephIoctx(ioctx), cid, cName)) +} + +// OpenImage will open an existing rbd image by name and snapshot name, +// returning a new opened image. Pass the NoSnapshot sentinel value as the +// snapName to explicitly indicate that no snapshot name is being provided. +// +// Implements: +// +// int rbd_open(rados_ioctx_t io, const char *name, +// rbd_image_t *image, const char *snap_name); +func OpenImage(ioctx *rados.IOContext, name, snapName string) (*Image, error) { + if ioctx == nil { + return nil, ErrNoIOContext + } + if name == "" { + return nil, ErrNoName + } + + cName := C.CString(name) + defer C.free(unsafe.Pointer(cName)) + + var cSnapName *C.char + if snapName != NoSnapshot { + cSnapName = C.CString(snapName) + defer C.free(unsafe.Pointer(cSnapName)) + } + + var cImage C.rbd_image_t + ret := C.rbd_open( + cephIoctx(ioctx), + cName, + &cImage, + cSnapName) + + if ret != 0 { + return nil, getError(ret) + } + + return &Image{ + ioctx: ioctx, + name: name, + image: cImage, + }, nil +} + +// OpenImageReadOnly will open an existing rbd image by name and snapshot name, +// returning a new opened-for-read image. Pass the NoSnapshot sentinel value +// as the snapName to explicitly indicate that no snapshot name is being +// provided. 
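+//
+// A short sketch of opening an image head (no snapshot) for reading and
+// closing it again (ioctx and the image name are assumptions for
+// illustration):
+//
+//	img, err := rbd.OpenImageReadOnly(ioctx, "img1", rbd.NoSnapshot)
+//	if err != nil {
+//		return err
+//	}
+//	defer img.Close()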
+// +// Implements: +// +// int rbd_open_read_only(rados_ioctx_t io, const char *name, +// rbd_image_t *image, const char *snap_name); +func OpenImageReadOnly(ioctx *rados.IOContext, name, snapName string) (*Image, error) { + if ioctx == nil { + return nil, ErrNoIOContext + } + if name == "" { + return nil, ErrNoName + } + + cName := C.CString(name) + defer C.free(unsafe.Pointer(cName)) + + var cSnapName *C.char + if snapName != NoSnapshot { + cSnapName = C.CString(snapName) + defer C.free(unsafe.Pointer(cSnapName)) + } + + var cImage C.rbd_image_t + ret := C.rbd_open_read_only( + cephIoctx(ioctx), + cName, + &cImage, + cSnapName) + + if ret != 0 { + return nil, getError(ret) + } + + return &Image{ + ioctx: ioctx, + name: name, + image: cImage, + }, nil +} + +// OpenImageById will open an existing rbd image by ID and snapshot name, +// returning a new opened image. Pass the NoSnapshot sentinel value as the +// snapName to explicitly indicate that no snapshot name is being provided. +// Error handling will fail & segfault unless compiled with a version of ceph +// that fixes https://tracker.ceph.com/issues/43178 +// +// Implements: +// +// int rbd_open_by_id(rados_ioctx_t io, const char *id, +// rbd_image_t *image, const char *snap_name); +func OpenImageById(ioctx *rados.IOContext, id, snapName string) (*Image, error) { + if ioctx == nil { + return nil, ErrNoIOContext + } + if id == "" { + return nil, ErrNoName + } + + cid := C.CString(id) + defer C.free(unsafe.Pointer(cid)) + + var cSnapName *C.char + if snapName != NoSnapshot { + cSnapName = C.CString(snapName) + defer C.free(unsafe.Pointer(cSnapName)) + } + + var cImage C.rbd_image_t + ret := C.rbd_open_by_id( + cephIoctx(ioctx), + cid, + &cImage, + cSnapName) + + if ret != 0 { + return nil, getError(ret) + } + + return &Image{ + ioctx: ioctx, + image: cImage, + }, nil +} + +// OpenImageByIdReadOnly will open an existing rbd image by ID and snapshot +// name, returning a new opened-for-read image. Pass the NoSnapshot sentinel +// value as the snapName to explicitly indicate that no snapshot name is being +// provided. +// Error handling will fail & segfault unless compiled with a version of ceph +// that fixes https://tracker.ceph.com/issues/43178 +// +// Implements: +// +// int rbd_open_by_id_read_only(rados_ioctx_t io, const char *id, +// rbd_image_t *image, const char *snap_name); +func OpenImageByIdReadOnly(ioctx *rados.IOContext, id, snapName string) (*Image, error) { + if ioctx == nil { + return nil, ErrNoIOContext + } + if id == "" { + return nil, ErrNoName + } + + cid := C.CString(id) + defer C.free(unsafe.Pointer(cid)) + + var cSnapName *C.char + if snapName != NoSnapshot { + cSnapName = C.CString(snapName) + defer C.free(unsafe.Pointer(cSnapName)) + } + + var cImage C.rbd_image_t + ret := C.rbd_open_by_id_read_only( + cephIoctx(ioctx), + cid, + &cImage, + cSnapName) + + if ret != 0 { + return nil, getError(ret) + } + + return &Image{ + ioctx: ioctx, + image: cImage, + }, nil +} + +// CreateImage creates a new rbd image using provided image options. 
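+//
+// A minimal sketch, assuming the ImageOptions helpers from this package's
+// options.go (NewRbdImageOptions and its Destroy method):
+//
+//	opts := rbd.NewRbdImageOptions()
+//	defer opts.Destroy()
+//	if err := rbd.CreateImage(ioctx, "img1", uint64(1<<30), opts); err != nil {
+//		return err
+//	}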
+// +// Implements: +// +// int rbd_create4(rados_ioctx_t io, const char *name, uint64_t size, +// rbd_image_options_t opts); +func CreateImage(ioctx *rados.IOContext, name string, size uint64, rio *ImageOptions) error { + if ioctx == nil { + return ErrNoIOContext + } + if name == "" { + return ErrNoName + } + if rio == nil { + return rbdError(C.EINVAL) + } + + cName := C.CString(name) + defer C.free(unsafe.Pointer(cName)) + + ret := C.rbd_create4(cephIoctx(ioctx), cName, + C.uint64_t(size), C.rbd_image_options_t(rio.options)) + return getError(ret) +} + +// RemoveImage removes the specified rbd image. +// +// Implements: +// +// int rbd_remove(rados_ioctx_t io, const char *name); +func RemoveImage(ioctx *rados.IOContext, name string) error { + if ioctx == nil { + return ErrNoIOContext + } + if name == "" { + return ErrNoName + } + + cName := C.CString(name) + defer C.free(unsafe.Pointer(cName)) + return getError(C.rbd_remove(cephIoctx(ioctx), cName)) +} + +// CloneImage creates a clone of the image from the named snapshot in the +// provided io-context with the given name and image options. +// +// Implements: +// +// int rbd_clone3(rados_ioctx_t p_ioctx, const char *p_name, +// const char *p_snapname, rados_ioctx_t c_ioctx, +// const char *c_name, rbd_image_options_t c_opts); +func CloneImage(ioctx *rados.IOContext, parentName, snapName string, + destctx *rados.IOContext, name string, rio *ImageOptions) error { + + if rio == nil { + return rbdError(C.EINVAL) + } + + cParentName := C.CString(parentName) + defer C.free(unsafe.Pointer(cParentName)) + cParentSnapName := C.CString(snapName) + defer C.free(unsafe.Pointer(cParentSnapName)) + cCloneName := C.CString(name) + defer C.free(unsafe.Pointer(cCloneName)) + + ret := C.rbd_clone3( + cephIoctx(ioctx), + cParentName, + cParentSnapName, + cephIoctx(destctx), + cCloneName, + C.rbd_image_options_t(rio.options)) + return getError(ret) +} + +// CloneFromImage creates a clone of the image from the named snapshot in the +// provided io-context with the given name and image options. +// This function is a convenience wrapper around CloneImage to support cloning +// from an existing Image. +func CloneFromImage(parent *Image, snapName string, + destctx *rados.IOContext, name string, rio *ImageOptions) error { + + if err := parent.validate(imageNeedsIOContext); err != nil { + return err + } + return CloneImage(parent.ioctx, parent.name, snapName, destctx, name, rio) +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/rbd_nautilus.go b/vendor/github.com/ceph/go-ceph/rbd/rbd_nautilus.go new file mode 100644 index 0000000000..4e63563a3d --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/rbd_nautilus.go @@ -0,0 +1,118 @@ +// +// Ceph Nautilus is the first release that includes rbd_list2() and +// rbd_get_create_timestamp(). + +package rbd + +// #cgo LDFLAGS: -lrbd +// #include +// #include +// #include +import "C" + +import ( + "unsafe" + + ts "github.com/ceph/go-ceph/internal/timespec" + "github.com/ceph/go-ceph/rados" +) + +// GetImageNames returns the list of current RBD images. 
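+//
+// A brief sketch (ioctx is an assumed, already-opened *rados.IOContext):
+//
+//	names, err := rbd.GetImageNames(ioctx)
+//	if err != nil {
+//		return err
+//	}
+//	for _, name := range names {
+//		fmt.Println(name)
+//	}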
+func GetImageNames(ioctx *rados.IOContext) ([]string, error) { + var images []C.rbd_image_spec_t + size := C.size_t(4096) + for { + images = make([]C.rbd_image_spec_t, size) + ret := C.rbd_list2( + cephIoctx(ioctx), + (*C.rbd_image_spec_t)(unsafe.Pointer(&images[0])), + &size) + err := getErrorIfNegative(ret) + if err != nil { + if err == errRange { + continue + } + return nil, err + } + break + } + defer C.rbd_image_spec_list_cleanup((*C.rbd_image_spec_t)(unsafe.Pointer(&images[0])), size) + + names := make([]string, size) + for i, image := range images[:size] { + names[i] = C.GoString(image.name) + } + return names, nil +} + +// GetCreateTimestamp returns the time the rbd image was created. +// +// Implements: +// +// int rbd_get_create_timestamp(rbd_image_t image, struct timespec *timestamp); +func (image *Image) GetCreateTimestamp() (Timespec, error) { + if err := image.validate(imageIsOpen); err != nil { + return Timespec{}, err + } + + var cts C.struct_timespec + + if ret := C.rbd_get_create_timestamp(image.image, &cts); ret < 0 { + return Timespec{}, getError(ret) + } + + return Timespec(ts.CStructToTimespec(ts.CTimespecPtr(&cts))), nil +} + +// GetAccessTimestamp returns the time the rbd image was last accessed. +// +// Implements: +// +// int rbd_get_access_timestamp(rbd_image_t image, struct timespec *timestamp); +func (image *Image) GetAccessTimestamp() (Timespec, error) { + if err := image.validate(imageIsOpen); err != nil { + return Timespec{}, err + } + + var cts C.struct_timespec + + if ret := C.rbd_get_access_timestamp(image.image, &cts); ret < 0 { + return Timespec{}, getError(ret) + } + + return Timespec(ts.CStructToTimespec(ts.CTimespecPtr(&cts))), nil +} + +// GetModifyTimestamp returns the time the rbd image was last modified. +// +// Implements: +// +// int rbd_get_modify_timestamp(rbd_image_t image, struct timespec *timestamp); +func (image *Image) GetModifyTimestamp() (Timespec, error) { + if err := image.validate(imageIsOpen); err != nil { + return Timespec{}, err + } + + var cts C.struct_timespec + + if ret := C.rbd_get_modify_timestamp(image.image, &cts); ret < 0 { + return Timespec{}, getError(ret) + } + + return Timespec(ts.CStructToTimespec(ts.CTimespecPtr(&cts))), nil +} + +// Sparsify makes an image sparse by deallocating runs of zeros. +// The sparseSize value will be used to find runs of zeros and must be +// a power of two no less than 4096 and no larger than the image size. 
+// +// Implements: +// +// int rbd_sparsify(rbd_image_t image, size_t sparse_size); +func (image *Image) Sparsify(sparseSize uint) error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + + return getError(C.rbd_sparsify(image.image, C.size_t(sparseSize))) +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/resize.go b/vendor/github.com/ceph/go-ceph/rbd/resize.go new file mode 100644 index 0000000000..1eecd24909 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/resize.go @@ -0,0 +1,77 @@ +//go:build ceph_preview + +package rbd + +/* +#cgo LDFLAGS: -lrbd +#define _POSIX_C_SOURCE 200112L +#undef _GNU_SOURCE +#include +#include +#include +#include + +extern int resize2Callback(uint64_t, uint64_t, uintptr_t); + +// inline wrapper to cast uintptr_t to void* +static inline int wrap_rbd_resize2( + rbd_image_t image, uint64_t size, bool allow_shrink, uintptr_t arg) { + return rbd_resize2( + image, size, allow_shrink, (librbd_progress_fn_t)resize2Callback, (void*)arg); +}; +*/ +import "C" + +import ( + "github.com/ceph/go-ceph/internal/callbacks" +) + +// Resize2ProgressCallback is the callback function type for Image.Resize2. +type Resize2ProgressCallback func(progress uint64, total uint64, data interface{}) int + +var resizeCallbacks = callbacks.New() + +type resizeProgressCallbackCtx struct { + callback Resize2ProgressCallback + data interface{} +} + +//export resize2Callback +func resize2Callback( + offset, total C.uint64_t, index uintptr, +) C.int { + v := resizeCallbacks.Lookup(index) + ctx := v.(resizeProgressCallbackCtx) + return C.int(ctx.callback(uint64(offset), uint64(total), ctx.data)) +} + +// Resize2 resizes an rbd image and allows configuration of allow_shrink and a callback function. The callback +// function will be called with the first argument as the progress, the second argument as the total, and the third +// argument as an opaque value that is passed to the Resize2 function's data argument in each callback execution. +// The resize operation will be aborted if the progress callback returns a non-zero value. +// +// Implements: +// +// int rbd_resize2(rbd_image_t image, uint64_t size, allow_shrink bool, librbd_progress_fn_t cb, void *cbdata); +func (image *Image) Resize2(size uint64, allowShrink bool, cb Resize2ProgressCallback, data interface{}) error { + // the provided callback must be a real function + if cb == nil { + return rbdError(C.EINVAL) + } + + if err := image.validate(imageIsOpen); err != nil { + return err + } + + ctx := resizeProgressCallbackCtx{ + callback: cb, + data: data, + } + cbIndex := resizeCallbacks.Add(ctx) + defer resizeCallbacks.Remove(cbIndex) + + ret := C.wrap_rbd_resize2(image.image, C.uint64_t(size), C.bool(allowShrink), C.uintptr_t(cbIndex)) + + return getError(ret) + +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/snapshot.go b/vendor/github.com/ceph/go-ceph/rbd/snapshot.go new file mode 100644 index 0000000000..d321b59363 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/snapshot.go @@ -0,0 +1,193 @@ +package rbd + +// #cgo LDFLAGS: -lrbd +// #include +// #include +import "C" + +import ( + "unsafe" + + ts "github.com/ceph/go-ceph/internal/timespec" +) + +// Snapshot represents a snapshot on a particular rbd image. +type Snapshot struct { + image *Image + name string +} + +// CreateSnapshot returns a new Snapshot objects after creating +// a snapshot of the rbd image. 
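+//
+// A short sketch that snapshots an open image and protects the snapshot, the
+// usual prerequisite for cloning from it (img is an assumed open *rbd.Image):
+//
+//	snap, err := img.CreateSnapshot("snap1")
+//	if err != nil {
+//		return err
+//	}
+//	if err := snap.Protect(); err != nil {
+//		return err
+//	}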
+// +// Implements: +// +// int rbd_snap_create(rbd_image_t image, const char *snapname); +func (image *Image) CreateSnapshot(snapname string) (*Snapshot, error) { + if err := image.validate(imageIsOpen); err != nil { + return nil, err + } + + cSnapName := C.CString(snapname) + defer C.free(unsafe.Pointer(cSnapName)) + + ret := C.rbd_snap_create(image.image, cSnapName) + if ret < 0 { + return nil, rbdError(ret) + } + + return &Snapshot{ + image: image, + name: snapname, + }, nil +} + +// validate the attributes listed in the req bitmask, and return an error in +// case the attribute is not set +// Calls snapshot.image.validate(req) to validate the image attributes. +func (snapshot *Snapshot) validate(req uint32) error { + if hasBit(req, snapshotNeedsName) && snapshot.name == "" { + return ErrSnapshotNoName + } else if snapshot.image != nil { + return snapshot.image.validate(req) + } + + return nil +} + +// GetSnapshot constructs a snapshot object for the image given +// the snap name. It does not validate that this snapshot exists. +func (image *Image) GetSnapshot(snapname string) *Snapshot { + return &Snapshot{ + image: image, + name: snapname, + } +} + +// Remove the snapshot from the connected rbd image. +// +// Implements: +// +// int rbd_snap_remove(rbd_image_t image, const char *snapname); +func (snapshot *Snapshot) Remove() error { + if err := snapshot.validate(snapshotNeedsName | imageIsOpen); err != nil { + return err + } + + cSnapName := C.CString(snapshot.name) + defer C.free(unsafe.Pointer(cSnapName)) + + return getError(C.rbd_snap_remove(snapshot.image.image, cSnapName)) +} + +// Rollback the image to the snapshot. +// +// Implements: +// +// int rbd_snap_rollback(rbd_image_t image, const char *snapname); +func (snapshot *Snapshot) Rollback() error { + if err := snapshot.validate(snapshotNeedsName | imageIsOpen); err != nil { + return err + } + + cSnapName := C.CString(snapshot.name) + defer C.free(unsafe.Pointer(cSnapName)) + + return getError(C.rbd_snap_rollback(snapshot.image.image, cSnapName)) +} + +// Protect a snapshot from unwanted deletion. +// +// Implements: +// +// int rbd_snap_protect(rbd_image_t image, const char *snap_name); +func (snapshot *Snapshot) Protect() error { + if err := snapshot.validate(snapshotNeedsName | imageIsOpen); err != nil { + return err + } + + cSnapName := C.CString(snapshot.name) + defer C.free(unsafe.Pointer(cSnapName)) + + return getError(C.rbd_snap_protect(snapshot.image.image, cSnapName)) +} + +// Unprotect stops protecting the snapshot. +// +// Implements: +// +// int rbd_snap_unprotect(rbd_image_t image, const char *snap_name); +func (snapshot *Snapshot) Unprotect() error { + if err := snapshot.validate(snapshotNeedsName | imageIsOpen); err != nil { + return err + } + + cSnapName := C.CString(snapshot.name) + defer C.free(unsafe.Pointer(cSnapName)) + + return getError(C.rbd_snap_unprotect(snapshot.image.image, cSnapName)) +} + +// IsProtected returns true if the snapshot is currently protected. 
+// +// Implements: +// +// int rbd_snap_is_protected(rbd_image_t image, const char *snap_name, +// int *is_protected); +func (snapshot *Snapshot) IsProtected() (bool, error) { + if err := snapshot.validate(snapshotNeedsName | imageIsOpen); err != nil { + return false, err + } + + var cIsProtected C.int + + cSnapName := C.CString(snapshot.name) + defer C.free(unsafe.Pointer(cSnapName)) + + ret := C.rbd_snap_is_protected(snapshot.image.image, cSnapName, + &cIsProtected) + if ret < 0 { + return false, rbdError(ret) + } + + return cIsProtected != 0, nil +} + +// Set updates the rbd image (not the Snapshot) such that the snapshot is the +// source of readable data. +// +// Deprecated: use the SetSnapshot method of the Image type instead +// +// Implements: +// +// int rbd_snap_set(rbd_image_t image, const char *snapname); +func (snapshot *Snapshot) Set() error { + if err := snapshot.validate(snapshotNeedsName | imageIsOpen); err != nil { + return err + } + + return snapshot.image.SetSnapshot(snapshot.name) +} + +// GetSnapTimestamp returns the timestamp of a snapshot for an image. +// For a non-existing snap ID, GetSnapTimestamp() may trigger an assertion +// and crash in the ceph library. +// Check https://tracker.ceph.com/issues/47287 for details. +// +// Implements: +// +// int rbd_snap_get_timestamp(rbd_image_t image, uint64_t snap_id, struct timespec *timestamp) +func (image *Image) GetSnapTimestamp(snapID uint64) (Timespec, error) { + if err := image.validate(imageIsOpen); err != nil { + return Timespec{}, err + } + + var cts C.struct_timespec + + ret := C.rbd_snap_get_timestamp(image.image, C.uint64_t(snapID), &cts) + if ret < 0 { + return Timespec{}, getError(ret) + } + + return Timespec(ts.CStructToTimespec(ts.CTimespecPtr(&cts))), nil +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/snapshot_namespace.go b/vendor/github.com/ceph/go-ceph/rbd/snapshot_namespace.go new file mode 100644 index 0000000000..22607a232e --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/snapshot_namespace.go @@ -0,0 +1,81 @@ +// +// Ceph Mimic introduced rbd_snap_get_namespace_type(). + +package rbd + +// #cgo LDFLAGS: -lrbd +// #include +import "C" + +import ( + "unsafe" + + "github.com/ceph/go-ceph/internal/retry" +) + +// SnapNamespaceType indicates the namespace to which the snapshot belongs to. +type SnapNamespaceType C.rbd_snap_namespace_type_t + +const ( + // SnapNamespaceTypeUser indicates that the snapshot belongs to user namespace. + SnapNamespaceTypeUser = SnapNamespaceType(C.RBD_SNAP_NAMESPACE_TYPE_USER) + + // SnapNamespaceTypeGroup indicates that the snapshot belongs to group namespace. + // Such snapshots will have associated group information. + SnapNamespaceTypeGroup = SnapNamespaceType(C.RBD_SNAP_NAMESPACE_TYPE_GROUP) + + // SnapNamespaceTypeTrash indicates that the snapshot belongs to trash namespace. + SnapNamespaceTypeTrash = SnapNamespaceType(C.RBD_SNAP_NAMESPACE_TYPE_TRASH) +) + +// GetSnapNamespaceType gets the type of namespace to which the snapshot belongs to, +// returns error on failure. 
+// +// Implements: +// +// int rbd_snap_get_namespace_type(rbd_image_t image, uint64_t snap_id, rbd_snap_namespace_type_t *namespace_type) +func (image *Image) GetSnapNamespaceType(snapID uint64) (SnapNamespaceType, error) { + var nsType SnapNamespaceType + + if err := image.validate(imageIsOpen); err != nil { + return nsType, err + } + + ret := C.rbd_snap_get_namespace_type(image.image, + C.uint64_t(snapID), + (*C.rbd_snap_namespace_type_t)(&nsType)) + return nsType, getError(ret) +} + +// GetSnapTrashNamespace returns the original name of the snapshot which was +// moved to the Trash. The caller should make sure that the snapshot ID passed in this +// function belongs to a snapshot already in the Trash. +// +// Implements: +// +// int rbd_snap_get_trash_namespace(rbd_image_t image, uint64_t snap_id, char *original_name, size_t max_length) +func (image *Image) GetSnapTrashNamespace(snapID uint64) (string, error) { + if err := image.validate(imageIsOpen); err != nil { + return "", err + } + + var ( + buf []byte + err error + ) + retry.WithSizes(4096, 262144, func(length int) retry.Hint { + cLength := C.size_t(length) + buf = make([]byte, cLength) + ret := C.rbd_snap_get_trash_namespace(image.image, + C.uint64_t(snapID), + (*C.char)(unsafe.Pointer(&buf[0])), + cLength) + err = getError(ret) + return retry.Size(int(cLength)).If(err == errRange) + }) + + if err != nil { + return "", err + } + return C.GoString((*C.char)(unsafe.Pointer(&buf[0]))), nil +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/snapshot_nautilus.go b/vendor/github.com/ceph/go-ceph/rbd/snapshot_nautilus.go new file mode 100644 index 0000000000..f81e8d9a95 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/snapshot_nautilus.go @@ -0,0 +1,184 @@ +// +// Ceph Nautilus introduced rbd_get_parent() and deprecated rbd_get_parent_info(). +// Ceph Nautilus introduced rbd_list_children3() and deprecated rbd_list_children(). + +package rbd + +// #cgo LDFLAGS: -lrbd +// #include +// #include +import "C" + +import ( + "unsafe" + + "github.com/ceph/go-ceph/internal/retry" +) + +// GetParentInfo looks for the parent of the image and stores the pool, name +// and snapshot-name in the byte-arrays that are passed as arguments. +// +// Implements: +// +// int rbd_get_parent(rbd_image_t image, +// rbd_linked_image_spec_t *parent_image, +// rbd_snap_spec_t *parent_snap) +func (image *Image) GetParentInfo(pool, name, snapname []byte) error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + + parentImage := C.rbd_linked_image_spec_t{} + parentSnap := C.rbd_snap_spec_t{} + ret := C.rbd_get_parent(image.image, &parentImage, &parentSnap) + if ret != 0 { + return rbdError(ret) + } + + defer C.rbd_linked_image_spec_cleanup(&parentImage) + defer C.rbd_snap_spec_cleanup(&parentSnap) + + strlen := int(C.strlen(parentImage.pool_name)) + if len(pool) < strlen { + return rbdError(C.ERANGE) + } + if copy(pool, C.GoString(parentImage.pool_name)) != strlen { + return rbdError(C.ERANGE) + } + + strlen = int(C.strlen(parentImage.image_name)) + if len(name) < strlen { + return rbdError(C.ERANGE) + } + if copy(name, C.GoString(parentImage.image_name)) != strlen { + return rbdError(C.ERANGE) + } + + strlen = int(C.strlen(parentSnap.name)) + if len(snapname) < strlen { + return rbdError(C.ERANGE) + } + if copy(snapname, C.GoString(parentSnap.name)) != strlen { + return rbdError(C.ERANGE) + } + + return nil +} + +// ImageSpec represents the image information. 
+type ImageSpec struct { + ImageName string + ImageID string + PoolName string + PoolNamespace string + PoolID uint64 + Trash bool +} + +// SnapSpec represents the snapshot infomation. +type SnapSpec struct { + ID uint64 + SnapName string +} + +// ParentInfo represents the parent image and the parent snapshot information. +type ParentInfo struct { + Image ImageSpec + Snap SnapSpec +} + +// GetParent looks for the parent of the image and returns the parent image +// information which includes the image name, the pool name and +// the snapshot information. +// +// Implements: +// int rbd_get_parent(rbd_image_t image, rbd_linked_image_spec_t *parent_image, rbd_snap_spec_t *parent_snap) +func (image *Image) GetParent() (*ParentInfo, error) { + if err := image.validate(imageIsOpen); err != nil { + return nil, err + } + + parentImage := C.rbd_linked_image_spec_t{} + parentSnap := C.rbd_snap_spec_t{} + ret := C.rbd_get_parent(image.image, &parentImage, &parentSnap) + if ret != 0 { + return nil, getError(ret) + } + defer C.rbd_linked_image_spec_cleanup(&parentImage) + defer C.rbd_snap_spec_cleanup(&parentSnap) + + imageSpec := ImageSpec{ + ImageName: C.GoString(parentImage.image_name), + ImageID: C.GoString(parentImage.image_id), + PoolName: C.GoString(parentImage.pool_name), + PoolNamespace: C.GoString(parentImage.pool_namespace), + PoolID: uint64(parentImage.pool_id), + Trash: bool(parentImage.trash), + } + + snapSpec := SnapSpec{ + ID: uint64(parentSnap.id), + SnapName: C.GoString(parentSnap.name), + } + + return &ParentInfo{ + Image: imageSpec, + Snap: snapSpec, + }, nil +} + +// ListChildren returns arrays with the pools and names of the images that are +// children of the given image. The index of the pools and images arrays can be +// used to link the two items together. +// +// Implements: +// +// int rbd_list_children3(rbd_image_t image, rbd_linked_image_spec_t *images, +// size_t *max_images); +func (image *Image) ListChildren() (pools []string, images []string, err error) { + if err := image.validate(imageIsOpen); err != nil { + return nil, nil, err + } + + var ( + csize C.size_t + children []C.rbd_linked_image_spec_t + ) + retry.WithSizes(16, 4096, func(size int) retry.Hint { + csize = C.size_t(size) + children = make([]C.rbd_linked_image_spec_t, csize) + ret := C.rbd_list_children3( + image.image, + (*C.rbd_linked_image_spec_t)(unsafe.Pointer(&children[0])), + &csize) + err = getErrorIfNegative(ret) + return retry.Size(int(csize)).If(err == errRange) + }) + if err != nil { + return nil, nil, err + } + defer C.rbd_linked_image_spec_list_cleanup((*C.rbd_linked_image_spec_t)(unsafe.Pointer(&children[0])), csize) + + pools = make([]string, csize) + images = make([]string, csize) + for i, child := range children[:csize] { + pools[i] = C.GoString(child.pool_name) + images[i] = C.GoString(child.image_name) + } + return pools, images, nil +} + +// SetSnapByID updates the rbd image (not the Snapshot) such that the snapshot +// is the source of readable data. 
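+//
+// A brief sketch that resolves a snapshot name to its ID (via GetSnapID from
+// this package's snapshot_octopus.go) and then switches the open image to
+// read from that snapshot (img is an assumed open *rbd.Image):
+//
+//	snapID, err := img.GetSnapID("snap1")
+//	if err != nil {
+//		return err
+//	}
+//	if err := img.SetSnapByID(snapID); err != nil {
+//		return err
+//	}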
+// +// Implements: +// +// int rbd_snap_set_by_id(rbd_image_t image, uint64_t snap_id); +func (image *Image) SetSnapByID(snapID uint64) error { + if err := image.validate(imageIsOpen); err != nil { + return err + } + + ret := C.rbd_snap_set_by_id(image.image, C.uint64_t(snapID)) + return getError(ret) +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/snapshot_octopus.go b/vendor/github.com/ceph/go-ceph/rbd/snapshot_octopus.go new file mode 100644 index 0000000000..86b8e77d3f --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/snapshot_octopus.go @@ -0,0 +1,69 @@ +//go:build !nautilus +// +build !nautilus + +package rbd + +// #cgo LDFLAGS: -lrbd +// #include +// #include +import "C" + +import ( + "unsafe" + + "github.com/ceph/go-ceph/internal/retry" +) + +// GetSnapID returns the snapshot ID for the given snapshot name. +// +// Implements: +// +// int rbd_snap_get_id(rbd_image_t image, const char *snapname, uint64_t *snap_id) +func (image *Image) GetSnapID(snapName string) (uint64, error) { + var snapID C.uint64_t + if err := image.validate(imageIsOpen); err != nil { + return uint64(snapID), err + } + if snapName == "" { + return uint64(snapID), ErrSnapshotNoName + } + + cSnapName := C.CString(snapName) + defer C.free(unsafe.Pointer(cSnapName)) + + ret := C.rbd_snap_get_id(image.image, cSnapName, &snapID) + return uint64(snapID), getError(ret) +} + +// GetSnapByID returns the snapshot name for the given snapshot ID. +// +// Implements: +// +// int rbd_snap_get_name(rbd_image_t image, uint64_t snap_id, char *snapname, size_t *name_len) +func (image *Image) GetSnapByID(snapID uint64) (string, error) { + if err := image.validate(imageIsOpen); err != nil { + return "", err + } + + var ( + buf []byte + err error + ) + // range from 1k to 64KiB + retry.WithSizes(1024, 1<<16, func(len int) retry.Hint { + cLen := C.size_t(len) + buf = make([]byte, cLen) + ret := C.rbd_snap_get_name( + image.image, + (C.uint64_t)(snapID), + (*C.char)(unsafe.Pointer(&buf[0])), + &cLen) + err = getError(ret) + return retry.Size(int(cLen)).If(err == errRange) + }) + + if err != nil { + return "", err + } + return C.GoString((*C.char)(unsafe.Pointer(&buf[0]))), nil +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/snapshot_rename.go b/vendor/github.com/ceph/go-ceph/rbd/snapshot_rename.go new file mode 100644 index 0000000000..105fc954b7 --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/snapshot_rename.go @@ -0,0 +1,35 @@ +package rbd + +// #cgo LDFLAGS: -lrbd +// #include +// #include +import "C" + +import ( + "unsafe" +) + +// Rename a snapshot. 
+// +// Implements: +// +// int rbd_snap_rename(rbd_image_t image, const char *snapname, +// const char* dstsnapsname); +func (snapshot *Snapshot) Rename(destName string) error { + if err := snapshot.validate(imageNeedsIOContext | imageIsOpen | imageNeedsName | snapshotNeedsName); err != nil { + return err + } + + cSrcName := C.CString(snapshot.name) + cDestName := C.CString(destName) + defer C.free(unsafe.Pointer(cSrcName)) + defer C.free(unsafe.Pointer(cDestName)) + + err := C.rbd_snap_rename(snapshot.image.image, cSrcName, cDestName) + if err != 0 { + return getError(err) + } + + snapshot.name = destName + return nil +} diff --git a/vendor/github.com/ceph/go-ceph/rbd/sparsify.go b/vendor/github.com/ceph/go-ceph/rbd/sparsify.go new file mode 100644 index 0000000000..e46ca0d78a --- /dev/null +++ b/vendor/github.com/ceph/go-ceph/rbd/sparsify.go @@ -0,0 +1,89 @@ +//go:build !nautilus +// +build !nautilus + +package rbd + +/* +#cgo LDFLAGS: -lrbd +#include +#include +#include + +extern int sparsifyCallback(uint64_t, uint64_t, uintptr_t); + +// inline wrapper to cast uintptr_t to void* +static inline int wrap_rbd_sparsify_with_progress( + rbd_image_t image, size_t sparse_size, uintptr_t arg) { + return rbd_sparsify_with_progress( + image, sparse_size, (librbd_progress_fn_t)sparsifyCallback, (void*)arg); +}; +*/ +import "C" + +import ( + "github.com/ceph/go-ceph/internal/callbacks" +) + +// SparsifyCallback defines the function signature needed for the +// SparsifyWithProgress callback. +// +// This callback will be called by SparsifyWithProgress when it wishes to +// report progress on sparse. The callback function will be called with the +// first argument containing the current offset within the image being made +// sparse and the second argument containing the total size of the image. The +// third argument is an opaque value that is passed to the SparsifyWithProgress +// function's data argument and every call to the callback will receive the +// same object. The sparsify operation will be aborted if the progress +// callback returns a non-zero value. +type SparsifyCallback func(uint64, uint64, interface{}) int + +var sparsifyCallbacks = callbacks.New() + +type sparsifyCallbackCtx struct { + callback SparsifyCallback + data interface{} +} + +// SparsifyWithProgress makes an image sparse by deallocating runs of zeros. +// The sparseSize value will be used to find runs of zeros and must be +// a power of two no less than 4096 and no larger than the image size. +// The given progress callback will be called to report on the progress +// of sparse. The operation will be aborted if the progress callback returns +// a non-zero value. 
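+//
+// A minimal sketch of a progress callback that only reports progress and
+// never aborts (img is an assumed open *rbd.Image):
+//
+//	cb := func(offset, total uint64, data interface{}) int {
+//		fmt.Printf("sparsify: %d/%d\n", offset, total)
+//		return 0
+//	}
+//	if err := img.SparsifyWithProgress(4096, cb, nil); err != nil {
+//		return err
+//	}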
+//
+// Implements:
+//
+//	int rbd_sparsify_with_progress(rbd_image_t image, size_t sparse_size,
+//	                               librbd_progress_fn_t cb, void *cbdata);
+func (image *Image) SparsifyWithProgress(
+	sparseSize uint, cb SparsifyCallback, data interface{},
+) error {
+	// the provided callback must be a real function
+	if cb == nil {
+		return rbdError(C.EINVAL)
+	}
+
+	if err := image.validate(imageIsOpen); err != nil {
+		return err
+	}
+
+	ctx := sparsifyCallbackCtx{
+		callback: cb,
+		data:     data,
+	}
+	cbIndex := sparsifyCallbacks.Add(ctx)
+	defer sparsifyCallbacks.Remove(cbIndex)
+
+	ret := C.wrap_rbd_sparsify_with_progress(image.image, C.size_t(sparseSize), C.uintptr_t(cbIndex))
+
+	return getError(ret)
+}
+
+//export sparsifyCallback
+func sparsifyCallback(
+	offset, total C.uint64_t, index uintptr,
+) C.int {
+	v := sparsifyCallbacks.Lookup(index)
+	ctx := v.(sparsifyCallbackCtx)
+	return C.int(ctx.callback(uint64(offset), uint64(total), ctx.data))
+}
diff --git a/vendor/github.com/ceph/go-ceph/rbd/watchers.go b/vendor/github.com/ceph/go-ceph/rbd/watchers.go
new file mode 100644
index 0000000000..2190ba6eac
--- /dev/null
+++ b/vendor/github.com/ceph/go-ceph/rbd/watchers.go
@@ -0,0 +1,145 @@
+package rbd
+
+/*
+#cgo LDFLAGS: -lrbd
+#include <rbd/librbd.h>
+
+extern void imageWatchCallback(uintptr_t);
+
+// inline wrapper to cast uintptr_t to void*
+static inline int wrap_rbd_update_watch(rbd_image_t image, uint64_t *handle,
+	uintptr_t arg) {
+	return rbd_update_watch(image, handle, (void*)imageWatchCallback, (void*)arg);
+  };
+
+*/
+import "C"
+
+import (
+	"github.com/ceph/go-ceph/internal/callbacks"
+	"github.com/ceph/go-ceph/internal/retry"
+)
+
+// ImageWatcher is a representation of the rbd_image_watcher_t from librbd.h
+type ImageWatcher struct {
+	Addr   string
+	Id     int64
+	Cookie uint64
+}
+
+// ListWatchers returns the watchers on an RBD image. In case of an error, nil
+// and an error are returned.
+//
+// Note:
+//
+//	Only supported in Ceph Mimic and newer.
+//
+// Implements:
+//
+//	int rbd_watchers_list(rbd_image_t image,
+//	                      rbd_image_watcher_t *watchers, size_t *max_watchers)
+func (image *Image) ListWatchers() ([]ImageWatcher, error) {
+	if err := image.validate(imageIsOpen); err != nil {
+		return nil, err
+	}
+
+	var (
+		err      error
+		count    C.size_t
+		watchers []C.rbd_image_watcher_t
+	)
+	retry.WithSizes(16, 4096, func(size int) retry.Hint {
+		count = C.size_t(size)
+		watchers = make([]C.rbd_image_watcher_t, count)
+		ret := C.rbd_watchers_list(image.image, &watchers[0], &count)
+		err = getErrorIfNegative(ret)
+		return retry.Size(int(count)).If(err == errRange)
+	})
+	if err != nil {
+		return nil, err
+	}
+	defer C.rbd_watchers_list_cleanup(&watchers[0], count)
+
+	imageWatchers := make([]ImageWatcher, count)
+	for i, watcher := range watchers[:count] {
+		imageWatchers[i].Addr = C.GoString(watcher.addr)
+		imageWatchers[i].Id = int64(watcher.id)
+		imageWatchers[i].Cookie = uint64(watcher.cookie)
+	}
+	return imageWatchers, nil
+}
+
+// watchCallbacks tracks the active callbacks for rbd watches
+var watchCallbacks = callbacks.New()
+
+// WatchCallback defines the function signature needed for the UpdateWatch
+// callback.
+type WatchCallback func(interface{})
+
+type watchCallbackCtx struct {
+	callback WatchCallback
+	data     interface{}
+}
+
+// Watch represents an ongoing image metadata watch.
+type Watch struct { + image *Image + wcc watchCallbackCtx + handle C.uint64_t + cbIndex uintptr +} + +// UpdateWatch updates the image object to watch metadata changes to the +// image, returning a Watch object. +// +// Implements: +// +// int rbd_update_watch(rbd_image_t image, uint64_t *handle, +// rbd_update_callback_t watch_cb, void *arg); +func (image *Image) UpdateWatch(cb WatchCallback, data interface{}) (*Watch, error) { + if err := image.validate(imageIsOpen); err != nil { + return nil, err + } + wcc := watchCallbackCtx{ + callback: cb, + data: data, + } + w := &Watch{ + image: image, + wcc: wcc, + cbIndex: watchCallbacks.Add(wcc), + } + + ret := C.wrap_rbd_update_watch( + image.image, + &w.handle, + C.uintptr_t(w.cbIndex)) + if ret != 0 { + return nil, getError(ret) + } + return w, nil +} + +// Unwatch un-registers the image watch. +// +// Implements: +// +// int rbd_update_unwatch(rbd_image_t image, uint64_t handle); +func (w *Watch) Unwatch() error { + if w.image == nil { + return ErrImageNotOpen + } + if err := w.image.validate(imageIsOpen); err != nil { + return err + } + ret := C.rbd_update_unwatch(w.image.image, w.handle) + watchCallbacks.Remove(w.cbIndex) + return getError(ret) +} + +//export imageWatchCallback +func imageWatchCallback(index uintptr) { + v := watchCallbacks.Lookup(index) + wcc := v.(watchCallbackCtx) + wcc.callback(wcc.data) +} diff --git a/vendor/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE new file mode 100644 index 0000000000..5f920e9732 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2022 Alan Shreve (@inconshreveable) + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md new file mode 100644 index 0000000000..7a950d1774 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/README.md @@ -0,0 +1,23 @@ +# mousetrap + +mousetrap is a tiny library that answers a single question. + +On a Windows machine, was the process invoked by someone double clicking on +the executable file while browsing in explorer? + +### Motivation + +Windows developers unfamiliar with command line tools will often "double-click" +the executable for a tool. Because most CLI tools print the help and then exit +when invoked without arguments, this is often very frustrating for those users. + +mousetrap provides a way to detect these invocations so that you can provide +more helpful behavior and instructions on how to run the CLI tool. 
To see what +this looks like, both from an organizational and a technical perspective, see +https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/ + +### The interface + +The library exposes a single interface: + + func StartedByExplorer() (bool) diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go new file mode 100644 index 0000000000..06a91f0868 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/trap_others.go @@ -0,0 +1,16 @@ +//go:build !windows +// +build !windows + +package mousetrap + +// StartedByExplorer returns true if the program was invoked by the user +// double-clicking on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. +// It does not guarantee that the program was run from a terminal. It only can tell you +// whether it was launched from explorer.exe +// +// On non-Windows platforms, it always returns false. +func StartedByExplorer() bool { + return false +} diff --git a/vendor/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go new file mode 100644 index 0000000000..0c56880216 --- /dev/null +++ b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go @@ -0,0 +1,42 @@ +package mousetrap + +import ( + "syscall" + "unsafe" +) + +func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) { + snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0) + if err != nil { + return nil, err + } + defer syscall.CloseHandle(snapshot) + var procEntry syscall.ProcessEntry32 + procEntry.Size = uint32(unsafe.Sizeof(procEntry)) + if err = syscall.Process32First(snapshot, &procEntry); err != nil { + return nil, err + } + for { + if procEntry.ProcessID == uint32(pid) { + return &procEntry, nil + } + err = syscall.Process32Next(snapshot, &procEntry) + if err != nil { + return nil, err + } + } +} + +// StartedByExplorer returns true if the program was invoked by the user double-clicking +// on the executable from explorer.exe +// +// It is conservative and returns false if any of the internal calls fail. +// It does not guarantee that the program was run from a terminal. It only can tell you +// whether it was launched from explorer.exe +func StartedByExplorer() bool { + pe, err := getProcessEntry(syscall.Getppid()) + if err != nil { + return false + } + return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:]) +} diff --git a/vendor/github.com/rook/rook/LICENSE b/vendor/github.com/rook/rook/LICENSE new file mode 100644 index 0000000000..8e8487438a --- /dev/null +++ b/vendor/github.com/rook/rook/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The Rook Authors. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/clientset.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/clientset.go new file mode 100644 index 0000000000..6422595762 --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/clientset.go @@ -0,0 +1,97 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + + cephv1 "github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + CephV1() cephv1.CephV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + cephV1 *cephv1.CephV1Client +} + +// CephV1 retrieves the CephV1Client +func (c *Clientset) CephV1() cephv1.CephV1Interface { + return c.cephV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.cephV1, err = cephv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.cephV1 = cephv1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. 
+func New(c rest.Interface) *Clientset { + var cs Clientset + cs.cephV1 = cephv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/doc.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/doc.go new file mode 100644 index 0000000000..41721ca52d --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/scheme/doc.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/scheme/doc.go new file mode 100644 index 0000000000..7dc3756168 --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/scheme/register.go new file mode 100644 index 0000000000..8f2fcea75d --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package scheme + +import ( + cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + cephv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/ceph.rook.io_client.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/ceph.rook.io_client.go new file mode 100644 index 0000000000..59f25fd0dd --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/ceph.rook.io_client.go @@ -0,0 +1,169 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type CephV1Interface interface { + RESTClient() rest.Interface + CephBlockPoolsGetter + CephBlockPoolRadosNamespacesGetter + CephBucketNotificationsGetter + CephBucketTopicsGetter + CephCOSIDriversGetter + CephClientsGetter + CephClustersGetter + CephFilesystemsGetter + CephFilesystemMirrorsGetter + CephFilesystemSubVolumeGroupsGetter + CephNFSesGetter + CephObjectRealmsGetter + CephObjectStoresGetter + CephObjectStoreUsersGetter + CephObjectZonesGetter + CephObjectZoneGroupsGetter + CephRBDMirrorsGetter +} + +// CephV1Client is used to interact with features provided by the ceph.rook.io group. 
+type CephV1Client struct { + restClient rest.Interface +} + +func (c *CephV1Client) CephBlockPools(namespace string) CephBlockPoolInterface { + return newCephBlockPools(c, namespace) +} + +func (c *CephV1Client) CephBlockPoolRadosNamespaces(namespace string) CephBlockPoolRadosNamespaceInterface { + return newCephBlockPoolRadosNamespaces(c, namespace) +} + +func (c *CephV1Client) CephBucketNotifications(namespace string) CephBucketNotificationInterface { + return newCephBucketNotifications(c, namespace) +} + +func (c *CephV1Client) CephBucketTopics(namespace string) CephBucketTopicInterface { + return newCephBucketTopics(c, namespace) +} + +func (c *CephV1Client) CephCOSIDrivers(namespace string) CephCOSIDriverInterface { + return newCephCOSIDrivers(c, namespace) +} + +func (c *CephV1Client) CephClients(namespace string) CephClientInterface { + return newCephClients(c, namespace) +} + +func (c *CephV1Client) CephClusters(namespace string) CephClusterInterface { + return newCephClusters(c, namespace) +} + +func (c *CephV1Client) CephFilesystems(namespace string) CephFilesystemInterface { + return newCephFilesystems(c, namespace) +} + +func (c *CephV1Client) CephFilesystemMirrors(namespace string) CephFilesystemMirrorInterface { + return newCephFilesystemMirrors(c, namespace) +} + +func (c *CephV1Client) CephFilesystemSubVolumeGroups(namespace string) CephFilesystemSubVolumeGroupInterface { + return newCephFilesystemSubVolumeGroups(c, namespace) +} + +func (c *CephV1Client) CephNFSes(namespace string) CephNFSInterface { + return newCephNFSes(c, namespace) +} + +func (c *CephV1Client) CephObjectRealms(namespace string) CephObjectRealmInterface { + return newCephObjectRealms(c, namespace) +} + +func (c *CephV1Client) CephObjectStores(namespace string) CephObjectStoreInterface { + return newCephObjectStores(c, namespace) +} + +func (c *CephV1Client) CephObjectStoreUsers(namespace string) CephObjectStoreUserInterface { + return newCephObjectStoreUsers(c, namespace) +} + +func (c *CephV1Client) CephObjectZones(namespace string) CephObjectZoneInterface { + return newCephObjectZones(c, namespace) +} + +func (c *CephV1Client) CephObjectZoneGroups(namespace string) CephObjectZoneGroupInterface { + return newCephObjectZoneGroups(c, namespace) +} + +func (c *CephV1Client) CephRBDMirrors(namespace string) CephRBDMirrorInterface { + return newCephRBDMirrors(c, namespace) +} + +// NewForConfig creates a new CephV1Client for the given config. +func NewForConfig(c *rest.Config) (*CephV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CephV1Client{client}, nil +} + +// NewForConfigOrDie creates a new CephV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CephV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CephV1Client for the given RESTClient. 
+func New(c rest.Interface) *CephV1Client { + return &CephV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CephV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephblockpool.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephblockpool.go new file mode 100644 index 0000000000..b222f65fdc --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephblockpool.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephBlockPoolsGetter has a method to return a CephBlockPoolInterface. +// A group's client should implement this interface. +type CephBlockPoolsGetter interface { + CephBlockPools(namespace string) CephBlockPoolInterface +} + +// CephBlockPoolInterface has methods to work with CephBlockPool resources. 
+type CephBlockPoolInterface interface { + Create(ctx context.Context, cephBlockPool *v1.CephBlockPool, opts metav1.CreateOptions) (*v1.CephBlockPool, error) + Update(ctx context.Context, cephBlockPool *v1.CephBlockPool, opts metav1.UpdateOptions) (*v1.CephBlockPool, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephBlockPool, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephBlockPoolList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephBlockPool, err error) + CephBlockPoolExpansion +} + +// cephBlockPools implements CephBlockPoolInterface +type cephBlockPools struct { + client rest.Interface + ns string +} + +// newCephBlockPools returns a CephBlockPools +func newCephBlockPools(c *CephV1Client, namespace string) *cephBlockPools { + return &cephBlockPools{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephBlockPool, and returns the corresponding cephBlockPool object, and an error if there is any. +func (c *cephBlockPools) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephBlockPool, err error) { + result = &v1.CephBlockPool{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephblockpools"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephBlockPools that match those selectors. +func (c *cephBlockPools) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephBlockPoolList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephBlockPoolList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephblockpools"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephBlockPools. +func (c *cephBlockPools) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephblockpools"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephBlockPool and creates it. Returns the server's representation of the cephBlockPool, and an error, if there is any. +func (c *cephBlockPools) Create(ctx context.Context, cephBlockPool *v1.CephBlockPool, opts metav1.CreateOptions) (result *v1.CephBlockPool, err error) { + result = &v1.CephBlockPool{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephblockpools"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephBlockPool). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephBlockPool and updates it. Returns the server's representation of the cephBlockPool, and an error, if there is any. 
+func (c *cephBlockPools) Update(ctx context.Context, cephBlockPool *v1.CephBlockPool, opts metav1.UpdateOptions) (result *v1.CephBlockPool, err error) { + result = &v1.CephBlockPool{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephblockpools"). + Name(cephBlockPool.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephBlockPool). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephBlockPool and deletes it. Returns an error if one occurs. +func (c *cephBlockPools) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephblockpools"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephBlockPools) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephblockpools"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephBlockPool. +func (c *cephBlockPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephBlockPool, err error) { + result = &v1.CephBlockPool{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephblockpools"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephblockpoolradosnamespace.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephblockpoolradosnamespace.go new file mode 100644 index 0000000000..0883cb40bf --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephblockpoolradosnamespace.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephBlockPoolRadosNamespacesGetter has a method to return a CephBlockPoolRadosNamespaceInterface. +// A group's client should implement this interface. +type CephBlockPoolRadosNamespacesGetter interface { + CephBlockPoolRadosNamespaces(namespace string) CephBlockPoolRadosNamespaceInterface +} + +// CephBlockPoolRadosNamespaceInterface has methods to work with CephBlockPoolRadosNamespace resources. 
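The generated typed clients above all follow the same client-gen pattern: a namespace-scoped accessor on CephV1Client returning an interface with Create/Update/Get/List/Watch/Delete/Patch methods. A brief usage sketch, assuming a reachable cluster and a kubeconfig path supplied by the caller (illustrative only, not part of the vendored code; the namespace argument is a placeholder):

    package example

    import (
        "context"
        "fmt"

        rookclient "github.com/rook/rook/pkg/client/clientset/versioned"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/tools/clientcmd"
    )

    // listBlockPools builds the rook clientset from a kubeconfig and prints the
    // names of the CephBlockPool resources in the given namespace.
    func listBlockPools(kubeconfig, namespace string) error {
        cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
        if err != nil {
            return err
        }
        cs, err := rookclient.NewForConfig(cfg)
        if err != nil {
            return err
        }
        pools, err := cs.CephV1().CephBlockPools(namespace).List(context.TODO(), metav1.ListOptions{})
        if err != nil {
            return err
        }
        for _, pool := range pools.Items {
            fmt.Println(pool.Name)
        }
        return nil
    }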
+type CephBlockPoolRadosNamespaceInterface interface { + Create(ctx context.Context, cephBlockPoolRadosNamespace *v1.CephBlockPoolRadosNamespace, opts metav1.CreateOptions) (*v1.CephBlockPoolRadosNamespace, error) + Update(ctx context.Context, cephBlockPoolRadosNamespace *v1.CephBlockPoolRadosNamespace, opts metav1.UpdateOptions) (*v1.CephBlockPoolRadosNamespace, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephBlockPoolRadosNamespace, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephBlockPoolRadosNamespaceList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephBlockPoolRadosNamespace, err error) + CephBlockPoolRadosNamespaceExpansion +} + +// cephBlockPoolRadosNamespaces implements CephBlockPoolRadosNamespaceInterface +type cephBlockPoolRadosNamespaces struct { + client rest.Interface + ns string +} + +// newCephBlockPoolRadosNamespaces returns a CephBlockPoolRadosNamespaces +func newCephBlockPoolRadosNamespaces(c *CephV1Client, namespace string) *cephBlockPoolRadosNamespaces { + return &cephBlockPoolRadosNamespaces{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephBlockPoolRadosNamespace, and returns the corresponding cephBlockPoolRadosNamespace object, and an error if there is any. +func (c *cephBlockPoolRadosNamespaces) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephBlockPoolRadosNamespace, err error) { + result = &v1.CephBlockPoolRadosNamespace{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephblockpoolradosnamespaces"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephBlockPoolRadosNamespaces that match those selectors. +func (c *cephBlockPoolRadosNamespaces) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephBlockPoolRadosNamespaceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephBlockPoolRadosNamespaceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephblockpoolradosnamespaces"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephBlockPoolRadosNamespaces. +func (c *cephBlockPoolRadosNamespaces) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephblockpoolradosnamespaces"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephBlockPoolRadosNamespace and creates it. Returns the server's representation of the cephBlockPoolRadosNamespace, and an error, if there is any. 
+func (c *cephBlockPoolRadosNamespaces) Create(ctx context.Context, cephBlockPoolRadosNamespace *v1.CephBlockPoolRadosNamespace, opts metav1.CreateOptions) (result *v1.CephBlockPoolRadosNamespace, err error) { + result = &v1.CephBlockPoolRadosNamespace{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephblockpoolradosnamespaces"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephBlockPoolRadosNamespace). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephBlockPoolRadosNamespace and updates it. Returns the server's representation of the cephBlockPoolRadosNamespace, and an error, if there is any. +func (c *cephBlockPoolRadosNamespaces) Update(ctx context.Context, cephBlockPoolRadosNamespace *v1.CephBlockPoolRadosNamespace, opts metav1.UpdateOptions) (result *v1.CephBlockPoolRadosNamespace, err error) { + result = &v1.CephBlockPoolRadosNamespace{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephblockpoolradosnamespaces"). + Name(cephBlockPoolRadosNamespace.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephBlockPoolRadosNamespace). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephBlockPoolRadosNamespace and deletes it. Returns an error if one occurs. +func (c *cephBlockPoolRadosNamespaces) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephblockpoolradosnamespaces"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephBlockPoolRadosNamespaces) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephblockpoolradosnamespaces"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephBlockPoolRadosNamespace. +func (c *cephBlockPoolRadosNamespaces) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephBlockPoolRadosNamespace, err error) { + result = &v1.CephBlockPoolRadosNamespace{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephblockpoolradosnamespaces"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephbucketnotification.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephbucketnotification.go new file mode 100644 index 0000000000..37e9dd77ee --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephbucketnotification.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephBucketNotificationsGetter has a method to return a CephBucketNotificationInterface. +// A group's client should implement this interface. +type CephBucketNotificationsGetter interface { + CephBucketNotifications(namespace string) CephBucketNotificationInterface +} + +// CephBucketNotificationInterface has methods to work with CephBucketNotification resources. +type CephBucketNotificationInterface interface { + Create(ctx context.Context, cephBucketNotification *v1.CephBucketNotification, opts metav1.CreateOptions) (*v1.CephBucketNotification, error) + Update(ctx context.Context, cephBucketNotification *v1.CephBucketNotification, opts metav1.UpdateOptions) (*v1.CephBucketNotification, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephBucketNotification, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephBucketNotificationList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephBucketNotification, err error) + CephBucketNotificationExpansion +} + +// cephBucketNotifications implements CephBucketNotificationInterface +type cephBucketNotifications struct { + client rest.Interface + ns string +} + +// newCephBucketNotifications returns a CephBucketNotifications +func newCephBucketNotifications(c *CephV1Client, namespace string) *cephBucketNotifications { + return &cephBucketNotifications{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephBucketNotification, and returns the corresponding cephBucketNotification object, and an error if there is any. +func (c *cephBucketNotifications) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephBucketNotification, err error) { + result = &v1.CephBucketNotification{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephbucketnotifications"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephBucketNotifications that match those selectors. +func (c *cephBucketNotifications) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephBucketNotificationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephBucketNotificationList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephbucketnotifications"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephBucketNotifications. 
+func (c *cephBucketNotifications) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephbucketnotifications"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephBucketNotification and creates it. Returns the server's representation of the cephBucketNotification, and an error, if there is any. +func (c *cephBucketNotifications) Create(ctx context.Context, cephBucketNotification *v1.CephBucketNotification, opts metav1.CreateOptions) (result *v1.CephBucketNotification, err error) { + result = &v1.CephBucketNotification{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephbucketnotifications"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephBucketNotification). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephBucketNotification and updates it. Returns the server's representation of the cephBucketNotification, and an error, if there is any. +func (c *cephBucketNotifications) Update(ctx context.Context, cephBucketNotification *v1.CephBucketNotification, opts metav1.UpdateOptions) (result *v1.CephBucketNotification, err error) { + result = &v1.CephBucketNotification{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephbucketnotifications"). + Name(cephBucketNotification.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephBucketNotification). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephBucketNotification and deletes it. Returns an error if one occurs. +func (c *cephBucketNotifications) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephbucketnotifications"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephBucketNotifications) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephbucketnotifications"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephBucketNotification. +func (c *cephBucketNotifications) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephBucketNotification, err error) { + result = &v1.CephBucketNotification{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephbucketnotifications"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephbuckettopic.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephbuckettopic.go new file mode 100644 index 0000000000..89ab7cd9b2 --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephbuckettopic.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephBucketTopicsGetter has a method to return a CephBucketTopicInterface. +// A group's client should implement this interface. +type CephBucketTopicsGetter interface { + CephBucketTopics(namespace string) CephBucketTopicInterface +} + +// CephBucketTopicInterface has methods to work with CephBucketTopic resources. +type CephBucketTopicInterface interface { + Create(ctx context.Context, cephBucketTopic *v1.CephBucketTopic, opts metav1.CreateOptions) (*v1.CephBucketTopic, error) + Update(ctx context.Context, cephBucketTopic *v1.CephBucketTopic, opts metav1.UpdateOptions) (*v1.CephBucketTopic, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephBucketTopic, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephBucketTopicList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephBucketTopic, err error) + CephBucketTopicExpansion +} + +// cephBucketTopics implements CephBucketTopicInterface +type cephBucketTopics struct { + client rest.Interface + ns string +} + +// newCephBucketTopics returns a CephBucketTopics +func newCephBucketTopics(c *CephV1Client, namespace string) *cephBucketTopics { + return &cephBucketTopics{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephBucketTopic, and returns the corresponding cephBucketTopic object, and an error if there is any. +func (c *cephBucketTopics) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephBucketTopic, err error) { + result = &v1.CephBucketTopic{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephbuckettopics"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephBucketTopics that match those selectors. +func (c *cephBucketTopics) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephBucketTopicList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephBucketTopicList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephbuckettopics"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephBucketTopics. +func (c *cephBucketTopics) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephbuckettopics"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephBucketTopic and creates it. Returns the server's representation of the cephBucketTopic, and an error, if there is any. +func (c *cephBucketTopics) Create(ctx context.Context, cephBucketTopic *v1.CephBucketTopic, opts metav1.CreateOptions) (result *v1.CephBucketTopic, err error) { + result = &v1.CephBucketTopic{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephbuckettopics"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephBucketTopic). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephBucketTopic and updates it. Returns the server's representation of the cephBucketTopic, and an error, if there is any. +func (c *cephBucketTopics) Update(ctx context.Context, cephBucketTopic *v1.CephBucketTopic, opts metav1.UpdateOptions) (result *v1.CephBucketTopic, err error) { + result = &v1.CephBucketTopic{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephbuckettopics"). + Name(cephBucketTopic.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephBucketTopic). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephBucketTopic and deletes it. Returns an error if one occurs. +func (c *cephBucketTopics) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephbuckettopics"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephBucketTopics) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephbuckettopics"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephBucketTopic. +func (c *cephBucketTopics) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephBucketTopic, err error) { + result = &v1.CephBucketTopic{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephbuckettopics"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephclient.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephclient.go new file mode 100644 index 0000000000..db45d7ef07 --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephclient.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephClientsGetter has a method to return a CephClientInterface. +// A group's client should implement this interface. +type CephClientsGetter interface { + CephClients(namespace string) CephClientInterface +} + +// CephClientInterface has methods to work with CephClient resources. +type CephClientInterface interface { + Create(ctx context.Context, cephClient *v1.CephClient, opts metav1.CreateOptions) (*v1.CephClient, error) + Update(ctx context.Context, cephClient *v1.CephClient, opts metav1.UpdateOptions) (*v1.CephClient, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephClient, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephClientList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephClient, err error) + CephClientExpansion +} + +// cephClients implements CephClientInterface +type cephClients struct { + client rest.Interface + ns string +} + +// newCephClients returns a CephClients +func newCephClients(c *CephV1Client, namespace string) *cephClients { + return &cephClients{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephClient, and returns the corresponding cephClient object, and an error if there is any. +func (c *cephClients) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephClient, err error) { + result = &v1.CephClient{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephclients"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephClients that match those selectors. 
+func (c *cephClients) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephClientList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephClientList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephclients"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephClients. +func (c *cephClients) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephclients"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephClient and creates it. Returns the server's representation of the cephClient, and an error, if there is any. +func (c *cephClients) Create(ctx context.Context, cephClient *v1.CephClient, opts metav1.CreateOptions) (result *v1.CephClient, err error) { + result = &v1.CephClient{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephclients"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephClient). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephClient and updates it. Returns the server's representation of the cephClient, and an error, if there is any. +func (c *cephClients) Update(ctx context.Context, cephClient *v1.CephClient, opts metav1.UpdateOptions) (result *v1.CephClient, err error) { + result = &v1.CephClient{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephclients"). + Name(cephClient.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephClient). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephClient and deletes it. Returns an error if one occurs. +func (c *cephClients) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephclients"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephClients) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephclients"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephClient. +func (c *cephClients) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephClient, err error) { + result = &v1.CephClient{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephclients"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephcluster.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephcluster.go new file mode 100644 index 0000000000..7ebe4e2c98 --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephcluster.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephClustersGetter has a method to return a CephClusterInterface. +// A group's client should implement this interface. +type CephClustersGetter interface { + CephClusters(namespace string) CephClusterInterface +} + +// CephClusterInterface has methods to work with CephCluster resources. +type CephClusterInterface interface { + Create(ctx context.Context, cephCluster *v1.CephCluster, opts metav1.CreateOptions) (*v1.CephCluster, error) + Update(ctx context.Context, cephCluster *v1.CephCluster, opts metav1.UpdateOptions) (*v1.CephCluster, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephCluster, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephClusterList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephCluster, err error) + CephClusterExpansion +} + +// cephClusters implements CephClusterInterface +type cephClusters struct { + client rest.Interface + ns string +} + +// newCephClusters returns a CephClusters +func newCephClusters(c *CephV1Client, namespace string) *cephClusters { + return &cephClusters{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephCluster, and returns the corresponding cephCluster object, and an error if there is any. +func (c *cephClusters) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephCluster, err error) { + result = &v1.CephCluster{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephclusters"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephClusters that match those selectors. 
+func (c *cephClusters) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephClusterList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephClusterList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephclusters"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephClusters. +func (c *cephClusters) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephclusters"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephCluster and creates it. Returns the server's representation of the cephCluster, and an error, if there is any. +func (c *cephClusters) Create(ctx context.Context, cephCluster *v1.CephCluster, opts metav1.CreateOptions) (result *v1.CephCluster, err error) { + result = &v1.CephCluster{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephclusters"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephCluster). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephCluster and updates it. Returns the server's representation of the cephCluster, and an error, if there is any. +func (c *cephClusters) Update(ctx context.Context, cephCluster *v1.CephCluster, opts metav1.UpdateOptions) (result *v1.CephCluster, err error) { + result = &v1.CephCluster{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephclusters"). + Name(cephCluster.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephCluster). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephCluster and deletes it. Returns an error if one occurs. +func (c *cephClusters) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephclusters"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephClusters) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephclusters"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephCluster. +func (c *cephClusters) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephCluster, err error) { + result = &v1.CephCluster{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephclusters"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephcosidriver.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephcosidriver.go new file mode 100644 index 0000000000..7e084f5214 --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephcosidriver.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephCOSIDriversGetter has a method to return a CephCOSIDriverInterface. +// A group's client should implement this interface. +type CephCOSIDriversGetter interface { + CephCOSIDrivers(namespace string) CephCOSIDriverInterface +} + +// CephCOSIDriverInterface has methods to work with CephCOSIDriver resources. +type CephCOSIDriverInterface interface { + Create(ctx context.Context, cephCOSIDriver *v1.CephCOSIDriver, opts metav1.CreateOptions) (*v1.CephCOSIDriver, error) + Update(ctx context.Context, cephCOSIDriver *v1.CephCOSIDriver, opts metav1.UpdateOptions) (*v1.CephCOSIDriver, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephCOSIDriver, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephCOSIDriverList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephCOSIDriver, err error) + CephCOSIDriverExpansion +} + +// cephCOSIDrivers implements CephCOSIDriverInterface +type cephCOSIDrivers struct { + client rest.Interface + ns string +} + +// newCephCOSIDrivers returns a CephCOSIDrivers +func newCephCOSIDrivers(c *CephV1Client, namespace string) *cephCOSIDrivers { + return &cephCOSIDrivers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephCOSIDriver, and returns the corresponding cephCOSIDriver object, and an error if there is any. +func (c *cephCOSIDrivers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephCOSIDriver, err error) { + result = &v1.CephCOSIDriver{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephcosidrivers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephCOSIDrivers that match those selectors. 
+func (c *cephCOSIDrivers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephCOSIDriverList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephCOSIDriverList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephcosidrivers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephCOSIDrivers. +func (c *cephCOSIDrivers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephcosidrivers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephCOSIDriver and creates it. Returns the server's representation of the cephCOSIDriver, and an error, if there is any. +func (c *cephCOSIDrivers) Create(ctx context.Context, cephCOSIDriver *v1.CephCOSIDriver, opts metav1.CreateOptions) (result *v1.CephCOSIDriver, err error) { + result = &v1.CephCOSIDriver{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephcosidrivers"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephCOSIDriver). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephCOSIDriver and updates it. Returns the server's representation of the cephCOSIDriver, and an error, if there is any. +func (c *cephCOSIDrivers) Update(ctx context.Context, cephCOSIDriver *v1.CephCOSIDriver, opts metav1.UpdateOptions) (result *v1.CephCOSIDriver, err error) { + result = &v1.CephCOSIDriver{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephcosidrivers"). + Name(cephCOSIDriver.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephCOSIDriver). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephCOSIDriver and deletes it. Returns an error if one occurs. +func (c *cephCOSIDrivers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephcosidrivers"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephCOSIDrivers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephcosidrivers"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephCOSIDriver. +func (c *cephCOSIDrivers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephCOSIDriver, err error) { + result = &v1.CephCOSIDriver{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephcosidrivers"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystem.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystem.go new file mode 100644 index 0000000000..1dccce1ef2 --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystem.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephFilesystemsGetter has a method to return a CephFilesystemInterface. +// A group's client should implement this interface. +type CephFilesystemsGetter interface { + CephFilesystems(namespace string) CephFilesystemInterface +} + +// CephFilesystemInterface has methods to work with CephFilesystem resources. +type CephFilesystemInterface interface { + Create(ctx context.Context, cephFilesystem *v1.CephFilesystem, opts metav1.CreateOptions) (*v1.CephFilesystem, error) + Update(ctx context.Context, cephFilesystem *v1.CephFilesystem, opts metav1.UpdateOptions) (*v1.CephFilesystem, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephFilesystem, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephFilesystemList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephFilesystem, err error) + CephFilesystemExpansion +} + +// cephFilesystems implements CephFilesystemInterface +type cephFilesystems struct { + client rest.Interface + ns string +} + +// newCephFilesystems returns a CephFilesystems +func newCephFilesystems(c *CephV1Client, namespace string) *cephFilesystems { + return &cephFilesystems{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephFilesystem, and returns the corresponding cephFilesystem object, and an error if there is any. +func (c *cephFilesystems) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephFilesystem, err error) { + result = &v1.CephFilesystem{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephfilesystems"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephFilesystems that match those selectors. 
+func (c *cephFilesystems) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephFilesystemList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephFilesystemList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephfilesystems"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephFilesystems. +func (c *cephFilesystems) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephfilesystems"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephFilesystem and creates it. Returns the server's representation of the cephFilesystem, and an error, if there is any. +func (c *cephFilesystems) Create(ctx context.Context, cephFilesystem *v1.CephFilesystem, opts metav1.CreateOptions) (result *v1.CephFilesystem, err error) { + result = &v1.CephFilesystem{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephfilesystems"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephFilesystem). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephFilesystem and updates it. Returns the server's representation of the cephFilesystem, and an error, if there is any. +func (c *cephFilesystems) Update(ctx context.Context, cephFilesystem *v1.CephFilesystem, opts metav1.UpdateOptions) (result *v1.CephFilesystem, err error) { + result = &v1.CephFilesystem{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephfilesystems"). + Name(cephFilesystem.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephFilesystem). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephFilesystem and deletes it. Returns an error if one occurs. +func (c *cephFilesystems) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephfilesystems"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephFilesystems) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephfilesystems"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephFilesystem. +func (c *cephFilesystems) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephFilesystem, err error) { + result = &v1.CephFilesystem{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephfilesystems"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystemmirror.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystemmirror.go new file mode 100644 index 0000000000..867b42f092 --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystemmirror.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephFilesystemMirrorsGetter has a method to return a CephFilesystemMirrorInterface. +// A group's client should implement this interface. +type CephFilesystemMirrorsGetter interface { + CephFilesystemMirrors(namespace string) CephFilesystemMirrorInterface +} + +// CephFilesystemMirrorInterface has methods to work with CephFilesystemMirror resources. +type CephFilesystemMirrorInterface interface { + Create(ctx context.Context, cephFilesystemMirror *v1.CephFilesystemMirror, opts metav1.CreateOptions) (*v1.CephFilesystemMirror, error) + Update(ctx context.Context, cephFilesystemMirror *v1.CephFilesystemMirror, opts metav1.UpdateOptions) (*v1.CephFilesystemMirror, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephFilesystemMirror, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephFilesystemMirrorList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephFilesystemMirror, err error) + CephFilesystemMirrorExpansion +} + +// cephFilesystemMirrors implements CephFilesystemMirrorInterface +type cephFilesystemMirrors struct { + client rest.Interface + ns string +} + +// newCephFilesystemMirrors returns a CephFilesystemMirrors +func newCephFilesystemMirrors(c *CephV1Client, namespace string) *cephFilesystemMirrors { + return &cephFilesystemMirrors{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephFilesystemMirror, and returns the corresponding cephFilesystemMirror object, and an error if there is any. +func (c *cephFilesystemMirrors) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephFilesystemMirror, err error) { + result = &v1.CephFilesystemMirror{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephfilesystemmirrors"). + Name(name). 
+ VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephFilesystemMirrors that match those selectors. +func (c *cephFilesystemMirrors) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephFilesystemMirrorList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephFilesystemMirrorList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephfilesystemmirrors"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephFilesystemMirrors. +func (c *cephFilesystemMirrors) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephfilesystemmirrors"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephFilesystemMirror and creates it. Returns the server's representation of the cephFilesystemMirror, and an error, if there is any. +func (c *cephFilesystemMirrors) Create(ctx context.Context, cephFilesystemMirror *v1.CephFilesystemMirror, opts metav1.CreateOptions) (result *v1.CephFilesystemMirror, err error) { + result = &v1.CephFilesystemMirror{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephfilesystemmirrors"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephFilesystemMirror). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephFilesystemMirror and updates it. Returns the server's representation of the cephFilesystemMirror, and an error, if there is any. +func (c *cephFilesystemMirrors) Update(ctx context.Context, cephFilesystemMirror *v1.CephFilesystemMirror, opts metav1.UpdateOptions) (result *v1.CephFilesystemMirror, err error) { + result = &v1.CephFilesystemMirror{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephfilesystemmirrors"). + Name(cephFilesystemMirror.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephFilesystemMirror). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephFilesystemMirror and deletes it. Returns an error if one occurs. +func (c *cephFilesystemMirrors) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephfilesystemmirrors"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephFilesystemMirrors) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephfilesystemmirrors"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephFilesystemMirror. 
+func (c *cephFilesystemMirrors) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephFilesystemMirror, err error) { + result = &v1.CephFilesystemMirror{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephfilesystemmirrors"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystemsubvolumegroup.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystemsubvolumegroup.go new file mode 100644 index 0000000000..80a66e56aa --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephfilesystemsubvolumegroup.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephFilesystemSubVolumeGroupsGetter has a method to return a CephFilesystemSubVolumeGroupInterface. +// A group's client should implement this interface. +type CephFilesystemSubVolumeGroupsGetter interface { + CephFilesystemSubVolumeGroups(namespace string) CephFilesystemSubVolumeGroupInterface +} + +// CephFilesystemSubVolumeGroupInterface has methods to work with CephFilesystemSubVolumeGroup resources. 
+type CephFilesystemSubVolumeGroupInterface interface { + Create(ctx context.Context, cephFilesystemSubVolumeGroup *v1.CephFilesystemSubVolumeGroup, opts metav1.CreateOptions) (*v1.CephFilesystemSubVolumeGroup, error) + Update(ctx context.Context, cephFilesystemSubVolumeGroup *v1.CephFilesystemSubVolumeGroup, opts metav1.UpdateOptions) (*v1.CephFilesystemSubVolumeGroup, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephFilesystemSubVolumeGroup, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephFilesystemSubVolumeGroupList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephFilesystemSubVolumeGroup, err error) + CephFilesystemSubVolumeGroupExpansion +} + +// cephFilesystemSubVolumeGroups implements CephFilesystemSubVolumeGroupInterface +type cephFilesystemSubVolumeGroups struct { + client rest.Interface + ns string +} + +// newCephFilesystemSubVolumeGroups returns a CephFilesystemSubVolumeGroups +func newCephFilesystemSubVolumeGroups(c *CephV1Client, namespace string) *cephFilesystemSubVolumeGroups { + return &cephFilesystemSubVolumeGroups{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephFilesystemSubVolumeGroup, and returns the corresponding cephFilesystemSubVolumeGroup object, and an error if there is any. +func (c *cephFilesystemSubVolumeGroups) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephFilesystemSubVolumeGroup, err error) { + result = &v1.CephFilesystemSubVolumeGroup{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephfilesystemsubvolumegroups"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephFilesystemSubVolumeGroups that match those selectors. +func (c *cephFilesystemSubVolumeGroups) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephFilesystemSubVolumeGroupList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephFilesystemSubVolumeGroupList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephfilesystemsubvolumegroups"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephFilesystemSubVolumeGroups. +func (c *cephFilesystemSubVolumeGroups) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephfilesystemsubvolumegroups"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephFilesystemSubVolumeGroup and creates it. Returns the server's representation of the cephFilesystemSubVolumeGroup, and an error, if there is any. 
+func (c *cephFilesystemSubVolumeGroups) Create(ctx context.Context, cephFilesystemSubVolumeGroup *v1.CephFilesystemSubVolumeGroup, opts metav1.CreateOptions) (result *v1.CephFilesystemSubVolumeGroup, err error) { + result = &v1.CephFilesystemSubVolumeGroup{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephfilesystemsubvolumegroups"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephFilesystemSubVolumeGroup). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephFilesystemSubVolumeGroup and updates it. Returns the server's representation of the cephFilesystemSubVolumeGroup, and an error, if there is any. +func (c *cephFilesystemSubVolumeGroups) Update(ctx context.Context, cephFilesystemSubVolumeGroup *v1.CephFilesystemSubVolumeGroup, opts metav1.UpdateOptions) (result *v1.CephFilesystemSubVolumeGroup, err error) { + result = &v1.CephFilesystemSubVolumeGroup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephfilesystemsubvolumegroups"). + Name(cephFilesystemSubVolumeGroup.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephFilesystemSubVolumeGroup). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephFilesystemSubVolumeGroup and deletes it. Returns an error if one occurs. +func (c *cephFilesystemSubVolumeGroups) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephfilesystemsubvolumegroups"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephFilesystemSubVolumeGroups) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephfilesystemsubvolumegroups"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephFilesystemSubVolumeGroup. +func (c *cephFilesystemSubVolumeGroups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephFilesystemSubVolumeGroup, err error) { + result = &v1.CephFilesystemSubVolumeGroup{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephfilesystemsubvolumegroups"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephnfs.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephnfs.go new file mode 100644 index 0000000000..bc43516542 --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephnfs.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephNFSesGetter has a method to return a CephNFSInterface. +// A group's client should implement this interface. +type CephNFSesGetter interface { + CephNFSes(namespace string) CephNFSInterface +} + +// CephNFSInterface has methods to work with CephNFS resources. +type CephNFSInterface interface { + Create(ctx context.Context, cephNFS *v1.CephNFS, opts metav1.CreateOptions) (*v1.CephNFS, error) + Update(ctx context.Context, cephNFS *v1.CephNFS, opts metav1.UpdateOptions) (*v1.CephNFS, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephNFS, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephNFSList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephNFS, err error) + CephNFSExpansion +} + +// cephNFSes implements CephNFSInterface +type cephNFSes struct { + client rest.Interface + ns string +} + +// newCephNFSes returns a CephNFSes +func newCephNFSes(c *CephV1Client, namespace string) *cephNFSes { + return &cephNFSes{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephNFS, and returns the corresponding cephNFS object, and an error if there is any. +func (c *cephNFSes) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephNFS, err error) { + result = &v1.CephNFS{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephnfses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephNFSes that match those selectors. +func (c *cephNFSes) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephNFSList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephNFSList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephnfses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephNFSes. +func (c *cephNFSes) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephnfses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephNFS and creates it. Returns the server's representation of the cephNFS, and an error, if there is any. 
+func (c *cephNFSes) Create(ctx context.Context, cephNFS *v1.CephNFS, opts metav1.CreateOptions) (result *v1.CephNFS, err error) { + result = &v1.CephNFS{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephnfses"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephNFS). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephNFS and updates it. Returns the server's representation of the cephNFS, and an error, if there is any. +func (c *cephNFSes) Update(ctx context.Context, cephNFS *v1.CephNFS, opts metav1.UpdateOptions) (result *v1.CephNFS, err error) { + result = &v1.CephNFS{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephnfses"). + Name(cephNFS.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephNFS). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephNFS and deletes it. Returns an error if one occurs. +func (c *cephNFSes) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephnfses"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephNFSes) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephnfses"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephNFS. +func (c *cephNFSes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephNFS, err error) { + result = &v1.CephNFS{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephnfses"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectrealm.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectrealm.go new file mode 100644 index 0000000000..a408ab8526 --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectrealm.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
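The cephNFSes client completed here is reached in practice through the versioned clientset rather than constructed directly. The following is a minimal sketch of that flow, assuming the clientset exposes the NewForConfig constructor and CephV1() group accessor that client-gen conventionally generates; the kubeconfig path and the rook-ceph namespace are placeholders, not part of this patch.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	rookclient "github.com/rook/rook/pkg/client/clientset/versioned"
)

func main() {
	// Build a rest.Config from a kubeconfig file; the path is a placeholder.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}

	// NewForConfig is the constructor client-gen conventionally emits for the
	// versioned clientset; CephV1() is assumed to be its ceph.rook.io/v1 accessor.
	clientset, err := rookclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// CephNFSes(namespace) returns the CephNFSInterface generated above.
	nfsList, err := clientset.CephV1().CephNFSes("rook-ceph").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, nfs := range nfsList.Items {
		fmt.Println(nfs.Namespace, nfs.Name)
	}
}

The same pattern applies to every typed client in this group; only the getter name and the result types change.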
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephObjectRealmsGetter has a method to return a CephObjectRealmInterface. +// A group's client should implement this interface. +type CephObjectRealmsGetter interface { + CephObjectRealms(namespace string) CephObjectRealmInterface +} + +// CephObjectRealmInterface has methods to work with CephObjectRealm resources. +type CephObjectRealmInterface interface { + Create(ctx context.Context, cephObjectRealm *v1.CephObjectRealm, opts metav1.CreateOptions) (*v1.CephObjectRealm, error) + Update(ctx context.Context, cephObjectRealm *v1.CephObjectRealm, opts metav1.UpdateOptions) (*v1.CephObjectRealm, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephObjectRealm, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephObjectRealmList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectRealm, err error) + CephObjectRealmExpansion +} + +// cephObjectRealms implements CephObjectRealmInterface +type cephObjectRealms struct { + client rest.Interface + ns string +} + +// newCephObjectRealms returns a CephObjectRealms +func newCephObjectRealms(c *CephV1Client, namespace string) *cephObjectRealms { + return &cephObjectRealms{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephObjectRealm, and returns the corresponding cephObjectRealm object, and an error if there is any. +func (c *cephObjectRealms) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephObjectRealm, err error) { + result = &v1.CephObjectRealm{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephobjectrealms"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephObjectRealms that match those selectors. +func (c *cephObjectRealms) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephObjectRealmList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephObjectRealmList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephobjectrealms"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephObjectRealms. +func (c *cephObjectRealms) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephobjectrealms"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephObjectRealm and creates it. 
Returns the server's representation of the cephObjectRealm, and an error, if there is any. +func (c *cephObjectRealms) Create(ctx context.Context, cephObjectRealm *v1.CephObjectRealm, opts metav1.CreateOptions) (result *v1.CephObjectRealm, err error) { + result = &v1.CephObjectRealm{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephobjectrealms"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephObjectRealm). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephObjectRealm and updates it. Returns the server's representation of the cephObjectRealm, and an error, if there is any. +func (c *cephObjectRealms) Update(ctx context.Context, cephObjectRealm *v1.CephObjectRealm, opts metav1.UpdateOptions) (result *v1.CephObjectRealm, err error) { + result = &v1.CephObjectRealm{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephobjectrealms"). + Name(cephObjectRealm.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephObjectRealm). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephObjectRealm and deletes it. Returns an error if one occurs. +func (c *cephObjectRealms) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephobjectrealms"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephObjectRealms) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephobjectrealms"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephObjectRealm. +func (c *cephObjectRealms) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectRealm, err error) { + result = &v1.CephObjectRealm{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephobjectrealms"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectstore.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectstore.go new file mode 100644 index 0000000000..7b31563635 --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectstore.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
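The Patch method generated for each of these clients forwards an opaque patch body and a patch type to the API server without interpreting either. Below is a hedged sketch of one way a caller might use it, sending a JSON merge patch that labels a CephObjectRealm; the CephObjectRealmsGetter parameter is the interface defined in this file, while the namespace, realm name, and label value are illustrative only.

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	cephclient "github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1"
)

// labelRealm adds a label to an existing CephObjectRealm via a JSON merge patch.
// The namespace, realm name, and label are example values.
func labelRealm(ctx context.Context, c cephclient.CephObjectRealmsGetter) error {
	patch := []byte(`{"metadata":{"labels":{"migration":"pending"}}}`)
	realm, err := c.CephObjectRealms("rook-ceph").Patch(
		ctx, "my-realm", types.MergePatchType, patch, metav1.PatchOptions{})
	if err != nil {
		return err
	}
	fmt.Println("patched realm, resourceVersion:", realm.ResourceVersion)
	return nil
}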
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephObjectStoresGetter has a method to return a CephObjectStoreInterface. +// A group's client should implement this interface. +type CephObjectStoresGetter interface { + CephObjectStores(namespace string) CephObjectStoreInterface +} + +// CephObjectStoreInterface has methods to work with CephObjectStore resources. +type CephObjectStoreInterface interface { + Create(ctx context.Context, cephObjectStore *v1.CephObjectStore, opts metav1.CreateOptions) (*v1.CephObjectStore, error) + Update(ctx context.Context, cephObjectStore *v1.CephObjectStore, opts metav1.UpdateOptions) (*v1.CephObjectStore, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephObjectStore, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephObjectStoreList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectStore, err error) + CephObjectStoreExpansion +} + +// cephObjectStores implements CephObjectStoreInterface +type cephObjectStores struct { + client rest.Interface + ns string +} + +// newCephObjectStores returns a CephObjectStores +func newCephObjectStores(c *CephV1Client, namespace string) *cephObjectStores { + return &cephObjectStores{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephObjectStore, and returns the corresponding cephObjectStore object, and an error if there is any. +func (c *cephObjectStores) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephObjectStore, err error) { + result = &v1.CephObjectStore{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephobjectstores"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephObjectStores that match those selectors. +func (c *cephObjectStores) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephObjectStoreList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephObjectStoreList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephobjectstores"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephObjectStores. +func (c *cephObjectStores) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephobjectstores"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephObjectStore and creates it. 
Returns the server's representation of the cephObjectStore, and an error, if there is any. +func (c *cephObjectStores) Create(ctx context.Context, cephObjectStore *v1.CephObjectStore, opts metav1.CreateOptions) (result *v1.CephObjectStore, err error) { + result = &v1.CephObjectStore{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephobjectstores"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephObjectStore). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephObjectStore and updates it. Returns the server's representation of the cephObjectStore, and an error, if there is any. +func (c *cephObjectStores) Update(ctx context.Context, cephObjectStore *v1.CephObjectStore, opts metav1.UpdateOptions) (result *v1.CephObjectStore, err error) { + result = &v1.CephObjectStore{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephobjectstores"). + Name(cephObjectStore.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephObjectStore). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephObjectStore and deletes it. Returns an error if one occurs. +func (c *cephObjectStores) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephobjectstores"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephObjectStores) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephobjectstores"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephObjectStore. +func (c *cephObjectStores) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectStore, err error) { + result = &v1.CephObjectStore{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephobjectstores"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectstoreuser.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectstoreuser.go new file mode 100644 index 0000000000..69e929c27b --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectstoreuser.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
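Watch, as generated above, flips opts.Watch to true and hands back a watch.Interface whose event channel the caller is expected to drain and eventually stop. A small sketch under the same assumptions as the earlier examples (placeholder namespace, illustrative timeout), typed against the CephObjectStoresGetter interface from this file:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
	cephclient "github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1"
)

// watchObjectStores prints add/modify/delete events for CephObjectStore objects in
// one namespace until the server closes the watch (bounded here by TimeoutSeconds).
func watchObjectStores(ctx context.Context, c cephclient.CephObjectStoresGetter) error {
	timeout := int64(60) // seconds; becomes the Timeout() set on the request above
	w, err := c.CephObjectStores("rook-ceph").Watch(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})
	if err != nil {
		return err
	}
	defer w.Stop()

	for ev := range w.ResultChan() {
		store, ok := ev.Object.(*cephv1.CephObjectStore)
		if !ok {
			continue // e.g. a *metav1.Status carried by an error event
		}
		fmt.Printf("%s CephObjectStore %s/%s\n", ev.Type, store.Namespace, store.Name)
	}
	return nil
}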
+ +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephObjectStoreUsersGetter has a method to return a CephObjectStoreUserInterface. +// A group's client should implement this interface. +type CephObjectStoreUsersGetter interface { + CephObjectStoreUsers(namespace string) CephObjectStoreUserInterface +} + +// CephObjectStoreUserInterface has methods to work with CephObjectStoreUser resources. +type CephObjectStoreUserInterface interface { + Create(ctx context.Context, cephObjectStoreUser *v1.CephObjectStoreUser, opts metav1.CreateOptions) (*v1.CephObjectStoreUser, error) + Update(ctx context.Context, cephObjectStoreUser *v1.CephObjectStoreUser, opts metav1.UpdateOptions) (*v1.CephObjectStoreUser, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephObjectStoreUser, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephObjectStoreUserList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectStoreUser, err error) + CephObjectStoreUserExpansion +} + +// cephObjectStoreUsers implements CephObjectStoreUserInterface +type cephObjectStoreUsers struct { + client rest.Interface + ns string +} + +// newCephObjectStoreUsers returns a CephObjectStoreUsers +func newCephObjectStoreUsers(c *CephV1Client, namespace string) *cephObjectStoreUsers { + return &cephObjectStoreUsers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephObjectStoreUser, and returns the corresponding cephObjectStoreUser object, and an error if there is any. +func (c *cephObjectStoreUsers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephObjectStoreUser, err error) { + result = &v1.CephObjectStoreUser{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephobjectstoreusers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephObjectStoreUsers that match those selectors. +func (c *cephObjectStoreUsers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephObjectStoreUserList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephObjectStoreUserList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephobjectstoreusers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephObjectStoreUsers. +func (c *cephObjectStoreUsers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephobjectstoreusers"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephObjectStoreUser and creates it. Returns the server's representation of the cephObjectStoreUser, and an error, if there is any. +func (c *cephObjectStoreUsers) Create(ctx context.Context, cephObjectStoreUser *v1.CephObjectStoreUser, opts metav1.CreateOptions) (result *v1.CephObjectStoreUser, err error) { + result = &v1.CephObjectStoreUser{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephobjectstoreusers"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephObjectStoreUser). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephObjectStoreUser and updates it. Returns the server's representation of the cephObjectStoreUser, and an error, if there is any. +func (c *cephObjectStoreUsers) Update(ctx context.Context, cephObjectStoreUser *v1.CephObjectStoreUser, opts metav1.UpdateOptions) (result *v1.CephObjectStoreUser, err error) { + result = &v1.CephObjectStoreUser{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephobjectstoreusers"). + Name(cephObjectStoreUser.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephObjectStoreUser). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephObjectStoreUser and deletes it. Returns an error if one occurs. +func (c *cephObjectStoreUsers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephobjectstoreusers"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephObjectStoreUsers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephobjectstoreusers"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephObjectStoreUser. +func (c *cephObjectStoreUsers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectStoreUser, err error) { + result = &v1.CephObjectStoreUser{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephobjectstoreusers"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectzone.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectzone.go new file mode 100644 index 0000000000..315d93c3be --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectzone.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephObjectZonesGetter has a method to return a CephObjectZoneInterface. +// A group's client should implement this interface. +type CephObjectZonesGetter interface { + CephObjectZones(namespace string) CephObjectZoneInterface +} + +// CephObjectZoneInterface has methods to work with CephObjectZone resources. +type CephObjectZoneInterface interface { + Create(ctx context.Context, cephObjectZone *v1.CephObjectZone, opts metav1.CreateOptions) (*v1.CephObjectZone, error) + Update(ctx context.Context, cephObjectZone *v1.CephObjectZone, opts metav1.UpdateOptions) (*v1.CephObjectZone, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephObjectZone, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephObjectZoneList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectZone, err error) + CephObjectZoneExpansion +} + +// cephObjectZones implements CephObjectZoneInterface +type cephObjectZones struct { + client rest.Interface + ns string +} + +// newCephObjectZones returns a CephObjectZones +func newCephObjectZones(c *CephV1Client, namespace string) *cephObjectZones { + return &cephObjectZones{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephObjectZone, and returns the corresponding cephObjectZone object, and an error if there is any. +func (c *cephObjectZones) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephObjectZone, err error) { + result = &v1.CephObjectZone{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephobjectzones"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephObjectZones that match those selectors. +func (c *cephObjectZones) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephObjectZoneList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephObjectZoneList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephobjectzones"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephObjectZones. 
+func (c *cephObjectZones) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephobjectzones"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephObjectZone and creates it. Returns the server's representation of the cephObjectZone, and an error, if there is any. +func (c *cephObjectZones) Create(ctx context.Context, cephObjectZone *v1.CephObjectZone, opts metav1.CreateOptions) (result *v1.CephObjectZone, err error) { + result = &v1.CephObjectZone{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephobjectzones"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephObjectZone). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephObjectZone and updates it. Returns the server's representation of the cephObjectZone, and an error, if there is any. +func (c *cephObjectZones) Update(ctx context.Context, cephObjectZone *v1.CephObjectZone, opts metav1.UpdateOptions) (result *v1.CephObjectZone, err error) { + result = &v1.CephObjectZone{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephobjectzones"). + Name(cephObjectZone.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephObjectZone). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephObjectZone and deletes it. Returns an error if one occurs. +func (c *cephObjectZones) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephobjectzones"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephObjectZones) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephobjectzones"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephObjectZone. +func (c *cephObjectZones) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectZone, err error) { + result = &v1.CephObjectZone{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephobjectzones"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectzonegroup.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectzonegroup.go new file mode 100644 index 0000000000..11899408a3 --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephobjectzonegroup.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephObjectZoneGroupsGetter has a method to return a CephObjectZoneGroupInterface. +// A group's client should implement this interface. +type CephObjectZoneGroupsGetter interface { + CephObjectZoneGroups(namespace string) CephObjectZoneGroupInterface +} + +// CephObjectZoneGroupInterface has methods to work with CephObjectZoneGroup resources. +type CephObjectZoneGroupInterface interface { + Create(ctx context.Context, cephObjectZoneGroup *v1.CephObjectZoneGroup, opts metav1.CreateOptions) (*v1.CephObjectZoneGroup, error) + Update(ctx context.Context, cephObjectZoneGroup *v1.CephObjectZoneGroup, opts metav1.UpdateOptions) (*v1.CephObjectZoneGroup, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephObjectZoneGroup, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephObjectZoneGroupList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectZoneGroup, err error) + CephObjectZoneGroupExpansion +} + +// cephObjectZoneGroups implements CephObjectZoneGroupInterface +type cephObjectZoneGroups struct { + client rest.Interface + ns string +} + +// newCephObjectZoneGroups returns a CephObjectZoneGroups +func newCephObjectZoneGroups(c *CephV1Client, namespace string) *cephObjectZoneGroups { + return &cephObjectZoneGroups{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephObjectZoneGroup, and returns the corresponding cephObjectZoneGroup object, and an error if there is any. +func (c *cephObjectZoneGroups) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephObjectZoneGroup, err error) { + result = &v1.CephObjectZoneGroup{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephobjectzonegroups"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephObjectZoneGroups that match those selectors. +func (c *cephObjectZoneGroups) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephObjectZoneGroupList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephObjectZoneGroupList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephobjectzonegroups"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephObjectZoneGroups. +func (c *cephObjectZoneGroups) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephobjectzonegroups"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephObjectZoneGroup and creates it. Returns the server's representation of the cephObjectZoneGroup, and an error, if there is any. +func (c *cephObjectZoneGroups) Create(ctx context.Context, cephObjectZoneGroup *v1.CephObjectZoneGroup, opts metav1.CreateOptions) (result *v1.CephObjectZoneGroup, err error) { + result = &v1.CephObjectZoneGroup{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephobjectzonegroups"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephObjectZoneGroup). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephObjectZoneGroup and updates it. Returns the server's representation of the cephObjectZoneGroup, and an error, if there is any. +func (c *cephObjectZoneGroups) Update(ctx context.Context, cephObjectZoneGroup *v1.CephObjectZoneGroup, opts metav1.UpdateOptions) (result *v1.CephObjectZoneGroup, err error) { + result = &v1.CephObjectZoneGroup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephobjectzonegroups"). + Name(cephObjectZoneGroup.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephObjectZoneGroup). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephObjectZoneGroup and deletes it. Returns an error if one occurs. +func (c *cephObjectZoneGroups) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephobjectzonegroups"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephObjectZoneGroups) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephobjectzonegroups"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephObjectZoneGroup. +func (c *cephObjectZoneGroups) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephObjectZoneGroup, err error) { + result = &v1.CephObjectZoneGroup{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephobjectzonegroups"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephrbdmirror.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephrbdmirror.go new file mode 100644 index 0000000000..524e8a98f5 --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/cephrbdmirror.go @@ -0,0 +1,178 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" + scheme "github.com/rook/rook/pkg/client/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CephRBDMirrorsGetter has a method to return a CephRBDMirrorInterface. +// A group's client should implement this interface. +type CephRBDMirrorsGetter interface { + CephRBDMirrors(namespace string) CephRBDMirrorInterface +} + +// CephRBDMirrorInterface has methods to work with CephRBDMirror resources. +type CephRBDMirrorInterface interface { + Create(ctx context.Context, cephRBDMirror *v1.CephRBDMirror, opts metav1.CreateOptions) (*v1.CephRBDMirror, error) + Update(ctx context.Context, cephRBDMirror *v1.CephRBDMirror, opts metav1.UpdateOptions) (*v1.CephRBDMirror, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.CephRBDMirror, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.CephRBDMirrorList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephRBDMirror, err error) + CephRBDMirrorExpansion +} + +// cephRBDMirrors implements CephRBDMirrorInterface +type cephRBDMirrors struct { + client rest.Interface + ns string +} + +// newCephRBDMirrors returns a CephRBDMirrors +func newCephRBDMirrors(c *CephV1Client, namespace string) *cephRBDMirrors { + return &cephRBDMirrors{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cephRBDMirror, and returns the corresponding cephRBDMirror object, and an error if there is any. +func (c *cephRBDMirrors) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.CephRBDMirror, err error) { + result = &v1.CephRBDMirror{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephrbdmirrors"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CephRBDMirrors that match those selectors. 
+func (c *cephRBDMirrors) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CephRBDMirrorList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.CephRBDMirrorList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cephrbdmirrors"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cephRBDMirrors. +func (c *cephRBDMirrors) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cephrbdmirrors"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a cephRBDMirror and creates it. Returns the server's representation of the cephRBDMirror, and an error, if there is any. +func (c *cephRBDMirrors) Create(ctx context.Context, cephRBDMirror *v1.CephRBDMirror, opts metav1.CreateOptions) (result *v1.CephRBDMirror, err error) { + result = &v1.CephRBDMirror{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cephrbdmirrors"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephRBDMirror). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a cephRBDMirror and updates it. Returns the server's representation of the cephRBDMirror, and an error, if there is any. +func (c *cephRBDMirrors) Update(ctx context.Context, cephRBDMirror *v1.CephRBDMirror, opts metav1.UpdateOptions) (result *v1.CephRBDMirror, err error) { + result = &v1.CephRBDMirror{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cephrbdmirrors"). + Name(cephRBDMirror.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(cephRBDMirror). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the cephRBDMirror and deletes it. Returns an error if one occurs. +func (c *cephRBDMirrors) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cephrbdmirrors"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cephRBDMirrors) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cephrbdmirrors"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched cephRBDMirror. +func (c *cephRBDMirrors) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.CephRBDMirror, err error) { + result = &v1.CephRBDMirror{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cephrbdmirrors"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/doc.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/doc.go new file mode 100644 index 0000000000..3af5d054f1 --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/generated_expansion.go b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/generated_expansion.go new file mode 100644 index 0000000000..ca470183bb --- /dev/null +++ b/vendor/github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1/generated_expansion.go @@ -0,0 +1,53 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
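The cephRBDMirrors client completed just above follows the usual Kubernetes optimistic-concurrency rules: an Update is rejected when the object's resourceVersion is stale. Here is a sketch of a conflict-tolerant update built on client-go's retry helper; the getter interface is the one generated in cephrbdmirror.go, while the namespace and the Spec.Count field are assumptions about the CephRBDMirror type rather than anything shown in this patch.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"

	cephclient "github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1"
)

// scaleRBDMirror sets the rbd-mirror daemon count on a CephRBDMirror, re-reading and
// retrying the Update whenever the server reports a resourceVersion conflict.
// Spec.Count is an assumed field of the CephRBDMirror spec; it is not part of this patch.
func scaleRBDMirror(ctx context.Context, c cephclient.CephRBDMirrorsGetter, name string, count int) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		mirror, err := c.CephRBDMirrors("rook-ceph").Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		mirror.Spec.Count = count
		_, err = c.CephRBDMirrors("rook-ceph").Update(ctx, mirror, metav1.UpdateOptions{})
		return err
	})
}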
+ +package v1 + +type CephBlockPoolExpansion interface{} + +type CephBlockPoolRadosNamespaceExpansion interface{} + +type CephBucketNotificationExpansion interface{} + +type CephBucketTopicExpansion interface{} + +type CephCOSIDriverExpansion interface{} + +type CephClientExpansion interface{} + +type CephClusterExpansion interface{} + +type CephFilesystemExpansion interface{} + +type CephFilesystemMirrorExpansion interface{} + +type CephFilesystemSubVolumeGroupExpansion interface{} + +type CephNFSExpansion interface{} + +type CephObjectRealmExpansion interface{} + +type CephObjectStoreExpansion interface{} + +type CephObjectStoreUserExpansion interface{} + +type CephObjectZoneExpansion interface{} + +type CephObjectZoneGroupExpansion interface{} + +type CephRBDMirrorExpansion interface{} diff --git a/vendor/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore new file mode 100644 index 0000000000..c7b459e4dd --- /dev/null +++ b/vendor/github.com/spf13/cobra/.gitignore @@ -0,0 +1,39 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore +# swap +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +# session +Session.vim +# temporary +.netrwhist +*~ +# auto-generated tag files +tags + +*.exe +cobra.test +bin + +.idea/ +*.iml diff --git a/vendor/github.com/spf13/cobra/.golangci.yml b/vendor/github.com/spf13/cobra/.golangci.yml new file mode 100644 index 0000000000..a618ec24d8 --- /dev/null +++ b/vendor/github.com/spf13/cobra/.golangci.yml @@ -0,0 +1,62 @@ +# Copyright 2013-2023 The Cobra Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +run: + deadline: 5m + +linters: + disable-all: true + enable: + #- bodyclose + # - deadcode ! deprecated since v1.49.0; replaced by 'unused' + #- depguard + #- dogsled + #- dupl + - errcheck + #- exhaustive + #- funlen + - gas + #- gochecknoinits + - goconst + #- gocritic + #- gocyclo + #- gofmt + - goimports + - golint + #- gomnd + #- goprintffuncname + #- gosec + #- gosimple + - govet + - ineffassign + - interfacer + #- lll + - maligned + - megacheck + #- misspell + #- nakedret + #- noctx + #- nolintlint + #- rowserrcheck + #- scopelint + #- staticcheck + #- structcheck ! deprecated since v1.49.0; replaced by 'unused' + #- stylecheck + #- typecheck + - unconvert + #- unparam + - unused + # - varcheck ! 
deprecated since v1.49.0; replaced by 'unused' + #- whitespace + fast: false diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap new file mode 100644 index 0000000000..94ec53068a --- /dev/null +++ b/vendor/github.com/spf13/cobra/.mailmap @@ -0,0 +1,3 @@ +Steve Francia +Bjørn Erik Pedersen +Fabiano Franz diff --git a/vendor/github.com/spf13/cobra/CONDUCT.md b/vendor/github.com/spf13/cobra/CONDUCT.md new file mode 100644 index 0000000000..9d16f88fd1 --- /dev/null +++ b/vendor/github.com/spf13/cobra/CONDUCT.md @@ -0,0 +1,37 @@ +## Cobra User Contract + +### Versioning +Cobra will follow a steady release cadence. Non breaking changes will be released as minor versions quarterly. Patch bug releases are at the discretion of the maintainers. Users can expect security patch fixes to be released within relatively short order of a CVE becoming known. For more information on security patch fixes see the CVE section below. Releases will follow [Semantic Versioning](https://semver.org/). Users tracking the Master branch should expect unpredictable breaking changes as the project continues to move forward. For stability, it is highly recommended to use a release. + +### Backward Compatibility +We will maintain two major releases in a moving window. The N-1 release will only receive bug fixes and security updates and will be dropped once N+1 is released. + +### Deprecation +Deprecation of Go versions or dependent packages will only occur in major releases. To reduce the chance of this taking users by surprise, any large deprecation will be preceded by an announcement in the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) and an Issue on Github. + +### CVE +Maintainers will make every effort to release security patches in the case of a medium to high severity CVE directly impacting the library. The speed with which these patches reach a release is up to the discretion of the maintainers. A low severity CVE may be a lower priority than a high severity one. + +### Communication +Cobra maintainers will use GitHub issues and the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) as the primary means of communication with the community. This is to foster open communication with all users and contributors. + +### Breaking Changes +Breaking changes are generally allowed in the master branch, as this is the branch used to develop the next release of Cobra. + +There may be times, however, when master is closed for breaking changes. This is likely to happen as we near the release of a new version. + +Breaking changes are not allowed in release branches, as these represent minor versions that have already been released. These versions have consumers who expect the APIs, behaviors, etc, to remain stable during the lifetime of the patch stream for the minor release. + +Examples of breaking changes include: +- Removing or renaming exported constant, variable, type, or function. +- Updating the version of critical libraries such as `spf13/pflag`, `spf13/viper` etc... + - Some version updates may be acceptable for picking up bug fixes, but maintainers must exercise caution when reviewing. + +There may, at times, need to be exceptions where breaking changes are allowed in release branches. These are at the discretion of the project's maintainers, and must be carefully considered before merging. + +### CI Testing +Maintainers will ensure the Cobra test suite utilizes the current supported versions of Golang.
+ +### Disclaimer +Changes to this document and the contents therein are at the discretion of the maintainers. +None of the contents of this document are legally binding in any way to the maintainers or the users. diff --git a/vendor/github.com/spf13/cobra/CONTRIBUTING.md b/vendor/github.com/spf13/cobra/CONTRIBUTING.md new file mode 100644 index 0000000000..6f356e6a82 --- /dev/null +++ b/vendor/github.com/spf13/cobra/CONTRIBUTING.md @@ -0,0 +1,50 @@ +# Contributing to Cobra + +Thank you so much for contributing to Cobra. We appreciate your time and help. +Here are some guidelines to help you get started. + +## Code of Conduct + +Be kind and respectful to the members of the community. Take time to educate +others who are seeking help. Harassment of any kind will not be tolerated. + +## Questions + +If you have questions regarding Cobra, feel free to ask it in the community +[#cobra Slack channel][cobra-slack] + +## Filing a bug or feature + +1. Before filing an issue, please check the existing issues to see if a + similar one was already opened. If there is one already opened, feel free + to comment on it. +1. If you believe you've found a bug, please provide detailed steps of + reproduction, the version of Cobra and anything else you believe will be + useful to help troubleshoot it (e.g. OS environment, environment variables, + etc...). Also state the current behavior vs. the expected behavior. +1. If you'd like to see a feature or an enhancement please open an issue with + a clear title and description of what the feature is and why it would be + beneficial to the project and its users. + +## Submitting changes + +1. CLA: Upon submitting a Pull Request (PR), contributors will be prompted to + sign a CLA. Please sign the CLA :slightly_smiling_face: +1. Tests: If you are submitting code, please ensure you have adequate tests + for the feature. Tests can be run via `go test ./...` or `make test`. +1. Since this is golang project, ensure the new code is properly formatted to + ensure code consistency. Run `make all`. + +### Quick steps to contribute + +1. Fork the project. +1. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`) +1. Create your feature branch (`git checkout -b my-new-feature`) +1. Make changes and run tests (`make test`) +1. Add them to staging (`git add .`) +1. Commit your changes (`git commit -m 'Add some feature'`) +1. Push to the branch (`git push origin my-new-feature`) +1. Create new pull request + + +[cobra-slack]: https://gophers.slack.com/archives/CD3LP1199 diff --git a/vendor/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt new file mode 100644 index 0000000000..298f0e2665 --- /dev/null +++ b/vendor/github.com/spf13/cobra/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/cobra/MAINTAINERS b/vendor/github.com/spf13/cobra/MAINTAINERS new file mode 100644 index 0000000000..4c5ac3dd99 --- /dev/null +++ b/vendor/github.com/spf13/cobra/MAINTAINERS @@ -0,0 +1,13 @@ +maintainers: +- spf13 +- johnSchnake +- jpmcb +- marckhouzam +inactive: +- anthonyfok +- bep +- bogem +- broady +- eparis +- jharshman +- wfernandes diff --git a/vendor/github.com/spf13/cobra/Makefile b/vendor/github.com/spf13/cobra/Makefile new file mode 100644 index 0000000000..0da8d7aa08 --- /dev/null +++ b/vendor/github.com/spf13/cobra/Makefile @@ -0,0 +1,35 @@ +BIN="./bin" +SRC=$(shell find . -name "*.go") + +ifeq (, $(shell which golangci-lint)) +$(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh") +endif + +.PHONY: fmt lint test install_deps clean + +default: all + +all: fmt test + +fmt: + $(info ******************** checking formatting ********************) + @test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1) + +lint: + $(info ******************** running lint tools ********************) + golangci-lint run -v + +test: install_deps + $(info ******************** running tests ********************) + go test -v ./... 
+ +richtest: install_deps + $(info ******************** running tests with kyoh86/richgo ********************) + richgo test -v ./... + +install_deps: + $(info ******************** downloading dependencies ********************) + go get -v ./... + +clean: + rm -rf $(BIN) diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md new file mode 100644 index 0000000000..6444f4b7f6 --- /dev/null +++ b/vendor/github.com/spf13/cobra/README.md @@ -0,0 +1,112 @@ +![cobra logo](assets/CobraMain.png) + +Cobra is a library for creating powerful modern CLI applications. + +Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/), +[Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to +name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra. + +[![](https://img.shields.io/github/actions/workflow/status/spf13/cobra/test.yml?branch=main&longCache=true&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest) +[![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra) +[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra) +[![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199) + +# Overview + +Cobra is a library providing a simple interface to create powerful modern CLI +interfaces similar to git & go tools. + +Cobra provides: +* Easy subcommand-based CLIs: `app server`, `app fetch`, etc. +* Fully POSIX-compliant flags (including short & long versions) +* Nested subcommands +* Global, local and cascading flags +* Intelligent suggestions (`app srver`... did you mean `app server`?) +* Automatic help generation for commands and flags +* Grouping help for subcommands +* Automatic help flag recognition of `-h`, `--help`, etc. +* Automatically generated shell autocomplete for your application (bash, zsh, fish, powershell) +* Automatically generated man pages for your application +* Command aliases so you can change things without breaking them +* The flexibility to define your own help, usage, etc. +* Optional seamless integration with [viper](https://github.com/spf13/viper) for 12-factor apps + +# Concepts + +Cobra is built on a structure of commands, arguments & flags. + +**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions. + +The best applications read like sentences when used, and as a result, users +intuitively know how to interact with them. + +The pattern to follow is +`APPNAME VERB NOUN --ADJECTIVE` + or +`APPNAME COMMAND ARG --FLAG`. + +A few good real world examples may better illustrate this point. + +In the following example, 'server' is a command, and 'port' is a flag: + + hugo server --port=1313 + +In this command we are telling Git to clone the url bare. + + git clone URL --bare + +## Commands + +Command is the central point of the application. Each interaction that +the application supports will be contained in a Command. A command can +have children commands and optionally run an action. + +In the example above, 'server' is the command. + +[More about cobra.Command](https://pkg.go.dev/github.com/spf13/cobra#Command) + +## Flags + +A flag is a way to modify the behavior of a command. 
Cobra supports +fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/). +A Cobra command can define flags that persist through to children commands +and flags that are only available to that command. + +In the example above, 'port' is the flag. + +Flag functionality is provided by the [pflag +library](https://github.com/spf13/pflag), a fork of the flag standard library +which maintains the same interface while adding POSIX compliance. + +# Installing +Using Cobra is easy. First, use `go get` to install the latest version +of the library. + +``` +go get -u github.com/spf13/cobra@latest +``` + +Next, include Cobra in your application: + +```go +import "github.com/spf13/cobra" +``` + +# Usage +`cobra-cli` is a command line program to generate cobra applications and command files. +It will bootstrap your application scaffolding to rapidly +develop a Cobra-based application. It is the easiest way to incorporate Cobra into your application. + +It can be installed by running: + +``` +go install github.com/spf13/cobra-cli@latest +``` + +For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md) + +For complete details on using the Cobra library, please read the [The Cobra User Guide](site/content/user_guide.md). + +# License + +Cobra is released under the Apache 2.0 license. See [LICENSE.txt](LICENSE.txt) diff --git a/vendor/github.com/spf13/cobra/active_help.go b/vendor/github.com/spf13/cobra/active_help.go new file mode 100644 index 0000000000..5f965e057f --- /dev/null +++ b/vendor/github.com/spf13/cobra/active_help.go @@ -0,0 +1,67 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cobra + +import ( + "fmt" + "os" + "regexp" + "strings" +) + +const ( + activeHelpMarker = "_activeHelp_ " + // The below values should not be changed: programs will be using them explicitly + // in their user documentation, and users will be using them explicitly. + activeHelpEnvVarSuffix = "_ACTIVE_HELP" + activeHelpGlobalEnvVar = "COBRA_ACTIVE_HELP" + activeHelpGlobalDisable = "0" +) + +var activeHelpEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`) + +// AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp. +// Such strings will be processed by the completion script and will be shown as ActiveHelp +// to the user. +// The array parameter should be the array that will contain the completions. +// This function can be called multiple times before and/or after completions are added to +// the array. Each time this function is called with the same array, the new +// ActiveHelp line will be shown below the previous ones when completion is triggered. 
+func AppendActiveHelp(compArray []string, activeHelpStr string) []string { + return append(compArray, fmt.Sprintf("%s%s", activeHelpMarker, activeHelpStr)) +} + +// GetActiveHelpConfig returns the value of the ActiveHelp environment variable +// <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the root command in upper +// case, with all non-ASCII-alphanumeric characters replaced by `_`. +// It will always return "0" if the global environment variable COBRA_ACTIVE_HELP +// is set to "0". +func GetActiveHelpConfig(cmd *Command) string { + activeHelpCfg := os.Getenv(activeHelpGlobalEnvVar) + if activeHelpCfg != activeHelpGlobalDisable { + activeHelpCfg = os.Getenv(activeHelpEnvVar(cmd.Root().Name())) + } + return activeHelpCfg +} + +// activeHelpEnvVar returns the name of the program-specific ActiveHelp environment +// variable. It has the format <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the +// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`. +func activeHelpEnvVar(name string) string { + // This format should not be changed: users will be using it explicitly. + activeHelpEnvVar := strings.ToUpper(fmt.Sprintf("%s%s", name, activeHelpEnvVarSuffix)) + activeHelpEnvVar = activeHelpEnvVarPrefixSubstRegexp.ReplaceAllString(activeHelpEnvVar, "_") + return activeHelpEnvVar +} diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go new file mode 100644 index 0000000000..e79ec33a81 --- /dev/null +++ b/vendor/github.com/spf13/cobra/args.go @@ -0,0 +1,131 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cobra + +import ( + "fmt" + "strings" +) + +type PositionalArgs func(cmd *Command, args []string) error + +// legacyArgs validation has the following behaviour: +// - root commands with no subcommands can take arbitrary arguments +// - root commands with subcommands will do subcommand validity checking +// - subcommands will always accept arbitrary arguments +func legacyArgs(cmd *Command, args []string) error { + // no subcommand, always take args + if !cmd.HasSubCommands() { + return nil + } + + // root command with subcommands, do subcommand checking. + if !cmd.HasParent() && len(args) > 0 { + return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0])) + } + return nil +} + +// NoArgs returns an error if any args are included. +func NoArgs(cmd *Command, args []string) error { + if len(args) > 0 { + return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath()) + } + return nil +} + +// OnlyValidArgs returns an error if there are any positional args that are not in +// the `ValidArgs` field of `Command` +func OnlyValidArgs(cmd *Command, args []string) error { + if len(cmd.ValidArgs) > 0 { + // Remove any description that may be included in ValidArgs. + // A description is following a tab character.
+ var validArgs []string + for _, v := range cmd.ValidArgs { + validArgs = append(validArgs, strings.Split(v, "\t")[0]) + } + for _, v := range args { + if !stringInSlice(v, validArgs) { + return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0])) + } + } + } + return nil +} + +// ArbitraryArgs never returns an error. +func ArbitraryArgs(cmd *Command, args []string) error { + return nil +} + +// MinimumNArgs returns an error if there is not at least N args. +func MinimumNArgs(n int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) < n { + return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args)) + } + return nil + } +} + +// MaximumNArgs returns an error if there are more than N args. +func MaximumNArgs(n int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) > n { + return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args)) + } + return nil + } +} + +// ExactArgs returns an error if there are not exactly n args. +func ExactArgs(n int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) != n { + return fmt.Errorf("accepts %d arg(s), received %d", n, len(args)) + } + return nil + } +} + +// RangeArgs returns an error if the number of args is not within the expected range. +func RangeArgs(min int, max int) PositionalArgs { + return func(cmd *Command, args []string) error { + if len(args) < min || len(args) > max { + return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args)) + } + return nil + } +} + +// MatchAll allows combining several PositionalArgs to work in concert. +func MatchAll(pargs ...PositionalArgs) PositionalArgs { + return func(cmd *Command, args []string) error { + for _, parg := range pargs { + if err := parg(cmd, args); err != nil { + return err + } + } + return nil + } +} + +// ExactValidArgs returns an error if there are not exactly N positional args OR +// there are any positional args that are not in the `ValidArgs` field of `Command` +// +// Deprecated: use MatchAll(ExactArgs(n), OnlyValidArgs) instead +func ExactValidArgs(n int) PositionalArgs { + return MatchAll(ExactArgs(n), OnlyValidArgs) +} diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go new file mode 100644 index 0000000000..8a53151840 --- /dev/null +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -0,0 +1,712 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "sort" + "strings" + + "github.com/spf13/pflag" +) + +// Annotations for Bash completion. 
+const ( + BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions" + BashCompCustom = "cobra_annotation_bash_completion_custom" + BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" + BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" +) + +func writePreamble(buf io.StringWriter, name string) { + WriteStringAndCheck(buf, fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(` +__%[1]s_debug() +{ + if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then + echo "$*" >> "${BASH_COMP_DEBUG_FILE}" + fi +} + +# Homebrew on Macs have version 1.3 of bash-completion which doesn't include +# _init_completion. This is a very minimal version of that function. +__%[1]s_init_completion() +{ + COMPREPLY=() + _get_comp_words_by_ref "$@" cur prev words cword +} + +__%[1]s_index_of_word() +{ + local w word=$1 + shift + index=0 + for w in "$@"; do + [[ $w = "$word" ]] && return + index=$((index+1)) + done + index=-1 +} + +__%[1]s_contains_word() +{ + local w word=$1; shift + for w in "$@"; do + [[ $w = "$word" ]] && return + done + return 1 +} + +__%[1]s_handle_go_custom_completion() +{ + __%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}" + + local shellCompDirectiveError=%[3]d + local shellCompDirectiveNoSpace=%[4]d + local shellCompDirectiveNoFileComp=%[5]d + local shellCompDirectiveFilterFileExt=%[6]d + local shellCompDirectiveFilterDirs=%[7]d + + local out requestComp lastParam lastChar comp directive args + + # Prepare the command to request completions for the program. + # Calling ${words[0]} instead of directly %[1]s allows handling aliases + args=("${words[@]:1}") + # Disable ActiveHelp which is not supported for bash completion v1 + requestComp="%[8]s=0 ${words[0]} %[2]s ${args[*]}" + + lastParam=${words[$((${#words[@]}-1))]} + lastChar=${lastParam:$((${#lastParam}-1)):1} + __%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}" + + if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go method. + __%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter" + requestComp="${requestComp} \"\"" + fi + + __%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}" + # Use eval to handle any environment variables and such + out=$(eval "${requestComp}" 2>/dev/null) + + # Extract the directive integer at the very end of the output following a colon (:) + directive=${out##*:} + # Remove the directive + out=${out%%:*} + if [ "${directive}" = "${out}" ]; then + # There is not directive specified + directive=0 + fi + __%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}" + __%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out}" + + if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then + # Error code. No completion. 
+ __%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code" + return + else + if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then + if [[ $(type -t compopt) = "builtin" ]]; then + __%[1]s_debug "${FUNCNAME[0]}: activating no space" + compopt -o nospace + fi + fi + if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then + if [[ $(type -t compopt) = "builtin" ]]; then + __%[1]s_debug "${FUNCNAME[0]}: activating no file completion" + compopt +o default + fi + fi + fi + + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then + # File extension filtering + local fullFilter filter filteringCmd + # Do not use quotes around the $out variable or else newline + # characters will be kept. + for filter in ${out}; do + fullFilter+="$filter|" + done + + filteringCmd="_filedir $fullFilter" + __%[1]s_debug "File filtering command: $filteringCmd" + $filteringCmd + elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then + # File completion for directories only + local subdir + # Use printf to strip any trailing newline + subdir=$(printf "%%s" "${out}") + if [ -n "$subdir" ]; then + __%[1]s_debug "Listing directories in $subdir" + __%[1]s_handle_subdirs_in_dir_flag "$subdir" + else + __%[1]s_debug "Listing directories in ." + _filedir -d + fi + else + while IFS='' read -r comp; do + COMPREPLY+=("$comp") + done < <(compgen -W "${out}" -- "$cur") + fi +} + +__%[1]s_handle_reply() +{ + __%[1]s_debug "${FUNCNAME[0]}" + local comp + case $cur in + -*) + if [[ $(type -t compopt) = "builtin" ]]; then + compopt -o nospace + fi + local allflags + if [ ${#must_have_one_flag[@]} -ne 0 ]; then + allflags=("${must_have_one_flag[@]}") + else + allflags=("${flags[*]} ${two_word_flags[*]}") + fi + while IFS='' read -r comp; do + COMPREPLY+=("$comp") + done < <(compgen -W "${allflags[*]}" -- "$cur") + if [[ $(type -t compopt) = "builtin" ]]; then + [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace + fi + + # complete after --flag=abc + if [[ $cur == *=* ]]; then + if [[ $(type -t compopt) = "builtin" ]]; then + compopt +o nospace + fi + + local index flag + flag="${cur%%=*}" + __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}" + COMPREPLY=() + if [[ ${index} -ge 0 ]]; then + PREFIX="" + cur="${cur#*=}" + ${flags_completion[${index}]} + if [ -n "${ZSH_VERSION:-}" ]; then + # zsh completion needs --flag= prefix + eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" + fi + fi + fi + + if [[ -z "${flag_parsing_disabled}" ]]; then + # If flag parsing is enabled, we have completed the flags and can return. + # If flag parsing is disabled, we may not know all (or any) of the flags, so we fallthrough + # to possibly call handle_go_custom_completion. 
+ return 0; + fi + ;; + esac + + # check if we are handling a flag with special work handling + local index + __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}" + if [[ ${index} -ge 0 ]]; then + ${flags_completion[${index}]} + return + fi + + # we are parsing a flag and don't have a special handler, no completion + if [[ ${cur} != "${words[cword]}" ]]; then + return + fi + + local completions + completions=("${commands[@]}") + if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then + completions+=("${must_have_one_noun[@]}") + elif [[ -n "${has_completion_function}" ]]; then + # if a go completion function is provided, defer to that function + __%[1]s_handle_go_custom_completion + fi + if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then + completions+=("${must_have_one_flag[@]}") + fi + while IFS='' read -r comp; do + COMPREPLY+=("$comp") + done < <(compgen -W "${completions[*]}" -- "$cur") + + if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then + while IFS='' read -r comp; do + COMPREPLY+=("$comp") + done < <(compgen -W "${noun_aliases[*]}" -- "$cur") + fi + + if [[ ${#COMPREPLY[@]} -eq 0 ]]; then + if declare -F __%[1]s_custom_func >/dev/null; then + # try command name qualified custom func + __%[1]s_custom_func + else + # otherwise fall back to unqualified for compatibility + declare -F __custom_func >/dev/null && __custom_func + fi + fi + + # available in bash-completion >= 2, not always present on macOS + if declare -F __ltrim_colon_completions >/dev/null; then + __ltrim_colon_completions "$cur" + fi + + # If there is only 1 completion and it is a flag with an = it will be completed + # but we don't want a space after the = + if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then + compopt -o nospace + fi +} + +# The arguments should be in the form "ext1|ext2|extn" +__%[1]s_handle_filename_extension_flag() +{ + local ext="$1" + _filedir "@(${ext})" +} + +__%[1]s_handle_subdirs_in_dir_flag() +{ + local dir="$1" + pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return +} + +__%[1]s_handle_flag() +{ + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + # if a command required a flag, and we found it, unset must_have_one_flag() + local flagname=${words[c]} + local flagvalue="" + # if the word contained an = + if [[ ${words[c]} == *"="* ]]; then + flagvalue=${flagname#*=} # take in as flagvalue after the = + flagname=${flagname%%=*} # strip everything after the = + flagname="${flagname}=" # but put the = back + fi + __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}" + if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then + must_have_one_flag=() + fi + + # if you set a flag which only applies to this command, don't show subcommands + if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then + commands=() + fi + + # keep flag value with flagname as flaghash + # flaghash variable is an associative array which is only supported in bash > 3. 
+ if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then + if [ -n "${flagvalue}" ] ; then + flaghash[${flagname}]=${flagvalue} + elif [ -n "${words[ $((c+1)) ]}" ] ; then + flaghash[${flagname}]=${words[ $((c+1)) ]} + else + flaghash[${flagname}]="true" # pad "true" for bool flag + fi + fi + + # skip the argument to a two word flag + if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then + __%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument" + c=$((c+1)) + # if we are looking for a flags value, don't show commands + if [[ $c -eq $cword ]]; then + commands=() + fi + fi + + c=$((c+1)) + +} + +__%[1]s_handle_noun() +{ + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then + must_have_one_noun=() + elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then + must_have_one_noun=() + fi + + nouns+=("${words[c]}") + c=$((c+1)) +} + +__%[1]s_handle_command() +{ + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + local next_command + if [[ -n ${last_command} ]]; then + next_command="_${last_command}_${words[c]//:/__}" + else + if [[ $c -eq 0 ]]; then + next_command="_%[1]s_root_command" + else + next_command="_${words[c]//:/__}" + fi + fi + c=$((c+1)) + __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}" + declare -F "$next_command" >/dev/null && $next_command +} + +__%[1]s_handle_word() +{ + if [[ $c -ge $cword ]]; then + __%[1]s_handle_reply + return + fi + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + if [[ "${words[c]}" == -* ]]; then + __%[1]s_handle_flag + elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then + __%[1]s_handle_command + elif [[ $c -eq 0 ]]; then + __%[1]s_handle_command + elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then + # aliashash variable is an associative array which is only supported in bash > 3. 
+ if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then + words[c]=${aliashash[${words[c]}]} + __%[1]s_handle_command + else + __%[1]s_handle_noun + fi + else + __%[1]s_handle_noun + fi + __%[1]s_handle_word +} + +`, name, ShellCompNoDescRequestCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name))) +} + +func writePostscript(buf io.StringWriter, name string) { + name = strings.ReplaceAll(name, ":", "__") + WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(`{ + local cur prev words cword split + declare -A flaghash 2>/dev/null || : + declare -A aliashash 2>/dev/null || : + if declare -F _init_completion >/dev/null 2>&1; then + _init_completion -s || return + else + __%[1]s_init_completion -n "=" || return + fi + + local c=0 + local flag_parsing_disabled= + local flags=() + local two_word_flags=() + local local_nonpersistent_flags=() + local flags_with_completion=() + local flags_completion=() + local commands=("%[1]s") + local command_aliases=() + local must_have_one_flag=() + local must_have_one_noun=() + local has_completion_function="" + local last_command="" + local nouns=() + local noun_aliases=() + + __%[1]s_handle_word +} + +`, name)) + WriteStringAndCheck(buf, fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then + complete -o default -F __start_%s %s +else + complete -o default -o nospace -F __start_%s %s +fi + +`, name, name, name, name)) + WriteStringAndCheck(buf, "# ex: ts=4 sw=4 et filetype=sh\n") +} + +func writeCommands(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " commands=()\n") + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() && c != cmd.helpCommand { + continue + } + WriteStringAndCheck(buf, fmt.Sprintf(" commands+=(%q)\n", c.Name())) + writeCmdAliases(buf, c) + } + WriteStringAndCheck(buf, "\n") +} + +func writeFlagHandler(buf io.StringWriter, name string, annotations map[string][]string, cmd *Command) { + for key, value := range annotations { + switch key { + case BashCompFilenameExt: + WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + + var ext string + if len(value) > 0 { + ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|") + } else { + ext = "_filedir" + } + WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext)) + case BashCompCustom: + WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + + if len(value) > 0 { + handlers := strings.Join(value, "; ") + WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", handlers)) + } else { + WriteStringAndCheck(buf, " flags_completion+=(:)\n") + } + case BashCompSubdirsInDir: + WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name)) + + var ext string + if len(value) == 1 { + ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0] + } else { + ext = "_filedir -d" + } + WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext)) + } + } +} + +const cbn = "\")\n" + +func writeShortFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) { + name := flag.Shorthand + format := " " + if len(flag.NoOptDefVal) == 0 { + format += "two_word_" + } + format += "flags+=(\"-%s" + cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) + writeFlagHandler(buf, "-"+name, flag.Annotations, cmd) +} + +func 
writeFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) { + name := flag.Name + format := " flags+=(\"--%s" + if len(flag.NoOptDefVal) == 0 { + format += "=" + } + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) + if len(flag.NoOptDefVal) == 0 { + format = " two_word_flags+=(\"--%s" + cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) + } + writeFlagHandler(buf, "--"+name, flag.Annotations, cmd) +} + +func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) { + name := flag.Name + format := " local_nonpersistent_flags+=(\"--%[1]s" + cbn + if len(flag.NoOptDefVal) == 0 { + format += " local_nonpersistent_flags+=(\"--%[1]s=" + cbn + } + WriteStringAndCheck(buf, fmt.Sprintf(format, name)) + if len(flag.Shorthand) > 0 { + WriteStringAndCheck(buf, fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand)) + } +} + +// prepareCustomAnnotationsForFlags setup annotations for go completions for registered flags +func prepareCustomAnnotationsForFlags(cmd *Command) { + flagCompletionMutex.RLock() + defer flagCompletionMutex.RUnlock() + for flag := range flagCompletionFunctions { + // Make sure the completion script calls the __*_go_custom_completion function for + // every registered flag. We need to do this here (and not when the flag was registered + // for completion) so that we can know the root command name for the prefix + // of ___go_custom_completion + if flag.Annotations == nil { + flag.Annotations = map[string][]string{} + } + flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())} + } +} + +func writeFlags(buf io.StringWriter, cmd *Command) { + prepareCustomAnnotationsForFlags(cmd) + WriteStringAndCheck(buf, ` flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + +`) + + if cmd.DisableFlagParsing { + WriteStringAndCheck(buf, " flag_parsing_disabled=1\n") + } + + localNonPersistentFlags := cmd.LocalNonPersistentFlags() + cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + writeFlag(buf, flag, cmd) + if len(flag.Shorthand) > 0 { + writeShortFlag(buf, flag, cmd) + } + // localNonPersistentFlags are used to stop the completion of subcommands when one is set + // if TraverseChildren is true we should allow to complete subcommands + if localNonPersistentFlags.Lookup(flag.Name) != nil && !cmd.Root().TraverseChildren { + writeLocalNonPersistentFlag(buf, flag) + } + }) + cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + writeFlag(buf, flag, cmd) + if len(flag.Shorthand) > 0 { + writeShortFlag(buf, flag, cmd) + } + }) + + WriteStringAndCheck(buf, "\n") +} + +func writeRequiredFlag(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " must_have_one_flag=()\n") + flags := cmd.NonInheritedFlags() + flags.VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + for key := range flag.Annotations { + switch key { + case BashCompOneRequiredFlag: + format := " must_have_one_flag+=(\"--%s" + if flag.Value.Type() != "bool" { + format += "=" + } + format += cbn + WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name)) + + if len(flag.Shorthand) > 0 { + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand)) + } + } + } + }) +} + +func writeRequiredNouns(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " must_have_one_noun=()\n") + 
sort.Strings(cmd.ValidArgs) + for _, value := range cmd.ValidArgs { + // Remove any description that may be included following a tab character. + // Descriptions are not supported by bash completion. + value = strings.Split(value, "\t")[0] + WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value)) + } + if cmd.ValidArgsFunction != nil { + WriteStringAndCheck(buf, " has_completion_function=1\n") + } +} + +func writeCmdAliases(buf io.StringWriter, cmd *Command) { + if len(cmd.Aliases) == 0 { + return + } + + sort.Strings(cmd.Aliases) + + WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then`, "\n")) + for _, value := range cmd.Aliases { + WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value)) + WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) + } + WriteStringAndCheck(buf, ` fi`) + WriteStringAndCheck(buf, "\n") +} +func writeArgAliases(buf io.StringWriter, cmd *Command) { + WriteStringAndCheck(buf, " noun_aliases=()\n") + sort.Strings(cmd.ArgAliases) + for _, value := range cmd.ArgAliases { + WriteStringAndCheck(buf, fmt.Sprintf(" noun_aliases+=(%q)\n", value)) + } +} + +func gen(buf io.StringWriter, cmd *Command) { + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() && c != cmd.helpCommand { + continue + } + gen(buf, c) + } + commandName := cmd.CommandPath() + commandName = strings.ReplaceAll(commandName, " ", "_") + commandName = strings.ReplaceAll(commandName, ":", "__") + + if cmd.Root() == cmd { + WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName)) + } else { + WriteStringAndCheck(buf, fmt.Sprintf("_%s()\n{\n", commandName)) + } + + WriteStringAndCheck(buf, fmt.Sprintf(" last_command=%q\n", commandName)) + WriteStringAndCheck(buf, "\n") + WriteStringAndCheck(buf, " command_aliases=()\n") + WriteStringAndCheck(buf, "\n") + + writeCommands(buf, cmd) + writeFlags(buf, cmd) + writeRequiredFlag(buf, cmd) + writeRequiredNouns(buf, cmd) + writeArgAliases(buf, cmd) + WriteStringAndCheck(buf, "}\n\n") +} + +// GenBashCompletion generates bash completion file and writes to the passed writer. +func (c *Command) GenBashCompletion(w io.Writer) error { + buf := new(bytes.Buffer) + writePreamble(buf, c.Name()) + if len(c.BashCompletionFunction) > 0 { + buf.WriteString(c.BashCompletionFunction + "\n") + } + gen(buf, c) + writePostscript(buf, c.Name()) + + _, err := buf.WriteTo(w) + return err +} + +func nonCompletableFlag(flag *pflag.Flag) bool { + return flag.Hidden || len(flag.Deprecated) > 0 +} + +// GenBashCompletionFile generates bash completion file. +func (c *Command) GenBashCompletionFile(filename string) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.GenBashCompletion(outFile) +} diff --git a/vendor/github.com/spf13/cobra/bash_completionsV2.go b/vendor/github.com/spf13/cobra/bash_completionsV2.go new file mode 100644 index 0000000000..1cce5c329c --- /dev/null +++ b/vendor/github.com/spf13/cobra/bash_completionsV2.go @@ -0,0 +1,396 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" +) + +func (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error { + buf := new(bytes.Buffer) + genBashComp(buf, c.Name(), includeDesc) + _, err := buf.WriteTo(w) + return err +} + +func genBashComp(buf io.StringWriter, name string, includeDesc bool) { + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd + } + + WriteStringAndCheck(buf, fmt.Sprintf(`# bash completion V2 for %-36[1]s -*- shell-script -*- + +__%[1]s_debug() +{ + if [[ -n ${BASH_COMP_DEBUG_FILE-} ]]; then + echo "$*" >> "${BASH_COMP_DEBUG_FILE}" + fi +} + +# Macs have bash3 for which the bash-completion package doesn't include +# _init_completion. This is a minimal version of that function. +__%[1]s_init_completion() +{ + COMPREPLY=() + _get_comp_words_by_ref "$@" cur prev words cword +} + +# This function calls the %[1]s program to obtain the completion +# results and the directive. It fills the 'out' and 'directive' vars. +__%[1]s_get_completion_results() { + local requestComp lastParam lastChar args + + # Prepare the command to request completions for the program. + # Calling ${words[0]} instead of directly %[1]s allows handling aliases + args=("${words[@]:1}") + requestComp="${words[0]} %[2]s ${args[*]}" + + lastParam=${words[$((${#words[@]}-1))]} + lastChar=${lastParam:$((${#lastParam}-1)):1} + __%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}" + + if [[ -z ${cur} && ${lastChar} != = ]]; then + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go method. + __%[1]s_debug "Adding extra empty parameter" + requestComp="${requestComp} ''" + fi + + # When completing a flag with an = (e.g., %[1]s -n=) + # bash focuses on the part after the =, so we need to remove + # the flag part from $cur + if [[ ${cur} == -*=* ]]; then + cur="${cur#*=}" + fi + + __%[1]s_debug "Calling ${requestComp}" + # Use eval to handle any environment variables and such + out=$(eval "${requestComp}" 2>/dev/null) + + # Extract the directive integer at the very end of the output following a colon (:) + directive=${out##*:} + # Remove the directive + out=${out%%:*} + if [[ ${directive} == "${out}" ]]; then + # There is not directive specified + directive=0 + fi + __%[1]s_debug "The completion directive is: ${directive}" + __%[1]s_debug "The completions are: ${out}" +} + +__%[1]s_process_completion_results() { + local shellCompDirectiveError=%[3]d + local shellCompDirectiveNoSpace=%[4]d + local shellCompDirectiveNoFileComp=%[5]d + local shellCompDirectiveFilterFileExt=%[6]d + local shellCompDirectiveFilterDirs=%[7]d + local shellCompDirectiveKeepOrder=%[8]d + + if (((directive & shellCompDirectiveError) != 0)); then + # Error code. No completion. 
+ __%[1]s_debug "Received error from custom completion go code" + return + else + if (((directive & shellCompDirectiveNoSpace) != 0)); then + if [[ $(type -t compopt) == builtin ]]; then + __%[1]s_debug "Activating no space" + compopt -o nospace + else + __%[1]s_debug "No space directive not supported in this version of bash" + fi + fi + if (((directive & shellCompDirectiveKeepOrder) != 0)); then + if [[ $(type -t compopt) == builtin ]]; then + # no sort isn't supported for bash less than < 4.4 + if [[ ${BASH_VERSINFO[0]} -lt 4 || ( ${BASH_VERSINFO[0]} -eq 4 && ${BASH_VERSINFO[1]} -lt 4 ) ]]; then + __%[1]s_debug "No sort directive not supported in this version of bash" + else + __%[1]s_debug "Activating keep order" + compopt -o nosort + fi + else + __%[1]s_debug "No sort directive not supported in this version of bash" + fi + fi + if (((directive & shellCompDirectiveNoFileComp) != 0)); then + if [[ $(type -t compopt) == builtin ]]; then + __%[1]s_debug "Activating no file completion" + compopt +o default + else + __%[1]s_debug "No file completion directive not supported in this version of bash" + fi + fi + fi + + # Separate activeHelp from normal completions + local completions=() + local activeHelp=() + __%[1]s_extract_activeHelp + + if (((directive & shellCompDirectiveFilterFileExt) != 0)); then + # File extension filtering + local fullFilter filter filteringCmd + + # Do not use quotes around the $completions variable or else newline + # characters will be kept. + for filter in ${completions[*]}; do + fullFilter+="$filter|" + done + + filteringCmd="_filedir $fullFilter" + __%[1]s_debug "File filtering command: $filteringCmd" + $filteringCmd + elif (((directive & shellCompDirectiveFilterDirs) != 0)); then + # File completion for directories only + + local subdir + subdir=${completions[0]} + if [[ -n $subdir ]]; then + __%[1]s_debug "Listing directories in $subdir" + pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return + else + __%[1]s_debug "Listing directories in ." + _filedir -d + fi + else + __%[1]s_handle_completion_types + fi + + __%[1]s_handle_special_char "$cur" : + __%[1]s_handle_special_char "$cur" = + + # Print the activeHelp statements before we finish + if ((${#activeHelp[*]} != 0)); then + printf "\n"; + printf "%%s\n" "${activeHelp[@]}" + printf "\n" + + # The prompt format is only available from bash 4.4. + # We test if it is available before using it. + if (x=${PS1@P}) 2> /dev/null; then + printf "%%s" "${PS1@P}${COMP_LINE[@]}" + else + # Can't print the prompt. Just print the + # text the user had typed, it is workable enough. + printf "%%s" "${COMP_LINE[@]}" + fi + fi +} + +# Separate activeHelp lines from real completions. +# Fills the $activeHelp and $completions arrays. 
+__%[1]s_extract_activeHelp() { + local activeHelpMarker="%[9]s" + local endIndex=${#activeHelpMarker} + + while IFS='' read -r comp; do + if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then + comp=${comp:endIndex} + __%[1]s_debug "ActiveHelp found: $comp" + if [[ -n $comp ]]; then + activeHelp+=("$comp") + fi + else + # Not an activeHelp line but a normal completion + completions+=("$comp") + fi + done <<<"${out}" +} + +__%[1]s_handle_completion_types() { + __%[1]s_debug "__%[1]s_handle_completion_types: COMP_TYPE is $COMP_TYPE" + + case $COMP_TYPE in + 37|42) + # Type: menu-complete/menu-complete-backward and insert-completions + # If the user requested inserting one completion at a time, or all + # completions at once on the command-line we must remove the descriptions. + # https://github.com/spf13/cobra/issues/1508 + local tab=$'\t' comp + while IFS='' read -r comp; do + [[ -z $comp ]] && continue + # Strip any description + comp=${comp%%%%$tab*} + # Only consider the completions that match + if [[ $comp == "$cur"* ]]; then + COMPREPLY+=("$comp") + fi + done < <(printf "%%s\n" "${completions[@]}") + ;; + + *) + # Type: complete (normal completion) + __%[1]s_handle_standard_completion_case + ;; + esac +} + +__%[1]s_handle_standard_completion_case() { + local tab=$'\t' comp + + # Short circuit to optimize if we don't have descriptions + if [[ "${completions[*]}" != *$tab* ]]; then + IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur") + return 0 + fi + + local longest=0 + local compline + # Look for the longest completion so that we can format things nicely + while IFS='' read -r compline; do + [[ -z $compline ]] && continue + # Strip any description before checking the length + comp=${compline%%%%$tab*} + # Only consider the completions that match + [[ $comp == "$cur"* ]] || continue + COMPREPLY+=("$compline") + if ((${#comp}>longest)); then + longest=${#comp} + fi + done < <(printf "%%s\n" "${completions[@]}") + + # If there is a single completion left, remove the description text + if ((${#COMPREPLY[*]} == 1)); then + __%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}" + comp="${COMPREPLY[0]%%%%$tab*}" + __%[1]s_debug "Removed description from single completion, which is now: ${comp}" + COMPREPLY[0]=$comp + else # Format the descriptions + __%[1]s_format_comp_descriptions $longest + fi +} + +__%[1]s_handle_special_char() +{ + local comp="$1" + local char=$2 + if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then + local word=${comp%%"${comp##*${char}}"} + local idx=${#COMPREPLY[*]} + while ((--idx >= 0)); do + COMPREPLY[idx]=${COMPREPLY[idx]#"$word"} + done + fi +} + +__%[1]s_format_comp_descriptions() +{ + local tab=$'\t' + local comp desc maxdesclength + local longest=$1 + + local i ci + for ci in ${!COMPREPLY[*]}; do + comp=${COMPREPLY[ci]} + # Properly format the description string which follows a tab character if there is one + if [[ "$comp" == *$tab* ]]; then + __%[1]s_debug "Original comp: $comp" + desc=${comp#*$tab} + comp=${comp%%%%$tab*} + + # $COLUMNS stores the current shell width. + # Remove an extra 4 because we add 2 spaces and 2 parentheses. + maxdesclength=$(( COLUMNS - longest - 4 )) + + # Make sure we can fit a description of at least 8 characters + # if we are to align the descriptions. 
+ if ((maxdesclength > 8)); then + # Add the proper number of spaces to align the descriptions + for ((i = ${#comp} ; i < longest ; i++)); do + comp+=" " + done + else + # Don't pad the descriptions so we can fit more text after the completion + maxdesclength=$(( COLUMNS - ${#comp} - 4 )) + fi + + # If there is enough space for any description text, + # truncate the descriptions that are too long for the shell width + if ((maxdesclength > 0)); then + if ((${#desc} > maxdesclength)); then + desc=${desc:0:$(( maxdesclength - 1 ))} + desc+="…" + fi + comp+=" ($desc)" + fi + COMPREPLY[ci]=$comp + __%[1]s_debug "Final comp: $comp" + fi + done +} + +__start_%[1]s() +{ + local cur prev words cword split + + COMPREPLY=() + + # Call _init_completion from the bash-completion package + # to prepare the arguments properly + if declare -F _init_completion >/dev/null 2>&1; then + _init_completion -n =: || return + else + __%[1]s_init_completion -n =: || return + fi + + __%[1]s_debug + __%[1]s_debug "========= starting completion logic ==========" + __%[1]s_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword" + + # The user could have moved the cursor backwards on the command-line. + # We need to trigger completion from the $cword location, so we need + # to truncate the command-line ($words) up to the $cword location. + words=("${words[@]:0:$cword+1}") + __%[1]s_debug "Truncated words[*]: ${words[*]}," + + local out directive + __%[1]s_get_completion_results + __%[1]s_process_completion_results +} + +if [[ $(type -t compopt) = "builtin" ]]; then + complete -o default -F __start_%[1]s %[1]s +else + complete -o default -o nospace -F __start_%[1]s %[1]s +fi + +# ex: ts=4 sw=4 et filetype=sh +`, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, + activeHelpMarker)) +} + +// GenBashCompletionFileV2 generates Bash completion version 2. +func (c *Command) GenBashCompletionFileV2(filename string, includeDesc bool) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.GenBashCompletionV2(outFile, includeDesc) +} + +// GenBashCompletionV2 generates Bash completion file version 2 +// and writes it to the passed writer. +func (c *Command) GenBashCompletionV2(w io.Writer, includeDesc bool) error { + return c.genBashCompletion(w, includeDesc) +} diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go new file mode 100644 index 0000000000..a6b160ce53 --- /dev/null +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -0,0 +1,244 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Commands similar to git, go tools and other modern CLI tools +// inspired by go, go-Commander, gh and subcommand + +package cobra + +import ( + "fmt" + "io" + "os" + "reflect" + "strconv" + "strings" + "text/template" + "time" + "unicode" +) + +var templateFuncs = template.FuncMap{ + "trim": strings.TrimSpace, + "trimRightSpace": trimRightSpace, + "trimTrailingWhitespaces": trimRightSpace, + "appendIfNotPresent": appendIfNotPresent, + "rpad": rpad, + "gt": Gt, + "eq": Eq, +} + +var initializers []func() +var finalizers []func() + +const ( + defaultPrefixMatching = false + defaultCommandSorting = true + defaultCaseInsensitive = false + defaultTraverseRunHooks = false +) + +// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing +// to automatically enable in CLI tools. +// Set this to true to enable it. +var EnablePrefixMatching = defaultPrefixMatching + +// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default. +// To disable sorting, set it to false. +var EnableCommandSorting = defaultCommandSorting + +// EnableCaseInsensitive allows case-insensitive commands names. (case sensitive by default) +var EnableCaseInsensitive = defaultCaseInsensitive + +// EnableTraverseRunHooks executes persistent pre-run and post-run hooks from all parents. +// By default this is disabled, which means only the first run hook to be found is executed. +var EnableTraverseRunHooks = defaultTraverseRunHooks + +// MousetrapHelpText enables an information splash screen on Windows +// if the CLI is started from explorer.exe. +// To disable the mousetrap, just set this variable to blank string (""). +// Works only on Microsoft Windows. +var MousetrapHelpText = `This is a command line tool. + +You need to open cmd.exe and run it from there. +` + +// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows +// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed. +// To disable the mousetrap, just set MousetrapHelpText to blank string (""). +// Works only on Microsoft Windows. +var MousetrapDisplayDuration = 5 * time.Second + +// AddTemplateFunc adds a template function that's available to Usage and Help +// template generation. +func AddTemplateFunc(name string, tmplFunc interface{}) { + templateFuncs[name] = tmplFunc +} + +// AddTemplateFuncs adds multiple template functions that are available to Usage and +// Help template generation. +func AddTemplateFuncs(tmplFuncs template.FuncMap) { + for k, v := range tmplFuncs { + templateFuncs[k] = v + } +} + +// OnInitialize sets the passed functions to be run when each command's +// Execute method is called. +func OnInitialize(y ...func()) { + initializers = append(initializers, y...) +} + +// OnFinalize sets the passed functions to be run when each command's +// Execute method is terminated. +func OnFinalize(y ...func()) { + finalizers = append(finalizers, y...) +} + +// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. + +// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, +// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as +// ints and then compared. 
+func Gt(a interface{}, b interface{}) bool { + var left, right int64 + av := reflect.ValueOf(a) + + switch av.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + left = int64(av.Len()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + left = av.Int() + case reflect.String: + left, _ = strconv.ParseInt(av.String(), 10, 64) + } + + bv := reflect.ValueOf(b) + + switch bv.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + right = int64(bv.Len()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + right = bv.Int() + case reflect.String: + right, _ = strconv.ParseInt(bv.String(), 10, 64) + } + + return left > right +} + +// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. + +// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic. +func Eq(a interface{}, b interface{}) bool { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + panic("Eq called on unsupported type") + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() == bv.Int() + case reflect.String: + return av.String() == bv.String() + } + return false +} + +func trimRightSpace(s string) string { + return strings.TrimRightFunc(s, unicode.IsSpace) +} + +// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra. + +// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s. +func appendIfNotPresent(s, stringToAppend string) string { + if strings.Contains(s, stringToAppend) { + return s + } + return s + " " + stringToAppend +} + +// rpad adds padding to the right of a string. +func rpad(s string, padding int) string { + formattedString := fmt.Sprintf("%%-%ds", padding) + return fmt.Sprintf(formattedString, s) +} + +// tmpl executes the given template text on data, writing the result to w. +func tmpl(w io.Writer, text string, data interface{}) error { + t := template.New("top") + t.Funcs(templateFuncs) + template.Must(t.Parse(text)) + return t.Execute(w, data) +} + +// ld compares two strings and returns the levenshtein distance between them. +func ld(s, t string, ignoreCase bool) int { + if ignoreCase { + s = strings.ToLower(s) + t = strings.ToLower(t) + } + d := make([][]int, len(s)+1) + for i := range d { + d[i] = make([]int, len(t)+1) + } + for i := range d { + d[i][0] = i + } + for j := range d[0] { + d[0][j] = j + } + for j := 1; j <= len(t); j++ { + for i := 1; i <= len(s); i++ { + if s[i-1] == t[j-1] { + d[i][j] = d[i-1][j-1] + } else { + min := d[i-1][j] + if d[i][j-1] < min { + min = d[i][j-1] + } + if d[i-1][j-1] < min { + min = d[i-1][j-1] + } + d[i][j] = min + 1 + } + } + + } + return d[len(s)][len(t)] +} + +func stringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} + +// CheckErr prints the msg with the prefix 'Error:' and exits with error code 1. If the msg is nil, it does nothing. +func CheckErr(msg interface{}) { + if msg != nil { + fmt.Fprintln(os.Stderr, "Error:", msg) + os.Exit(1) + } +} + +// WriteStringAndCheck writes a string into a buffer, and checks if the error is not nil. 
+func WriteStringAndCheck(b io.StringWriter, s string) { + _, err := b.WriteString(s) + CheckErr(err) +} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go new file mode 100644 index 0000000000..2fbe6c131a --- /dev/null +++ b/vendor/github.com/spf13/cobra/command.go @@ -0,0 +1,1885 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cobra is a commander providing a simple interface to create powerful modern CLI interfaces. +// In addition to providing an interface, Cobra simultaneously provides a controller to organize your application code. +package cobra + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + + flag "github.com/spf13/pflag" +) + +const ( + FlagSetByCobraAnnotation = "cobra_annotation_flag_set_by_cobra" + CommandDisplayNameAnnotation = "cobra_annotation_command_display_name" +) + +// FParseErrWhitelist configures Flag parse errors to be ignored +type FParseErrWhitelist flag.ParseErrorsWhitelist + +// Group Structure to manage groups for commands +type Group struct { + ID string + Title string +} + +// Command is just that, a command for your application. +// E.g. 'go run ...' - 'run' is the command. Cobra requires +// you to define the usage and description as part of your command +// definition to ensure usability. +type Command struct { + // Use is the one-line usage message. + // Recommended syntax is as follows: + // [ ] identifies an optional argument. Arguments that are not enclosed in brackets are required. + // ... indicates that you can specify multiple values for the previous argument. + // | indicates mutually exclusive information. You can use the argument to the left of the separator or the + // argument to the right of the separator. You cannot use both arguments in a single use of the command. + // { } delimits a set of mutually exclusive arguments when one of the arguments is required. If the arguments are + // optional, they are enclosed in brackets ([ ]). + // Example: add [-F file | -D dir]... [-f format] profile + Use string + + // Aliases is an array of aliases that can be used instead of the first word in Use. + Aliases []string + + // SuggestFor is an array of command names for which this command will be suggested - + // similar to aliases but only suggests. + SuggestFor []string + + // Short is the short description shown in the 'help' output. + Short string + + // The group id under which this subcommand is grouped in the 'help' output of its parent. + GroupID string + + // Long is the long message shown in the 'help ' output. + Long string + + // Example is examples of how to use the command. + Example string + + // ValidArgs is list of all valid non-flag arguments that are accepted in shell completions + ValidArgs []string + // ValidArgsFunction is an optional function that provides valid non-flag arguments for shell completion. + // It is a dynamic version of using ValidArgs. 
+ // Only one of ValidArgs and ValidArgsFunction can be used for a command. + ValidArgsFunction func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) + + // Expected arguments + Args PositionalArgs + + // ArgAliases is List of aliases for ValidArgs. + // These are not suggested to the user in the shell completion, + // but accepted if entered manually. + ArgAliases []string + + // BashCompletionFunction is custom bash functions used by the legacy bash autocompletion generator. + // For portability with other shells, it is recommended to instead use ValidArgsFunction + BashCompletionFunction string + + // Deprecated defines, if this command is deprecated and should print this string when used. + Deprecated string + + // Annotations are key/value pairs that can be used by applications to identify or + // group commands or set special options. + Annotations map[string]string + + // Version defines the version for this command. If this value is non-empty and the command does not + // define a "version" flag, a "version" boolean flag will be added to the command and, if specified, + // will print content of the "Version" variable. A shorthand "v" flag will also be added if the + // command does not define one. + Version string + + // The *Run functions are executed in the following order: + // * PersistentPreRun() + // * PreRun() + // * Run() + // * PostRun() + // * PersistentPostRun() + // All functions get the same args, the arguments after the command name. + // The *PreRun and *PostRun functions will only be executed if the Run function of the current + // command has been declared. + // + // PersistentPreRun: children of this command will inherit and execute. + PersistentPreRun func(cmd *Command, args []string) + // PersistentPreRunE: PersistentPreRun but returns an error. + PersistentPreRunE func(cmd *Command, args []string) error + // PreRun: children of this command will not inherit. + PreRun func(cmd *Command, args []string) + // PreRunE: PreRun but returns an error. + PreRunE func(cmd *Command, args []string) error + // Run: Typically the actual work function. Most commands will only implement this. + Run func(cmd *Command, args []string) + // RunE: Run but returns an error. + RunE func(cmd *Command, args []string) error + // PostRun: run after the Run command. + PostRun func(cmd *Command, args []string) + // PostRunE: PostRun but returns an error. + PostRunE func(cmd *Command, args []string) error + // PersistentPostRun: children of this command will inherit and execute after PostRun. + PersistentPostRun func(cmd *Command, args []string) + // PersistentPostRunE: PersistentPostRun but returns an error. + PersistentPostRunE func(cmd *Command, args []string) error + + // groups for subcommands + commandgroups []*Group + + // args is actual args parsed from flags. + args []string + // flagErrorBuf contains all error messages from pflag. + flagErrorBuf *bytes.Buffer + // flags is full set of flags. + flags *flag.FlagSet + // pflags contains persistent flags. + pflags *flag.FlagSet + // lflags contains local flags. + lflags *flag.FlagSet + // iflags contains inherited flags. + iflags *flag.FlagSet + // parentsPflags is all persistent flags of cmd's parents. + parentsPflags *flag.FlagSet + // globNormFunc is the global normalization function + // that we can use on every pflag set and children commands + globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName + + // usageFunc is usage func defined by user. 
+ usageFunc func(*Command) error + // usageTemplate is usage template defined by user. + usageTemplate string + // flagErrorFunc is func defined by user and it's called when the parsing of + // flags returns an error. + flagErrorFunc func(*Command, error) error + // helpTemplate is help template defined by user. + helpTemplate string + // helpFunc is help func defined by user. + helpFunc func(*Command, []string) + // helpCommand is command with usage 'help'. If it's not defined by user, + // cobra uses default help command. + helpCommand *Command + // helpCommandGroupID is the group id for the helpCommand + helpCommandGroupID string + + // completionCommandGroupID is the group id for the completion command + completionCommandGroupID string + + // versionTemplate is the version template defined by user. + versionTemplate string + + // errPrefix is the error message prefix defined by user. + errPrefix string + + // inReader is a reader defined by the user that replaces stdin + inReader io.Reader + // outWriter is a writer defined by the user that replaces stdout + outWriter io.Writer + // errWriter is a writer defined by the user that replaces stderr + errWriter io.Writer + + // FParseErrWhitelist flag parse errors to be ignored + FParseErrWhitelist FParseErrWhitelist + + // CompletionOptions is a set of options to control the handling of shell completion + CompletionOptions CompletionOptions + + // commandsAreSorted defines, if command slice are sorted or not. + commandsAreSorted bool + // commandCalledAs is the name or alias value used to call this command. + commandCalledAs struct { + name string + called bool + } + + ctx context.Context + + // commands is the list of commands supported by this program. + commands []*Command + // parent is a parent command for this command. + parent *Command + // Max lengths of commands' string lengths for use in padding. + commandsMaxUseLen int + commandsMaxCommandPathLen int + commandsMaxNameLen int + + // TraverseChildren parses flags on all parents before executing child command. + TraverseChildren bool + + // Hidden defines, if this command is hidden and should NOT show up in the list of available commands. + Hidden bool + + // SilenceErrors is an option to quiet errors down stream. + SilenceErrors bool + + // SilenceUsage is an option to silence usage when an error occurs. + SilenceUsage bool + + // DisableFlagParsing disables the flag parsing. + // If this is true all flags will be passed to the command as arguments. + DisableFlagParsing bool + + // DisableAutoGenTag defines, if gen tag ("Auto generated by spf13/cobra...") + // will be printed by generating docs for this command. + DisableAutoGenTag bool + + // DisableFlagsInUseLine will disable the addition of [flags] to the usage + // line of a command when printing help or generating docs + DisableFlagsInUseLine bool + + // DisableSuggestions disables the suggestions based on Levenshtein distance + // that go along with 'unknown command' messages. + DisableSuggestions bool + + // SuggestionsMinimumDistance defines minimum levenshtein distance to display suggestions. + // Must be > 0. + SuggestionsMinimumDistance int +} + +// Context returns underlying command context. If command was executed +// with ExecuteContext or the context was set with SetContext, the +// previously set context will be returned. Otherwise, nil is returned. 
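Putting the exported fields of the Command struct above together: a typical subcommand wires Use, Short, Args, ValidArgsFunction and RunE. This is a minimal sketch with invented names; ExactArgs comes from cobra's positional-args helpers defined elsewhere in the package, not in the lines shown here.

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

func newGetCmd() *cobra.Command {
    return &cobra.Command{
        Use:   "get <name>",
        Short: "Fetch an object by name",
        Args:  cobra.ExactArgs(1),
        // Dynamic completions instead of a static ValidArgs list.
        ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
            return []string{"alpha", "beta"}, cobra.ShellCompDirectiveNoFileComp
        },
        RunE: func(cmd *cobra.Command, args []string) error {
            fmt.Println("getting", args[0])
            return nil
        },
    }
}

func main() {
    root := &cobra.Command{Use: "mycli"}
    root.AddCommand(newGetCmd())
    _ = root.Execute()
}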
+// +// Notice that a call to Execute and ExecuteC will replace a nil context of +// a command with a context.Background, so a background context will be +// returned by Context after one of these functions has been called. +func (c *Command) Context() context.Context { + return c.ctx +} + +// SetContext sets context for the command. This context will be overwritten by +// Command.ExecuteContext or Command.ExecuteContextC. +func (c *Command) SetContext(ctx context.Context) { + c.ctx = ctx +} + +// SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden +// particularly useful when testing. +func (c *Command) SetArgs(a []string) { + c.args = a +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +// Deprecated: Use SetOut and/or SetErr instead +func (c *Command) SetOutput(output io.Writer) { + c.outWriter = output + c.errWriter = output +} + +// SetOut sets the destination for usage messages. +// If newOut is nil, os.Stdout is used. +func (c *Command) SetOut(newOut io.Writer) { + c.outWriter = newOut +} + +// SetErr sets the destination for error messages. +// If newErr is nil, os.Stderr is used. +func (c *Command) SetErr(newErr io.Writer) { + c.errWriter = newErr +} + +// SetIn sets the source for input data +// If newIn is nil, os.Stdin is used. +func (c *Command) SetIn(newIn io.Reader) { + c.inReader = newIn +} + +// SetUsageFunc sets usage function. Usage can be defined by application. +func (c *Command) SetUsageFunc(f func(*Command) error) { + c.usageFunc = f +} + +// SetUsageTemplate sets usage template. Can be defined by Application. +func (c *Command) SetUsageTemplate(s string) { + c.usageTemplate = s +} + +// SetFlagErrorFunc sets a function to generate an error when flag parsing +// fails. +func (c *Command) SetFlagErrorFunc(f func(*Command, error) error) { + c.flagErrorFunc = f +} + +// SetHelpFunc sets help function. Can be defined by Application. +func (c *Command) SetHelpFunc(f func(*Command, []string)) { + c.helpFunc = f +} + +// SetHelpCommand sets help command. +func (c *Command) SetHelpCommand(cmd *Command) { + c.helpCommand = cmd +} + +// SetHelpCommandGroupID sets the group id of the help command. +func (c *Command) SetHelpCommandGroupID(groupID string) { + if c.helpCommand != nil { + c.helpCommand.GroupID = groupID + } + // helpCommandGroupID is used if no helpCommand is defined by the user + c.helpCommandGroupID = groupID +} + +// SetCompletionCommandGroupID sets the group id of the completion command. +func (c *Command) SetCompletionCommandGroupID(groupID string) { + // completionCommandGroupID is used if no completion command is defined by the user + c.Root().completionCommandGroupID = groupID +} + +// SetHelpTemplate sets help template to be used. Application can use it to set custom template. +func (c *Command) SetHelpTemplate(s string) { + c.helpTemplate = s +} + +// SetVersionTemplate sets version template to be used. Application can use it to set custom template. +func (c *Command) SetVersionTemplate(s string) { + c.versionTemplate = s +} + +// SetErrPrefix sets error message prefix to be used. Application can use it to set custom prefix. +func (c *Command) SetErrPrefix(s string) { + c.errPrefix = s +} + +// SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. +// The user should not have a cyclic dependency on commands. 
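The SetArgs/SetOut/SetErr setters above are what make a command tree testable without touching os.Args or real stdio; a sketch of the usual pattern (package and test names are illustrative):

package cli // hypothetical package name for the sketch

import (
    "bytes"
    "strings"
    "testing"

    "github.com/spf13/cobra"
)

func TestRootCommandOutput(t *testing.T) {
    root := &cobra.Command{
        Use: "mycli",
        RunE: func(cmd *cobra.Command, args []string) error {
            cmd.Println("args:", strings.Join(args, ","))
            return nil
        },
    }

    out := new(bytes.Buffer)
    root.SetOut(out)                     // capture normal/usage output
    root.SetErr(out)                     // capture error output
    root.SetArgs([]string{"foo", "bar"}) // instead of os.Args[1:]

    if err := root.Execute(); err != nil {
        t.Fatal(err)
    }
    if !strings.Contains(out.String(), "foo,bar") {
        t.Fatalf("unexpected output: %q", out.String())
    }
}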
+func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { + c.Flags().SetNormalizeFunc(n) + c.PersistentFlags().SetNormalizeFunc(n) + c.globNormFunc = n + + for _, command := range c.commands { + command.SetGlobalNormalizationFunc(n) + } +} + +// OutOrStdout returns output to stdout. +func (c *Command) OutOrStdout() io.Writer { + return c.getOut(os.Stdout) +} + +// OutOrStderr returns output to stderr +func (c *Command) OutOrStderr() io.Writer { + return c.getOut(os.Stderr) +} + +// ErrOrStderr returns output to stderr +func (c *Command) ErrOrStderr() io.Writer { + return c.getErr(os.Stderr) +} + +// InOrStdin returns input to stdin +func (c *Command) InOrStdin() io.Reader { + return c.getIn(os.Stdin) +} + +func (c *Command) getOut(def io.Writer) io.Writer { + if c.outWriter != nil { + return c.outWriter + } + if c.HasParent() { + return c.parent.getOut(def) + } + return def +} + +func (c *Command) getErr(def io.Writer) io.Writer { + if c.errWriter != nil { + return c.errWriter + } + if c.HasParent() { + return c.parent.getErr(def) + } + return def +} + +func (c *Command) getIn(def io.Reader) io.Reader { + if c.inReader != nil { + return c.inReader + } + if c.HasParent() { + return c.parent.getIn(def) + } + return def +} + +// UsageFunc returns either the function set by SetUsageFunc for this command +// or a parent, or it returns a default usage function. +func (c *Command) UsageFunc() (f func(*Command) error) { + if c.usageFunc != nil { + return c.usageFunc + } + if c.HasParent() { + return c.Parent().UsageFunc() + } + return func(c *Command) error { + c.mergePersistentFlags() + err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) + if err != nil { + c.PrintErrln(err) + } + return err + } +} + +// Usage puts out the usage for the command. +// Used when a user provides invalid input. +// Can be defined by user by overriding UsageFunc. +func (c *Command) Usage() error { + return c.UsageFunc()(c) +} + +// HelpFunc returns either the function set by SetHelpFunc for this command +// or a parent, or it returns a function with default help behavior. +func (c *Command) HelpFunc() func(*Command, []string) { + if c.helpFunc != nil { + return c.helpFunc + } + if c.HasParent() { + return c.Parent().HelpFunc() + } + return func(c *Command, a []string) { + c.mergePersistentFlags() + // The help should be sent to stdout + // See https://github.com/spf13/cobra/issues/1002 + err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) + if err != nil { + c.PrintErrln(err) + } + } +} + +// Help puts out the help for the command. +// Used when a user calls help [command]. +// Can be defined by user by overriding HelpFunc. +func (c *Command) Help() error { + c.HelpFunc()(c, []string{}) + return nil +} + +// UsageString returns usage string. +func (c *Command) UsageString() string { + // Storing normal writers + tmpOutput := c.outWriter + tmpErr := c.errWriter + + bb := new(bytes.Buffer) + c.outWriter = bb + c.errWriter = bb + + CheckErr(c.Usage()) + + // Setting things back to normal + c.outWriter = tmpOutput + c.errWriter = tmpErr + + return bb.String() +} + +// FlagErrorFunc returns either the function set by SetFlagErrorFunc for this +// command or a parent, or it returns a function which returns the original +// error. 
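Because UsageFunc, HelpFunc and FlagErrorFunc above all walk up to the parent when unset, overriding them once on the root changes behaviour for the whole tree. A small hedged sketch (the docs URL is a placeholder):

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

func configureHelp(root *cobra.Command) {
    // Prepend a hint, then fall back to the normal usage output.
    root.SetHelpFunc(func(cmd *cobra.Command, args []string) {
        cmd.Println("Docs: https://example.invalid/mycli (placeholder)")
        _ = cmd.Usage()
    })

    // Turn flag-parse errors into a "see --help" message.
    root.SetFlagErrorFunc(func(cmd *cobra.Command, err error) error {
        return fmt.Errorf("%w\nRun '%s --help' for usage", err, cmd.CommandPath())
    })
}

func main() {
    root := &cobra.Command{Use: "mycli", Run: func(*cobra.Command, []string) {}}
    configureHelp(root)
    _ = root.Execute()
}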
+func (c *Command) FlagErrorFunc() (f func(*Command, error) error) { + if c.flagErrorFunc != nil { + return c.flagErrorFunc + } + + if c.HasParent() { + return c.parent.FlagErrorFunc() + } + return func(c *Command, err error) error { + return err + } +} + +var minUsagePadding = 25 + +// UsagePadding return padding for the usage. +func (c *Command) UsagePadding() int { + if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen { + return minUsagePadding + } + return c.parent.commandsMaxUseLen +} + +var minCommandPathPadding = 11 + +// CommandPathPadding return padding for the command path. +func (c *Command) CommandPathPadding() int { + if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen { + return minCommandPathPadding + } + return c.parent.commandsMaxCommandPathLen +} + +var minNamePadding = 11 + +// NamePadding returns padding for the name. +func (c *Command) NamePadding() int { + if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen { + return minNamePadding + } + return c.parent.commandsMaxNameLen +} + +// UsageTemplate returns usage template for the command. +func (c *Command) UsageTemplate() string { + if c.usageTemplate != "" { + return c.usageTemplate + } + + if c.HasParent() { + return c.parent.UsageTemplate() + } + return `Usage:{{if .Runnable}} + {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}} + {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}} + +Aliases: + {{.NameAndAliases}}{{end}}{{if .HasExample}} + +Examples: +{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{$cmds := .Commands}}{{if eq (len .Groups) 0}} + +Available Commands:{{range $cmds}}{{if (or .IsAvailableCommand (eq .Name "help"))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{else}}{{range $group := .Groups}} + +{{.Title}}{{range $cmds}}{{if (and (eq .GroupID $group.ID) (or .IsAvailableCommand (eq .Name "help")))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if not .AllChildCommandsHaveGroup}} + +Additional Commands:{{range $cmds}}{{if (and (eq .GroupID "") (or .IsAvailableCommand (eq .Name "help")))}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}} + +Flags: +{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}} + +Global Flags: +{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}} + +Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}} + +Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} +` +} + +// HelpTemplate return help template for the command. +func (c *Command) HelpTemplate() string { + if c.helpTemplate != "" { + return c.helpTemplate + } + + if c.HasParent() { + return c.parent.HelpTemplate() + } + return `{{with (or .Long .Short)}}{{. | trimTrailingWhitespaces}} + +{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` +} + +// VersionTemplate return version template for the command. 
+func (c *Command) VersionTemplate() string { + if c.versionTemplate != "" { + return c.versionTemplate + } + + if c.HasParent() { + return c.parent.VersionTemplate() + } + return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} +` +} + +// ErrPrefix return error message prefix for the command +func (c *Command) ErrPrefix() string { + if c.errPrefix != "" { + return c.errPrefix + } + + if c.HasParent() { + return c.parent.ErrPrefix() + } + return "Error:" +} + +func hasNoOptDefVal(name string, fs *flag.FlagSet) bool { + flag := fs.Lookup(name) + if flag == nil { + return false + } + return flag.NoOptDefVal != "" +} + +func shortHasNoOptDefVal(name string, fs *flag.FlagSet) bool { + if len(name) == 0 { + return false + } + + flag := fs.ShorthandLookup(name[:1]) + if flag == nil { + return false + } + return flag.NoOptDefVal != "" +} + +func stripFlags(args []string, c *Command) []string { + if len(args) == 0 { + return args + } + c.mergePersistentFlags() + + commands := []string{} + flags := c.Flags() + +Loop: + for len(args) > 0 { + s := args[0] + args = args[1:] + switch { + case s == "--": + // "--" terminates the flags + break Loop + case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags): + // If '--flag arg' then + // delete arg from args. + fallthrough // (do the same as below) + case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags): + // If '-f arg' then + // delete 'arg' from args or break the loop if len(args) <= 1. + if len(args) <= 1 { + break Loop + } else { + args = args[1:] + continue + } + case s != "" && !strings.HasPrefix(s, "-"): + commands = append(commands, s) + } + } + + return commands +} + +// argsMinusFirstX removes only the first x from args. Otherwise, commands that look like +// openshift admin policy add-role-to-user admin my-user, lose the admin argument (arg[4]). +// Special care needs to be taken not to remove a flag value. +func (c *Command) argsMinusFirstX(args []string, x string) []string { + if len(args) == 0 { + return args + } + c.mergePersistentFlags() + flags := c.Flags() + +Loop: + for pos := 0; pos < len(args); pos++ { + s := args[pos] + switch { + case s == "--": + // -- means we have reached the end of the parseable args. Break out of the loop now. + break Loop + case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags): + fallthrough + case strings.HasPrefix(s, "-") && !strings.Contains(s, "=") && len(s) == 2 && !shortHasNoOptDefVal(s[1:], flags): + // This is a flag without a default value, and an equal sign is not used. Increment pos in order to skip + // over the next arg, because that is the value of this flag. + pos++ + continue + case !strings.HasPrefix(s, "-"): + // This is not a flag or a flag value. Check to see if it matches what we're looking for, and if so, + // return the args, excluding the one at this position. + if s == x { + ret := []string{} + ret = append(ret, args[:pos]...) + ret = append(ret, args[pos+1:]...) + return ret + } + } + } + return args +} + +func isFlagArg(arg string) bool { + return ((len(arg) >= 3 && arg[0:2] == "--") || + (len(arg) >= 2 && arg[0] == '-' && arg[1] != '-')) +} + +// Find the target command given the args and command tree +// Meant to be run on the highest node. Only searches down. 
+func (c *Command) Find(args []string) (*Command, []string, error) { + var innerfind func(*Command, []string) (*Command, []string) + + innerfind = func(c *Command, innerArgs []string) (*Command, []string) { + argsWOflags := stripFlags(innerArgs, c) + if len(argsWOflags) == 0 { + return c, innerArgs + } + nextSubCmd := argsWOflags[0] + + cmd := c.findNext(nextSubCmd) + if cmd != nil { + return innerfind(cmd, c.argsMinusFirstX(innerArgs, nextSubCmd)) + } + return c, innerArgs + } + + commandFound, a := innerfind(c, args) + if commandFound.Args == nil { + return commandFound, a, legacyArgs(commandFound, stripFlags(a, commandFound)) + } + return commandFound, a, nil +} + +func (c *Command) findSuggestions(arg string) string { + if c.DisableSuggestions { + return "" + } + if c.SuggestionsMinimumDistance <= 0 { + c.SuggestionsMinimumDistance = 2 + } + suggestionsString := "" + if suggestions := c.SuggestionsFor(arg); len(suggestions) > 0 { + suggestionsString += "\n\nDid you mean this?\n" + for _, s := range suggestions { + suggestionsString += fmt.Sprintf("\t%v\n", s) + } + } + return suggestionsString +} + +func (c *Command) findNext(next string) *Command { + matches := make([]*Command, 0) + for _, cmd := range c.commands { + if commandNameMatches(cmd.Name(), next) || cmd.HasAlias(next) { + cmd.commandCalledAs.name = next + return cmd + } + if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) { + matches = append(matches, cmd) + } + } + + if len(matches) == 1 { + // Temporarily disable gosec G602, which produces a false positive. + // See https://github.com/securego/gosec/issues/1005. + return matches[0] // #nosec G602 + } + + return nil +} + +// Traverse the command tree to find the command, and parse args for +// each parent. +func (c *Command) Traverse(args []string) (*Command, []string, error) { + flags := []string{} + inFlag := false + + for i, arg := range args { + switch { + // A long flag with a space separated value + case strings.HasPrefix(arg, "--") && !strings.Contains(arg, "="): + // TODO: this isn't quite right, we should really check ahead for 'true' or 'false' + inFlag = !hasNoOptDefVal(arg[2:], c.Flags()) + flags = append(flags, arg) + continue + // A short flag with a space separated value + case strings.HasPrefix(arg, "-") && !strings.Contains(arg, "=") && len(arg) == 2 && !shortHasNoOptDefVal(arg[1:], c.Flags()): + inFlag = true + flags = append(flags, arg) + continue + // The value for a flag + case inFlag: + inFlag = false + flags = append(flags, arg) + continue + // A flag without a value, or with an `=` separated value + case isFlagArg(arg): + flags = append(flags, arg) + continue + } + + cmd := c.findNext(arg) + if cmd == nil { + return c, args, nil + } + + if err := c.ParseFlags(flags); err != nil { + return nil, args, err + } + return cmd.Traverse(args[i+1:]) + } + return c, args, nil +} + +// SuggestionsFor provides suggestions for the typedName. 
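Find and Traverse above differ in who parses flags while walking the command tree; setting TraverseChildren on the root lets a parent's local flags appear before the subcommand name. A sketch under that assumption (flag and command names invented):

package main

import "github.com/spf13/cobra"

func main() {
    root := &cobra.Command{Use: "mycli", TraverseChildren: true}
    // A local (non-persistent) flag on the root.
    root.Flags().Bool("verbose", false, "verbose output (illustrative)")

    root.AddCommand(&cobra.Command{
        Use: "status",
        Run: func(cmd *cobra.Command, args []string) {},
    })

    // With TraverseChildren, "mycli --verbose status" lets the root consume
    // --verbose on the way down to "status"; without it, Find() would hand
    // --verbose to "status", which does not define that flag.
    _ = root.Execute()
}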
+func (c *Command) SuggestionsFor(typedName string) []string { + suggestions := []string{} + for _, cmd := range c.commands { + if cmd.IsAvailableCommand() { + levenshteinDistance := ld(typedName, cmd.Name(), true) + suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance + suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName)) + if suggestByLevenshtein || suggestByPrefix { + suggestions = append(suggestions, cmd.Name()) + } + for _, explicitSuggestion := range cmd.SuggestFor { + if strings.EqualFold(typedName, explicitSuggestion) { + suggestions = append(suggestions, cmd.Name()) + } + } + } + } + return suggestions +} + +// VisitParents visits all parents of the command and invokes fn on each parent. +func (c *Command) VisitParents(fn func(*Command)) { + if c.HasParent() { + fn(c.Parent()) + c.Parent().VisitParents(fn) + } +} + +// Root finds root command. +func (c *Command) Root() *Command { + if c.HasParent() { + return c.Parent().Root() + } + return c +} + +// ArgsLenAtDash will return the length of c.Flags().Args at the moment +// when a -- was found during args parsing. +func (c *Command) ArgsLenAtDash() int { + return c.Flags().ArgsLenAtDash() +} + +func (c *Command) execute(a []string) (err error) { + if c == nil { + return fmt.Errorf("Called Execute() on a nil Command") + } + + if len(c.Deprecated) > 0 { + c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) + } + + // initialize help and version flag at the last point possible to allow for user + // overriding + c.InitDefaultHelpFlag() + c.InitDefaultVersionFlag() + + err = c.ParseFlags(a) + if err != nil { + return c.FlagErrorFunc()(c, err) + } + + // If help is called, regardless of other flags, return we want help. + // Also say we need help if the command isn't runnable. + helpVal, err := c.Flags().GetBool("help") + if err != nil { + // should be impossible to get here as we always declare a help + // flag in InitDefaultHelpFlag() + c.Println("\"help\" flag declared as non-bool. Please correct your code") + return err + } + + if helpVal { + return flag.ErrHelp + } + + // for back-compat, only add version flag behavior if version is defined + if c.Version != "" { + versionVal, err := c.Flags().GetBool("version") + if err != nil { + c.Println("\"version\" flag declared as non-bool. Please correct your code") + return err + } + if versionVal { + err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c) + if err != nil { + c.Println(err) + } + return err + } + } + + if !c.Runnable() { + return flag.ErrHelp + } + + c.preRun() + + defer c.postRun() + + argWoFlags := c.Flags().Args() + if c.DisableFlagParsing { + argWoFlags = a + } + + if err := c.ValidateArgs(argWoFlags); err != nil { + return err + } + + parents := make([]*Command, 0, 5) + for p := c; p != nil; p = p.Parent() { + if EnableTraverseRunHooks { + // When EnableTraverseRunHooks is set: + // - Execute all persistent pre-runs from the root parent till this command. + // - Execute all persistent post-runs from this command till the root parent. + parents = append([]*Command{p}, parents...) + } else { + // Otherwise, execute only the first found persistent hook. 
+ parents = append(parents, p) + } + } + for _, p := range parents { + if p.PersistentPreRunE != nil { + if err := p.PersistentPreRunE(c, argWoFlags); err != nil { + return err + } + if !EnableTraverseRunHooks { + break + } + } else if p.PersistentPreRun != nil { + p.PersistentPreRun(c, argWoFlags) + if !EnableTraverseRunHooks { + break + } + } + } + if c.PreRunE != nil { + if err := c.PreRunE(c, argWoFlags); err != nil { + return err + } + } else if c.PreRun != nil { + c.PreRun(c, argWoFlags) + } + + if err := c.ValidateRequiredFlags(); err != nil { + return err + } + if err := c.ValidateFlagGroups(); err != nil { + return err + } + + if c.RunE != nil { + if err := c.RunE(c, argWoFlags); err != nil { + return err + } + } else { + c.Run(c, argWoFlags) + } + if c.PostRunE != nil { + if err := c.PostRunE(c, argWoFlags); err != nil { + return err + } + } else if c.PostRun != nil { + c.PostRun(c, argWoFlags) + } + for p := c; p != nil; p = p.Parent() { + if p.PersistentPostRunE != nil { + if err := p.PersistentPostRunE(c, argWoFlags); err != nil { + return err + } + if !EnableTraverseRunHooks { + break + } + } else if p.PersistentPostRun != nil { + p.PersistentPostRun(c, argWoFlags) + if !EnableTraverseRunHooks { + break + } + } + } + + return nil +} + +func (c *Command) preRun() { + for _, x := range initializers { + x() + } +} + +func (c *Command) postRun() { + for _, x := range finalizers { + x() + } +} + +// ExecuteContext is the same as Execute(), but sets the ctx on the command. +// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs +// functions. +func (c *Command) ExecuteContext(ctx context.Context) error { + c.ctx = ctx + return c.Execute() +} + +// Execute uses the args (os.Args[1:] by default) +// and run through the command tree finding appropriate matches +// for commands and then corresponding flags. +func (c *Command) Execute() error { + _, err := c.ExecuteC() + return err +} + +// ExecuteContextC is the same as ExecuteC(), but sets the ctx on the command. +// Retrieve ctx by calling cmd.Context() inside your *Run lifecycle or ValidArgs +// functions. +func (c *Command) ExecuteContextC(ctx context.Context) (*Command, error) { + c.ctx = ctx + return c.ExecuteC() +} + +// ExecuteC executes the command. 
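ExecuteContext above is the usual way to make a cancellable context visible to each RunE through cmd.Context(); a minimal sketch using signal.NotifyContext (assumed wiring, not part of this patch):

package main

import (
    "context"
    "os"
    "os/signal"
    "time"

    "github.com/spf13/cobra"
)

func main() {
    root := &cobra.Command{
        Use: "mycli",
        RunE: func(cmd *cobra.Command, args []string) error {
            // Long-running work should watch cmd.Context() for cancellation.
            select {
            case <-cmd.Context().Done():
                return cmd.Context().Err()
            case <-time.After(2 * time.Second): // placeholder for real work
                return nil
            }
        },
    }

    ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
    defer stop()

    if err := root.ExecuteContext(ctx); err != nil {
        os.Exit(1)
    }
}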
+func (c *Command) ExecuteC() (cmd *Command, err error) { + if c.ctx == nil { + c.ctx = context.Background() + } + + // Regardless of what command execute is called on, run on Root only + if c.HasParent() { + return c.Root().ExecuteC() + } + + // windows hook + if preExecHookFn != nil { + preExecHookFn(c) + } + + // initialize help at the last point to allow for user overriding + c.InitDefaultHelpCmd() + // initialize completion at the last point to allow for user overriding + c.InitDefaultCompletionCmd() + + // Now that all commands have been created, let's make sure all groups + // are properly created also + c.checkCommandGroups() + + args := c.args + + // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 + if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" { + args = os.Args[1:] + } + + // initialize the hidden command to be used for shell completion + c.initCompleteCmd(args) + + var flags []string + if c.TraverseChildren { + cmd, flags, err = c.Traverse(args) + } else { + cmd, flags, err = c.Find(args) + } + if err != nil { + // If found parse to a subcommand and then failed, talk about the subcommand + if cmd != nil { + c = cmd + } + if !c.SilenceErrors { + c.PrintErrln(c.ErrPrefix(), err.Error()) + c.PrintErrf("Run '%v --help' for usage.\n", c.CommandPath()) + } + return c, err + } + + cmd.commandCalledAs.called = true + if cmd.commandCalledAs.name == "" { + cmd.commandCalledAs.name = cmd.Name() + } + + // We have to pass global context to children command + // if context is present on the parent command. + if cmd.ctx == nil { + cmd.ctx = c.ctx + } + + err = cmd.execute(flags) + if err != nil { + // Always show help if requested, even if SilenceErrors is in + // effect + if errors.Is(err, flag.ErrHelp) { + cmd.HelpFunc()(cmd, args) + return cmd, nil + } + + // If root command has SilenceErrors flagged, + // all subcommands should respect it + if !cmd.SilenceErrors && !c.SilenceErrors { + c.PrintErrln(cmd.ErrPrefix(), err.Error()) + } + + // If root command has SilenceUsage flagged, + // all subcommands should respect it + if !cmd.SilenceUsage && !c.SilenceUsage { + c.Println(cmd.UsageString()) + } + } + return cmd, err +} + +func (c *Command) ValidateArgs(args []string) error { + if c.Args == nil { + return ArbitraryArgs(c, args) + } + return c.Args(c, args) +} + +// ValidateRequiredFlags validates all required flags are present and returns an error otherwise +func (c *Command) ValidateRequiredFlags() error { + if c.DisableFlagParsing { + return nil + } + + flags := c.Flags() + missingFlagNames := []string{} + flags.VisitAll(func(pflag *flag.Flag) { + requiredAnnotation, found := pflag.Annotations[BashCompOneRequiredFlag] + if !found { + return + } + if (requiredAnnotation[0] == "true") && !pflag.Changed { + missingFlagNames = append(missingFlagNames, pflag.Name) + } + }) + + if len(missingFlagNames) > 0 { + return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`)) + } + return nil +} + +// checkCommandGroups checks if a command has been added to a group that does not exists. +// If so, we panic because it indicates a coding error that should be corrected. 
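ValidateRequiredFlags above looks for the BashCompOneRequiredFlag annotation, which applications normally set through MarkFlagRequired (defined elsewhere in this package rather than in the lines shown). A hedged sketch:

package main

import (
    "os"

    "github.com/spf13/cobra"
)

func main() {
    cmd := &cobra.Command{
        Use:  "upload",
        RunE: func(cmd *cobra.Command, args []string) error { return nil },
    }
    cmd.Flags().String("bucket", "", "destination bucket (illustrative)")

    // Stores the annotation that ValidateRequiredFlags() checks before RunE runs.
    _ = cmd.MarkFlagRequired("bucket")

    // Running without --bucket now fails with:
    //   required flag(s) "bucket" not set
    if err := cmd.Execute(); err != nil {
        os.Exit(1)
    }
}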
+func (c *Command) checkCommandGroups() { + for _, sub := range c.commands { + // if Group is not defined let the developer know right away + if sub.GroupID != "" && !c.ContainsGroup(sub.GroupID) { + panic(fmt.Sprintf("group id '%s' is not defined for subcommand '%s'", sub.GroupID, sub.CommandPath())) + } + + sub.checkCommandGroups() + } +} + +// InitDefaultHelpFlag adds default help flag to c. +// It is called automatically by executing the c or by calling help and usage. +// If c already has help flag, it will do nothing. +func (c *Command) InitDefaultHelpFlag() { + c.mergePersistentFlags() + if c.Flags().Lookup("help") == nil { + usage := "help for " + if c.Name() == "" { + usage += "this command" + } else { + usage += c.Name() + } + c.Flags().BoolP("help", "h", false, usage) + _ = c.Flags().SetAnnotation("help", FlagSetByCobraAnnotation, []string{"true"}) + } +} + +// InitDefaultVersionFlag adds default version flag to c. +// It is called automatically by executing the c. +// If c already has a version flag, it will do nothing. +// If c.Version is empty, it will do nothing. +func (c *Command) InitDefaultVersionFlag() { + if c.Version == "" { + return + } + + c.mergePersistentFlags() + if c.Flags().Lookup("version") == nil { + usage := "version for " + if c.Name() == "" { + usage += "this command" + } else { + usage += c.Name() + } + if c.Flags().ShorthandLookup("v") == nil { + c.Flags().BoolP("version", "v", false, usage) + } else { + c.Flags().Bool("version", false, usage) + } + _ = c.Flags().SetAnnotation("version", FlagSetByCobraAnnotation, []string{"true"}) + } +} + +// InitDefaultHelpCmd adds default help command to c. +// It is called automatically by executing the c or by calling help and usage. +// If c already has help command or c has no subcommands, it will do nothing. +func (c *Command) InitDefaultHelpCmd() { + if !c.HasSubCommands() { + return + } + + if c.helpCommand == nil { + c.helpCommand = &Command{ + Use: "help [command]", + Short: "Help about any command", + Long: `Help provides help for any command in the application. +Simply type ` + c.Name() + ` help [path to command] for full details.`, + ValidArgsFunction: func(c *Command, args []string, toComplete string) ([]string, ShellCompDirective) { + var completions []string + cmd, _, e := c.Root().Find(args) + if e != nil { + return nil, ShellCompDirectiveNoFileComp + } + if cmd == nil { + // Root help command. + cmd = c.Root() + } + for _, subCmd := range cmd.Commands() { + if subCmd.IsAvailableCommand() || subCmd == cmd.helpCommand { + if strings.HasPrefix(subCmd.Name(), toComplete) { + completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + } + } + } + return completions, ShellCompDirectiveNoFileComp + }, + Run: func(c *Command, args []string) { + cmd, _, e := c.Root().Find(args) + if cmd == nil || e != nil { + c.Printf("Unknown help topic %#q\n", args) + CheckErr(c.Root().Usage()) + } else { + cmd.InitDefaultHelpFlag() // make possible 'help' flag to be shown + cmd.InitDefaultVersionFlag() // make possible 'version' flag to be shown + CheckErr(cmd.Help()) + } + }, + GroupID: c.helpCommandGroupID, + } + } + c.RemoveCommand(c.helpCommand) + c.AddCommand(c.helpCommand) +} + +// ResetCommands delete parent, subcommand and help command from c. +func (c *Command) ResetCommands() { + c.parent = nil + c.commands = nil + c.helpCommand = nil + c.parentsPflags = nil +} + +// Sorts commands by their names. 
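InitDefaultVersionFlag above only acts when Version is non-empty, so exposing --version (and -v when the shorthand is free) is just a matter of setting that field; illustrative values only:

package main

import "github.com/spf13/cobra"

func main() {
    root := &cobra.Command{
        Use:     "mycli",
        Version: "0.1.0-dev", // illustrative version string
        Run:     func(cmd *cobra.Command, args []string) {},
    }
    // "mycli --version" now renders the version template; a separate
    // "version" subcommand is not required.
    _ = root.Execute()
}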
+type commandSorterByName []*Command + +func (c commandSorterByName) Len() int { return len(c) } +func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] } +func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() } + +// Commands returns a sorted slice of child commands. +func (c *Command) Commands() []*Command { + // do not sort commands if it already sorted or sorting was disabled + if EnableCommandSorting && !c.commandsAreSorted { + sort.Sort(commandSorterByName(c.commands)) + c.commandsAreSorted = true + } + return c.commands +} + +// AddCommand adds one or more commands to this parent command. +func (c *Command) AddCommand(cmds ...*Command) { + for i, x := range cmds { + if cmds[i] == c { + panic("Command can't be a child of itself") + } + cmds[i].parent = c + // update max lengths + usageLen := len(x.Use) + if usageLen > c.commandsMaxUseLen { + c.commandsMaxUseLen = usageLen + } + commandPathLen := len(x.CommandPath()) + if commandPathLen > c.commandsMaxCommandPathLen { + c.commandsMaxCommandPathLen = commandPathLen + } + nameLen := len(x.Name()) + if nameLen > c.commandsMaxNameLen { + c.commandsMaxNameLen = nameLen + } + // If global normalization function exists, update all children + if c.globNormFunc != nil { + x.SetGlobalNormalizationFunc(c.globNormFunc) + } + c.commands = append(c.commands, x) + c.commandsAreSorted = false + } +} + +// Groups returns a slice of child command groups. +func (c *Command) Groups() []*Group { + return c.commandgroups +} + +// AllChildCommandsHaveGroup returns if all subcommands are assigned to a group +func (c *Command) AllChildCommandsHaveGroup() bool { + for _, sub := range c.commands { + if (sub.IsAvailableCommand() || sub == c.helpCommand) && sub.GroupID == "" { + return false + } + } + return true +} + +// ContainsGroup return if groupID exists in the list of command groups. +func (c *Command) ContainsGroup(groupID string) bool { + for _, x := range c.commandgroups { + if x.ID == groupID { + return true + } + } + return false +} + +// AddGroup adds one or more command groups to this parent command. +func (c *Command) AddGroup(groups ...*Group) { + c.commandgroups = append(c.commandgroups, groups...) +} + +// RemoveCommand removes one or more commands from a parent command. +func (c *Command) RemoveCommand(cmds ...*Command) { + commands := []*Command{} +main: + for _, command := range c.commands { + for _, cmd := range cmds { + if command == cmd { + command.parent = nil + continue main + } + } + commands = append(commands, command) + } + c.commands = commands + // recompute all lengths + c.commandsMaxUseLen = 0 + c.commandsMaxCommandPathLen = 0 + c.commandsMaxNameLen = 0 + for _, command := range c.commands { + usageLen := len(command.Use) + if usageLen > c.commandsMaxUseLen { + c.commandsMaxUseLen = usageLen + } + commandPathLen := len(command.CommandPath()) + if commandPathLen > c.commandsMaxCommandPathLen { + c.commandsMaxCommandPathLen = commandPathLen + } + nameLen := len(command.Name()) + if nameLen > c.commandsMaxNameLen { + c.commandsMaxNameLen = nameLen + } + } +} + +// Print is a convenience method to Print to the defined output, fallback to Stderr if not set. +func (c *Command) Print(i ...interface{}) { + fmt.Fprint(c.OutOrStderr(), i...) +} + +// Println is a convenience method to Println to the defined output, fallback to Stderr if not set. 
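AddGroup, the Group type and the GroupID field above drive the grouped "Available Commands" sections of the usage template; a sketch with invented group and command names:

package main

import "github.com/spf13/cobra"

func main() {
    root := &cobra.Command{Use: "mycli"}

    root.AddGroup(
        &cobra.Group{ID: "ops", Title: "Operational Commands:"},
        &cobra.Group{ID: "data", Title: "Data Commands:"},
    )

    root.AddCommand(
        &cobra.Command{Use: "status", GroupID: "ops", Run: func(*cobra.Command, []string) {}},
        &cobra.Command{Use: "backup", GroupID: "data", Run: func(*cobra.Command, []string) {}},
    )

    // The built-in help and completion commands can be grouped too.
    root.SetHelpCommandGroupID("ops")
    root.SetCompletionCommandGroupID("ops")

    _ = root.Execute()
}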
+func (c *Command) Println(i ...interface{}) { + c.Print(fmt.Sprintln(i...)) +} + +// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set. +func (c *Command) Printf(format string, i ...interface{}) { + c.Print(fmt.Sprintf(format, i...)) +} + +// PrintErr is a convenience method to Print to the defined Err output, fallback to Stderr if not set. +func (c *Command) PrintErr(i ...interface{}) { + fmt.Fprint(c.ErrOrStderr(), i...) +} + +// PrintErrln is a convenience method to Println to the defined Err output, fallback to Stderr if not set. +func (c *Command) PrintErrln(i ...interface{}) { + c.PrintErr(fmt.Sprintln(i...)) +} + +// PrintErrf is a convenience method to Printf to the defined Err output, fallback to Stderr if not set. +func (c *Command) PrintErrf(format string, i ...interface{}) { + c.PrintErr(fmt.Sprintf(format, i...)) +} + +// CommandPath returns the full path to this command. +func (c *Command) CommandPath() string { + if c.HasParent() { + return c.Parent().CommandPath() + " " + c.Name() + } + if displayName, ok := c.Annotations[CommandDisplayNameAnnotation]; ok { + return displayName + } + return c.Name() +} + +// UseLine puts out the full usage for a given command (including parents). +func (c *Command) UseLine() string { + var useline string + if c.HasParent() { + useline = c.parent.CommandPath() + " " + c.Use + } else { + useline = c.Use + } + if c.DisableFlagsInUseLine { + return useline + } + if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") { + useline += " [flags]" + } + return useline +} + +// DebugFlags used to determine which flags have been assigned to which commands +// and which persist. +// nolint:goconst +func (c *Command) DebugFlags() { + c.Println("DebugFlags called on", c.Name()) + var debugflags func(*Command) + + debugflags = func(x *Command) { + if x.HasFlags() || x.HasPersistentFlags() { + c.Println(x.Name()) + } + if x.HasFlags() { + x.flags.VisitAll(func(f *flag.Flag) { + if x.HasPersistentFlags() && x.persistentFlag(f.Name) != nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [LP]") + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [L]") + } + }) + } + if x.HasPersistentFlags() { + x.pflags.VisitAll(func(f *flag.Flag) { + if x.HasFlags() { + if x.flags.Lookup(f.Name) == nil { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") + } + } else { + c.Println(" -"+f.Shorthand+",", "--"+f.Name, "["+f.DefValue+"]", "", f.Value, " [P]") + } + }) + } + c.Println(x.flagErrorBuf) + if x.HasSubCommands() { + for _, y := range x.commands { + debugflags(y) + } + } + } + + debugflags(c) +} + +// Name returns the command's name: the first word in the use line. +func (c *Command) Name() string { + name := c.Use + i := strings.Index(name, " ") + if i >= 0 { + name = name[:i] + } + return name +} + +// HasAlias determines if a given string is an alias of the command. +func (c *Command) HasAlias(s string) bool { + for _, a := range c.Aliases { + if commandNameMatches(a, s) { + return true + } + } + return false +} + +// CalledAs returns the command name or alias that was used to invoke +// this command or an empty string if the command has not been called. 
+func (c *Command) CalledAs() string { + if c.commandCalledAs.called { + return c.commandCalledAs.name + } + return "" +} + +// hasNameOrAliasPrefix returns true if the Name or any of aliases start +// with prefix +func (c *Command) hasNameOrAliasPrefix(prefix string) bool { + if strings.HasPrefix(c.Name(), prefix) { + c.commandCalledAs.name = c.Name() + return true + } + for _, alias := range c.Aliases { + if strings.HasPrefix(alias, prefix) { + c.commandCalledAs.name = alias + return true + } + } + return false +} + +// NameAndAliases returns a list of the command name and all aliases +func (c *Command) NameAndAliases() string { + return strings.Join(append([]string{c.Name()}, c.Aliases...), ", ") +} + +// HasExample determines if the command has example. +func (c *Command) HasExample() bool { + return len(c.Example) > 0 +} + +// Runnable determines if the command is itself runnable. +func (c *Command) Runnable() bool { + return c.Run != nil || c.RunE != nil +} + +// HasSubCommands determines if the command has children commands. +func (c *Command) HasSubCommands() bool { + return len(c.commands) > 0 +} + +// IsAvailableCommand determines if a command is available as a non-help command +// (this includes all non deprecated/hidden commands). +func (c *Command) IsAvailableCommand() bool { + if len(c.Deprecated) != 0 || c.Hidden { + return false + } + + if c.HasParent() && c.Parent().helpCommand == c { + return false + } + + if c.Runnable() || c.HasAvailableSubCommands() { + return true + } + + return false +} + +// IsAdditionalHelpTopicCommand determines if a command is an additional +// help topic command; additional help topic command is determined by the +// fact that it is NOT runnable/hidden/deprecated, and has no sub commands that +// are runnable/hidden/deprecated. +// Concrete example: https://github.com/spf13/cobra/issues/393#issuecomment-282741924. +func (c *Command) IsAdditionalHelpTopicCommand() bool { + // if a command is runnable, deprecated, or hidden it is not a 'help' command + if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden { + return false + } + + // if any non-help sub commands are found, the command is not a 'help' command + for _, sub := range c.commands { + if !sub.IsAdditionalHelpTopicCommand() { + return false + } + } + + // the command either has no sub commands, or no non-help sub commands + return true +} + +// HasHelpSubCommands determines if a command has any available 'help' sub commands +// that need to be shown in the usage/help default template under 'additional help +// topics'. +func (c *Command) HasHelpSubCommands() bool { + // return true on the first found available 'help' sub command + for _, sub := range c.commands { + if sub.IsAdditionalHelpTopicCommand() { + return true + } + } + + // the command either has no sub commands, or no available 'help' sub commands + return false +} + +// HasAvailableSubCommands determines if a command has available sub commands that +// need to be shown in the usage/help default template under 'available commands'. +func (c *Command) HasAvailableSubCommands() bool { + // return true on the first found available (non deprecated/help/hidden) + // sub command + for _, sub := range c.commands { + if sub.IsAvailableCommand() { + return true + } + } + + // the command either has no sub commands, or no available (non deprecated/help/hidden) + // sub commands + return false +} + +// HasParent determines if the command is a child command. 
+func (c *Command) HasParent() bool { + return c.parent != nil +} + +// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist. +func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { + return c.globNormFunc +} + +// Flags returns the complete FlagSet that applies +// to this command (local and persistent declared here and by all parents). +func (c *Command) Flags() *flag.FlagSet { + if c.flags == nil { + c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.flags.SetOutput(c.flagErrorBuf) + } + + return c.flags +} + +// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. +func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { + persistentFlags := c.PersistentFlags() + + out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.LocalFlags().VisitAll(func(f *flag.Flag) { + if persistentFlags.Lookup(f.Name) == nil { + out.AddFlag(f) + } + }) + return out +} + +// LocalFlags returns the local FlagSet specifically set in the current command. +func (c *Command) LocalFlags() *flag.FlagSet { + c.mergePersistentFlags() + + if c.lflags == nil { + c.lflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.lflags.SetOutput(c.flagErrorBuf) + } + c.lflags.SortFlags = c.Flags().SortFlags + if c.globNormFunc != nil { + c.lflags.SetNormalizeFunc(c.globNormFunc) + } + + addToLocal := func(f *flag.Flag) { + // Add the flag if it is not a parent PFlag, or it shadows a parent PFlag + if c.lflags.Lookup(f.Name) == nil && f != c.parentsPflags.Lookup(f.Name) { + c.lflags.AddFlag(f) + } + } + c.Flags().VisitAll(addToLocal) + c.PersistentFlags().VisitAll(addToLocal) + return c.lflags +} + +// InheritedFlags returns all flags which were inherited from parent commands. +func (c *Command) InheritedFlags() *flag.FlagSet { + c.mergePersistentFlags() + + if c.iflags == nil { + c.iflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.iflags.SetOutput(c.flagErrorBuf) + } + + local := c.LocalFlags() + if c.globNormFunc != nil { + c.iflags.SetNormalizeFunc(c.globNormFunc) + } + + c.parentsPflags.VisitAll(func(f *flag.Flag) { + if c.iflags.Lookup(f.Name) == nil && local.Lookup(f.Name) == nil { + c.iflags.AddFlag(f) + } + }) + return c.iflags +} + +// NonInheritedFlags returns all flags which were not inherited from parent commands. +func (c *Command) NonInheritedFlags() *flag.FlagSet { + return c.LocalFlags() +} + +// PersistentFlags returns the persistent FlagSet specifically set in the current command. +func (c *Command) PersistentFlags() *flag.FlagSet { + if c.pflags == nil { + c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + c.pflags.SetOutput(c.flagErrorBuf) + } + return c.pflags +} + +// ResetFlags deletes all flags from command. +func (c *Command) ResetFlags() { + c.flagErrorBuf = new(bytes.Buffer) + c.flagErrorBuf.Reset() + c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.flags.SetOutput(c.flagErrorBuf) + c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.pflags.SetOutput(c.flagErrorBuf) + + c.lflags = nil + c.iflags = nil + c.parentsPflags = nil +} + +// HasFlags checks if the command contains any flags (local plus persistent from the entire structure). 
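The split between Flags(), PersistentFlags(), LocalFlags() and InheritedFlags() above is easiest to see with one flag of each kind; a sketch using pflag's helpers (flag names invented):

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

func main() {
    root := &cobra.Command{Use: "mycli"}
    sub := &cobra.Command{
        Use: "status",
        Run: func(cmd *cobra.Command, args []string) {
            // Persistent flags from parents are reachable via the merged set.
            ns, _ := cmd.Flags().GetString("namespace")
            fmt.Println("namespace:", ns)
        },
    }
    root.AddCommand(sub)

    // Persistent on root: appears in sub.InheritedFlags().
    root.PersistentFlags().String("namespace", "default", "namespace to use (illustrative)")
    // Local on sub: appears in sub.LocalFlags() only.
    sub.Flags().Bool("watch", false, "watch for changes (illustrative)")

    _ = root.Execute()
}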
+func (c *Command) HasFlags() bool { + return c.Flags().HasFlags() +} + +// HasPersistentFlags checks if the command contains persistent flags. +func (c *Command) HasPersistentFlags() bool { + return c.PersistentFlags().HasFlags() +} + +// HasLocalFlags checks if the command has flags specifically declared locally. +func (c *Command) HasLocalFlags() bool { + return c.LocalFlags().HasFlags() +} + +// HasInheritedFlags checks if the command has flags inherited from its parent command. +func (c *Command) HasInheritedFlags() bool { + return c.InheritedFlags().HasFlags() +} + +// HasAvailableFlags checks if the command contains any flags (local plus persistent from the entire +// structure) which are not hidden or deprecated. +func (c *Command) HasAvailableFlags() bool { + return c.Flags().HasAvailableFlags() +} + +// HasAvailablePersistentFlags checks if the command contains persistent flags which are not hidden or deprecated. +func (c *Command) HasAvailablePersistentFlags() bool { + return c.PersistentFlags().HasAvailableFlags() +} + +// HasAvailableLocalFlags checks if the command has flags specifically declared locally which are not hidden +// or deprecated. +func (c *Command) HasAvailableLocalFlags() bool { + return c.LocalFlags().HasAvailableFlags() +} + +// HasAvailableInheritedFlags checks if the command has flags inherited from its parent command which are +// not hidden or deprecated. +func (c *Command) HasAvailableInheritedFlags() bool { + return c.InheritedFlags().HasAvailableFlags() +} + +// Flag climbs up the command tree looking for matching flag. +func (c *Command) Flag(name string) (flag *flag.Flag) { + flag = c.Flags().Lookup(name) + + if flag == nil { + flag = c.persistentFlag(name) + } + + return +} + +// Recursively find matching persistent flag. +func (c *Command) persistentFlag(name string) (flag *flag.Flag) { + if c.HasPersistentFlags() { + flag = c.PersistentFlags().Lookup(name) + } + + if flag == nil { + c.updateParentsPflags() + flag = c.parentsPflags.Lookup(name) + } + return +} + +// ParseFlags parses persistent flag tree and local flags. +func (c *Command) ParseFlags(args []string) error { + if c.DisableFlagParsing { + return nil + } + + if c.flagErrorBuf == nil { + c.flagErrorBuf = new(bytes.Buffer) + } + beforeErrorBufLen := c.flagErrorBuf.Len() + c.mergePersistentFlags() + + // do it here after merging all flags and just before parse + c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist) + + err := c.Flags().Parse(args) + // Print warnings if they occurred (e.g. deprecated flag messages). + if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil { + c.Print(c.flagErrorBuf.String()) + } + + return err +} + +// Parent returns a commands parent command. +func (c *Command) Parent() *Command { + return c.parent +} + +// mergePersistentFlags merges c.PersistentFlags() to c.Flags() +// and adds missing persistent flags of all parents. +func (c *Command) mergePersistentFlags() { + c.updateParentsPflags() + c.Flags().AddFlagSet(c.PersistentFlags()) + c.Flags().AddFlagSet(c.parentsPflags) +} + +// updateParentsPflags updates c.parentsPflags by adding +// new persistent flags of all parents. +// If c.parentsPflags == nil, it makes new. 
+func (c *Command) updateParentsPflags() { + if c.parentsPflags == nil { + c.parentsPflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.parentsPflags.SetOutput(c.flagErrorBuf) + c.parentsPflags.SortFlags = false + } + + if c.globNormFunc != nil { + c.parentsPflags.SetNormalizeFunc(c.globNormFunc) + } + + c.Root().PersistentFlags().AddFlagSet(flag.CommandLine) + + c.VisitParents(func(parent *Command) { + c.parentsPflags.AddFlagSet(parent.PersistentFlags()) + }) +} + +// commandNameMatches checks if two command names are equal +// taking into account case sensitivity according to +// EnableCaseInsensitive global configuration. +func commandNameMatches(s string, t string) bool { + if EnableCaseInsensitive { + return strings.EqualFold(s, t) + } + + return s == t +} diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go new file mode 100644 index 0000000000..307f0c127f --- /dev/null +++ b/vendor/github.com/spf13/cobra/command_notwin.go @@ -0,0 +1,20 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows +// +build !windows + +package cobra + +var preExecHookFn func(*Command) diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go new file mode 100644 index 0000000000..adbef395c2 --- /dev/null +++ b/vendor/github.com/spf13/cobra/command_win.go @@ -0,0 +1,41 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build windows +// +build windows + +package cobra + +import ( + "fmt" + "os" + "time" + + "github.com/inconshreveable/mousetrap" +) + +var preExecHookFn = preExecHook + +func preExecHook(c *Command) { + if MousetrapHelpText != "" && mousetrap.StartedByExplorer() { + c.Print(MousetrapHelpText) + if MousetrapDisplayDuration > 0 { + time.Sleep(MousetrapDisplayDuration) + } else { + c.Println("Press return to continue...") + fmt.Scanln() + } + os.Exit(1) + } +} diff --git a/vendor/github.com/spf13/cobra/completions.go b/vendor/github.com/spf13/cobra/completions.go new file mode 100644 index 0000000000..b60f6b2000 --- /dev/null +++ b/vendor/github.com/spf13/cobra/completions.go @@ -0,0 +1,901 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cobra + +import ( + "fmt" + "os" + "strings" + "sync" + + "github.com/spf13/pflag" +) + +const ( + // ShellCompRequestCmd is the name of the hidden command that is used to request + // completion results from the program. It is used by the shell completion scripts. + ShellCompRequestCmd = "__complete" + // ShellCompNoDescRequestCmd is the name of the hidden command that is used to request + // completion results without their description. It is used by the shell completion scripts. + ShellCompNoDescRequestCmd = "__completeNoDesc" +) + +// Global map of flag completion functions. Make sure to use flagCompletionMutex before you try to read and write from it. +var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){} + +// lock for reading and writing from flagCompletionFunctions +var flagCompletionMutex = &sync.RWMutex{} + +// ShellCompDirective is a bit map representing the different behaviors the shell +// can be instructed to have once completions have been provided. +type ShellCompDirective int + +type flagCompError struct { + subCommand string + flagName string +} + +func (e *flagCompError) Error() string { + return "Subcommand '" + e.subCommand + "' does not support flag '" + e.flagName + "'" +} + +const ( + // ShellCompDirectiveError indicates an error occurred and completions should be ignored. + ShellCompDirectiveError ShellCompDirective = 1 << iota + + // ShellCompDirectiveNoSpace indicates that the shell should not add a space + // after the completion even if there is a single completion provided. + ShellCompDirectiveNoSpace + + // ShellCompDirectiveNoFileComp indicates that the shell should not provide + // file completion even when no completion is provided. + ShellCompDirectiveNoFileComp + + // ShellCompDirectiveFilterFileExt indicates that the provided completions + // should be used as file extension filters. + // For flags, using Command.MarkFlagFilename() and Command.MarkPersistentFlagFilename() + // is a shortcut to using this directive explicitly. The BashCompFilenameExt + // annotation can also be used to obtain the same behavior for flags. + ShellCompDirectiveFilterFileExt + + // ShellCompDirectiveFilterDirs indicates that only directory names should + // be provided in file completion. To request directory names within another + // directory, the returned completions should specify the directory within + // which to search. The BashCompSubdirsInDir annotation can be used to + // obtain the same behavior but only for flags. + ShellCompDirectiveFilterDirs + + // ShellCompDirectiveKeepOrder indicates that the shell should preserve the order + // in which the completions are provided + ShellCompDirectiveKeepOrder + + // =========================================================================== + + // All directives using iota should be above this one. + // For internal use. + shellCompDirectiveMaxValue + + // ShellCompDirectiveDefault indicates to let the shell perform its default + // behavior after completions have been provided. 
+ // This one must be last to avoid messing up the iota count. + ShellCompDirectiveDefault ShellCompDirective = 0 +) + +const ( + // Constants for the completion command + compCmdName = "completion" + compCmdNoDescFlagName = "no-descriptions" + compCmdNoDescFlagDesc = "disable completion descriptions" + compCmdNoDescFlagDefault = false +) + +// CompletionOptions are the options to control shell completion +type CompletionOptions struct { + // DisableDefaultCmd prevents Cobra from creating a default 'completion' command + DisableDefaultCmd bool + // DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag + // for shells that support completion descriptions + DisableNoDescFlag bool + // DisableDescriptions turns off all completion descriptions for shells + // that support them + DisableDescriptions bool + // HiddenDefaultCmd makes the default 'completion' command hidden + HiddenDefaultCmd bool +} + +// NoFileCompletions can be used to disable file completion for commands that should +// not trigger file completions. +func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { + return nil, ShellCompDirectiveNoFileComp +} + +// FixedCompletions can be used to create a completion function which always +// returns the same results. +func FixedCompletions(choices []string, directive ShellCompDirective) func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { + return func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) { + return choices, directive + } +} + +// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag. +func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error { + flag := c.Flag(flagName) + if flag == nil { + return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName) + } + flagCompletionMutex.Lock() + defer flagCompletionMutex.Unlock() + + if _, exists := flagCompletionFunctions[flag]; exists { + return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName) + } + flagCompletionFunctions[flag] = f + return nil +} + +// GetFlagCompletionFunc returns the completion function for the given flag of the command, if available. 
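+//
+// Editor's note: an illustrative sketch (not part of the upstream cobra source)
+// of registering a completion function with RegisterFlagCompletionFunc and
+// retrieving it again; the "output" flag and its choices are hypothetical:
+//
+//	cmd := &cobra.Command{Use: "report", Run: func(*cobra.Command, []string) {}}
+//	cmd.Flags().String("output", "table", "output format")
+//	_ = cmd.RegisterFlagCompletionFunc("output",
+//		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+//			return []string{"table", "json", "yaml"}, cobra.ShellCompDirectiveNoFileComp
+//		})
+//	if fn, ok := cmd.GetFlagCompletionFunc("output"); ok {
+//		comps, _ := fn(cmd, nil, "")
+//		fmt.Println(comps) // [table json yaml]
+//	}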
+func (c *Command) GetFlagCompletionFunc(flagName string) (func(*Command, []string, string) ([]string, ShellCompDirective), bool) { + flag := c.Flag(flagName) + if flag == nil { + return nil, false + } + + flagCompletionMutex.RLock() + defer flagCompletionMutex.RUnlock() + + completionFunc, exists := flagCompletionFunctions[flag] + return completionFunc, exists +} + +// Returns a string listing the different directive enabled in the specified parameter +func (d ShellCompDirective) string() string { + var directives []string + if d&ShellCompDirectiveError != 0 { + directives = append(directives, "ShellCompDirectiveError") + } + if d&ShellCompDirectiveNoSpace != 0 { + directives = append(directives, "ShellCompDirectiveNoSpace") + } + if d&ShellCompDirectiveNoFileComp != 0 { + directives = append(directives, "ShellCompDirectiveNoFileComp") + } + if d&ShellCompDirectiveFilterFileExt != 0 { + directives = append(directives, "ShellCompDirectiveFilterFileExt") + } + if d&ShellCompDirectiveFilterDirs != 0 { + directives = append(directives, "ShellCompDirectiveFilterDirs") + } + if d&ShellCompDirectiveKeepOrder != 0 { + directives = append(directives, "ShellCompDirectiveKeepOrder") + } + if len(directives) == 0 { + directives = append(directives, "ShellCompDirectiveDefault") + } + + if d >= shellCompDirectiveMaxValue { + return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d) + } + return strings.Join(directives, ", ") +} + +// initCompleteCmd adds a special hidden command that can be used to request custom completions. +func (c *Command) initCompleteCmd(args []string) { + completeCmd := &Command{ + Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd), + Aliases: []string{ShellCompNoDescRequestCmd}, + DisableFlagsInUseLine: true, + Hidden: true, + DisableFlagParsing: true, + Args: MinimumNArgs(1), + Short: "Request shell completion choices for the specified command-line", + Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s", + "to request completion choices for the specified command-line.", ShellCompRequestCmd), + Run: func(cmd *Command, args []string) { + finalCmd, completions, directive, err := cmd.getCompletions(args) + if err != nil { + CompErrorln(err.Error()) + // Keep going for multiple reasons: + // 1- There could be some valid completions even though there was an error + // 2- Even without completions, we need to print the directive + } + + noDescriptions := (cmd.CalledAs() == ShellCompNoDescRequestCmd) + for _, comp := range completions { + if GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable { + // Remove all activeHelp entries in this case + if strings.HasPrefix(comp, activeHelpMarker) { + continue + } + } + if noDescriptions { + // Remove any description that may be included following a tab character. + comp = strings.Split(comp, "\t")[0] + } + + // Make sure we only write the first line to the output. + // This is needed if a description contains a linebreak. + // Otherwise the shell scripts will interpret the other lines as new flags + // and could therefore provide a wrong completion. + comp = strings.Split(comp, "\n")[0] + + // Finally trim the completion. This is especially important to get rid + // of a trailing tab when there are no description following it. + // For example, a sub-command without a description should not be completed + // with a tab at the end (or else zsh will show a -- following it + // although there is no description). 
+ comp = strings.TrimSpace(comp) + + // Print each possible completion to stdout for the completion script to consume. + fmt.Fprintln(finalCmd.OutOrStdout(), comp) + } + + // As the last printout, print the completion directive for the completion script to parse. + // The directive integer must be that last character following a single colon (:). + // The completion script expects : + fmt.Fprintf(finalCmd.OutOrStdout(), ":%d\n", directive) + + // Print some helpful info to stderr for the user to understand. + // Output from stderr must be ignored by the completion script. + fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string()) + }, + } + c.AddCommand(completeCmd) + subCmd, _, err := c.Find(args) + if err != nil || subCmd.Name() != ShellCompRequestCmd { + // Only create this special command if it is actually being called. + // This reduces possible side-effects of creating such a command; + // for example, having this command would cause problems to a + // cobra program that only consists of the root command, since this + // command would cause the root command to suddenly have a subcommand. + c.RemoveCommand(completeCmd) + } +} + +func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) { + // The last argument, which is not completely typed by the user, + // should not be part of the list of arguments + toComplete := args[len(args)-1] + trimmedArgs := args[:len(args)-1] + + var finalCmd *Command + var finalArgs []string + var err error + // Find the real command for which completion must be performed + // check if we need to traverse here to parse local flags on parent commands + if c.Root().TraverseChildren { + finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs) + } else { + // For Root commands that don't specify any value for their Args fields, when we call + // Find(), if those Root commands don't have any sub-commands, they will accept arguments. + // However, because we have added the __complete sub-command in the current code path, the + // call to Find() -> legacyArgs() will return an error if there are any arguments. + // To avoid this, we first remove the __complete command to get back to having no sub-commands. + rootCmd := c.Root() + if len(rootCmd.Commands()) == 1 { + rootCmd.RemoveCommand(c) + } + + finalCmd, finalArgs, err = rootCmd.Find(trimmedArgs) + } + if err != nil { + // Unable to find the real command. E.g., someInvalidCmd + return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Unable to find a command for arguments: %v", trimmedArgs) + } + finalCmd.ctx = c.ctx + + // These flags are normally added when `execute()` is called on `finalCmd`, + // however, when doing completion, we don't call `finalCmd.execute()`. + // Let's add the --help and --version flag ourselves but only if the finalCmd + // has not disabled flag parsing; if flag parsing is disabled, it is up to the + // finalCmd itself to handle the completion of *all* flags. + if !finalCmd.DisableFlagParsing { + finalCmd.InitDefaultHelpFlag() + finalCmd.InitDefaultVersionFlag() + } + + // Check if we are doing flag value completion before parsing the flags. + // This is important because if we are completing a flag value, we need to also + // remove the flag name argument from the list of finalArgs or else the parsing + // could fail due to an invalid value (incomplete) for the flag. 
+ flag, finalArgs, toComplete, flagErr := checkIfFlagCompletion(finalCmd, finalArgs, toComplete) + + // Check if interspersed is false or -- was set on a previous arg. + // This works by counting the arguments. Normally -- is not counted as arg but + // if -- was already set or interspersed is false and there is already one arg then + // the extra added -- is counted as arg. + flagCompletion := true + _ = finalCmd.ParseFlags(append(finalArgs, "--")) + newArgCount := finalCmd.Flags().NArg() + + // Parse the flags early so we can check if required flags are set + if err = finalCmd.ParseFlags(finalArgs); err != nil { + return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error()) + } + + realArgCount := finalCmd.Flags().NArg() + if newArgCount > realArgCount { + // don't do flag completion (see above) + flagCompletion = false + } + // Error while attempting to parse flags + if flagErr != nil { + // If error type is flagCompError and we don't want flagCompletion we should ignore the error + if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) { + return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr + } + } + + // Look for the --help or --version flags. If they are present, + // there should be no further completions. + if helpOrVersionFlagPresent(finalCmd) { + return finalCmd, []string{}, ShellCompDirectiveNoFileComp, nil + } + + // We only remove the flags from the arguments if DisableFlagParsing is not set. + // This is important for commands which have requested to do their own flag completion. + if !finalCmd.DisableFlagParsing { + finalArgs = finalCmd.Flags().Args() + } + + if flag != nil && flagCompletion { + // Check if we are completing a flag value subject to annotations + if validExts, present := flag.Annotations[BashCompFilenameExt]; present { + if len(validExts) != 0 { + // File completion filtered by extensions + return finalCmd, validExts, ShellCompDirectiveFilterFileExt, nil + } + + // The annotation requests simple file completion. There is no reason to do + // that since it is the default behavior anyway. Let's ignore this annotation + // in case the program also registered a completion function for this flag. + // Even though it is a mistake on the program's side, let's be nice when we can. + } + + if subDir, present := flag.Annotations[BashCompSubdirsInDir]; present { + if len(subDir) == 1 { + // Directory completion from within a directory + return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil + } + // Directory completion + return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil + } + } + + var completions []string + var directive ShellCompDirective + + // Enforce flag groups before doing flag completions + finalCmd.enforceFlagGroupsForCompletion() + + // Note that we want to perform flagname completion even if finalCmd.DisableFlagParsing==true; + // doing this allows for completion of persistent flag names even for commands that disable flag parsing. + // + // When doing completion of a flag name, as soon as an argument starts with + // a '-' we know it is a flag. 
We cannot use isFlagArg() here as it requires + // the flag name to be complete + if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion { + // First check for required flags + completions = completeRequireFlags(finalCmd, toComplete) + + // If we have not found any required flags, only then can we show regular flags + if len(completions) == 0 { + doCompleteFlags := func(flag *pflag.Flag) { + if !flag.Changed || + strings.Contains(flag.Value.Type(), "Slice") || + strings.Contains(flag.Value.Type(), "Array") { + // If the flag is not already present, or if it can be specified multiple times (Array or Slice) + // we suggest it as a completion + completions = append(completions, getFlagNameCompletions(flag, toComplete)...) + } + } + + // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands + // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and + // non-inherited flags. + finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteFlags(flag) + }) + // Try to complete non-inherited flags even if DisableFlagParsing==true. + // This allows programs to tell Cobra about flags for completion even + // if the actual parsing of flags is not done by Cobra. + // For instance, Helm uses this to provide flag name completion for + // some of its plugins. + finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteFlags(flag) + }) + } + + directive = ShellCompDirectiveNoFileComp + if len(completions) == 1 && strings.HasSuffix(completions[0], "=") { + // If there is a single completion, the shell usually adds a space + // after the completion. We don't want that if the flag ends with an = + directive = ShellCompDirectiveNoSpace + } + + if !finalCmd.DisableFlagParsing { + // If DisableFlagParsing==false, we have completed the flags as known by Cobra; + // we can return what we found. + // If DisableFlagParsing==true, Cobra may not be aware of all flags, so we + // let the logic continue to see if ValidArgsFunction needs to be called. + return finalCmd, completions, directive, nil + } + } else { + directive = ShellCompDirectiveDefault + if flag == nil { + foundLocalNonPersistentFlag := false + // If TraverseChildren is true on the root command we don't check for + // local flags because we can use a local flag on a parent command + if !finalCmd.Root().TraverseChildren { + // Check if there are any local, non-persistent flags on the command-line + localNonPersistentFlags := finalCmd.LocalNonPersistentFlags() + finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed { + foundLocalNonPersistentFlag = true + } + }) + } + + // Complete subcommand names, including the help command + if len(finalArgs) == 0 && !foundLocalNonPersistentFlag { + // We only complete sub-commands if: + // - there are no arguments on the command-line and + // - there are no local, non-persistent flags on the command-line or TraverseChildren is true + for _, subCmd := range finalCmd.Commands() { + if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand { + if strings.HasPrefix(subCmd.Name(), toComplete) { + completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short)) + } + directive = ShellCompDirectiveNoFileComp + } + } + } + + // Complete required flags even without the '-' prefix + completions = append(completions, completeRequireFlags(finalCmd, toComplete)...) 
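+
+			// Editor's note: the positional-argument completions handled below can come
+			// either from ValidArgs or from a ValidArgsFunction registered on the command.
+			// An illustrative sketch (not part of the upstream cobra source) with a
+			// hypothetical "describe" command:
+			//
+			//	describeCmd := &cobra.Command{
+			//		Use:  "describe <resource>",
+			//		Args: cobra.ExactArgs(1),
+			//		ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+			//			if len(args) != 0 {
+			//				// Only the first positional argument is completed.
+			//				return nil, cobra.ShellCompDirectiveNoFileComp
+			//			}
+			//			return []string{"pods", "nodes", "services"}, cobra.ShellCompDirectiveNoFileComp
+			//		},
+			//		Run: func(*cobra.Command, []string) {},
+			//	}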
+ + // Always complete ValidArgs, even if we are completing a subcommand name. + // This is for commands that have both subcommands and ValidArgs. + if len(finalCmd.ValidArgs) > 0 { + if len(finalArgs) == 0 { + // ValidArgs are only for the first argument + for _, validArg := range finalCmd.ValidArgs { + if strings.HasPrefix(validArg, toComplete) { + completions = append(completions, validArg) + } + } + directive = ShellCompDirectiveNoFileComp + + // If no completions were found within commands or ValidArgs, + // see if there are any ArgAliases that should be completed. + if len(completions) == 0 { + for _, argAlias := range finalCmd.ArgAliases { + if strings.HasPrefix(argAlias, toComplete) { + completions = append(completions, argAlias) + } + } + } + } + + // If there are ValidArgs specified (even if they don't match), we stop completion. + // Only one of ValidArgs or ValidArgsFunction can be used for a single command. + return finalCmd, completions, directive, nil + } + + // Let the logic continue so as to add any ValidArgsFunction completions, + // even if we already found sub-commands. + // This is for commands that have subcommands but also specify a ValidArgsFunction. + } + } + + // Find the completion function for the flag or command + var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) + if flag != nil && flagCompletion { + flagCompletionMutex.RLock() + completionFn = flagCompletionFunctions[flag] + flagCompletionMutex.RUnlock() + } else { + completionFn = finalCmd.ValidArgsFunction + } + if completionFn != nil { + // Go custom completion defined for this flag or command. + // Call the registered completion function to get the completions. + var comps []string + comps, directive = completionFn(finalCmd, finalArgs, toComplete) + completions = append(completions, comps...) + } + + return finalCmd, completions, directive, nil +} + +func helpOrVersionFlagPresent(cmd *Command) bool { + if versionFlag := cmd.Flags().Lookup("version"); versionFlag != nil && + len(versionFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && versionFlag.Changed { + return true + } + if helpFlag := cmd.Flags().Lookup("help"); helpFlag != nil && + len(helpFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && helpFlag.Changed { + return true + } + return false +} + +func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string { + if nonCompletableFlag(flag) { + return []string{} + } + + var completions []string + flagName := "--" + flag.Name + if strings.HasPrefix(flagName, toComplete) { + // Flag without the = + completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + + // Why suggest both long forms: --flag and --flag= ? + // This forces the user to *always* have to type either an = or a space after the flag name. + // Let's be nice and avoid making users have to do that. + // Since boolean flags and shortname flags don't show the = form, let's go that route and never show it. + // The = form will still work, we just won't suggest it. + // This also makes the list of suggested flags shorter as we avoid all the = forms. 
+ // + // if len(flag.NoOptDefVal) == 0 { + // // Flag requires a value, so it can be suffixed with = + // flagName += "=" + // completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + // } + } + + flagName = "-" + flag.Shorthand + if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) { + completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage)) + } + + return completions +} + +func completeRequireFlags(finalCmd *Command, toComplete string) []string { + var completions []string + + doCompleteRequiredFlags := func(flag *pflag.Flag) { + if _, present := flag.Annotations[BashCompOneRequiredFlag]; present { + if !flag.Changed { + // If the flag is not already present, we suggest it as a completion + completions = append(completions, getFlagNameCompletions(flag, toComplete)...) + } + } + } + + // We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands + // that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and + // non-inherited flags. + finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteRequiredFlags(flag) + }) + finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + doCompleteRequiredFlags(flag) + }) + + return completions +} + +func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) { + if finalCmd.DisableFlagParsing { + // We only do flag completion if we are allowed to parse flags + // This is important for commands which have requested to do their own flag completion. + return nil, args, lastArg, nil + } + + var flagName string + trimmedArgs := args + flagWithEqual := false + orgLastArg := lastArg + + // When doing completion of a flag name, as soon as an argument starts with + // a '-' we know it is a flag. We cannot use isFlagArg() here as that function + // requires the flag name to be complete + if len(lastArg) > 0 && lastArg[0] == '-' { + if index := strings.Index(lastArg, "="); index >= 0 { + // Flag with an = + if strings.HasPrefix(lastArg[:index], "--") { + // Flag has full name + flagName = lastArg[2:index] + } else { + // Flag is shorthand + // We have to get the last shorthand flag name + // e.g. `-asd` => d to provide the correct completion + // https://github.com/spf13/cobra/issues/1257 + flagName = lastArg[index-1 : index] + } + lastArg = lastArg[index+1:] + flagWithEqual = true + } else { + // Normal flag completion + return nil, args, lastArg, nil + } + } + + if len(flagName) == 0 { + if len(args) > 0 { + prevArg := args[len(args)-1] + if isFlagArg(prevArg) { + // Only consider the case where the flag does not contain an =. + // If the flag contains an = it means it has already been fully processed, + // so we don't need to deal with it here. + if index := strings.Index(prevArg, "="); index < 0 { + if strings.HasPrefix(prevArg, "--") { + // Flag has full name + flagName = prevArg[2:] + } else { + // Flag is shorthand + // We have to get the last shorthand flag name + // e.g. 
`-asd` => d to provide the correct completion + // https://github.com/spf13/cobra/issues/1257 + flagName = prevArg[len(prevArg)-1:] + } + // Remove the uncompleted flag or else there could be an error created + // for an invalid value for that flag + trimmedArgs = args[:len(args)-1] + } + } + } + } + + if len(flagName) == 0 { + // Not doing flag completion + return nil, trimmedArgs, lastArg, nil + } + + flag := findFlag(finalCmd, flagName) + if flag == nil { + // Flag not supported by this command, the interspersed option might be set so return the original args + return nil, args, orgLastArg, &flagCompError{subCommand: finalCmd.Name(), flagName: flagName} + } + + if !flagWithEqual { + if len(flag.NoOptDefVal) != 0 { + // We had assumed dealing with a two-word flag but the flag is a boolean flag. + // In that case, there is no value following it, so we are not really doing flag completion. + // Reset everything to do noun completion. + trimmedArgs = args + flag = nil + } + } + + return flag, trimmedArgs, lastArg, nil +} + +// InitDefaultCompletionCmd adds a default 'completion' command to c. +// This function will do nothing if any of the following is true: +// 1- the feature has been explicitly disabled by the program, +// 2- c has no subcommands (to avoid creating one), +// 3- c already has a 'completion' command provided by the program. +func (c *Command) InitDefaultCompletionCmd() { + if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() { + return + } + + for _, cmd := range c.commands { + if cmd.Name() == compCmdName || cmd.HasAlias(compCmdName) { + // A completion command is already available + return + } + } + + haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions + + completionCmd := &Command{ + Use: compCmdName, + Short: "Generate the autocompletion script for the specified shell", + Long: fmt.Sprintf(`Generate the autocompletion script for %[1]s for the specified shell. +See each sub-command's help for details on how to use the generated script. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + Hidden: c.CompletionOptions.HiddenDefaultCmd, + GroupID: c.completionCommandGroupID, + } + c.AddCommand(completionCmd) + + out := c.OutOrStdout() + noDesc := c.CompletionOptions.DisableDescriptions + shortDesc := "Generate the autocompletion script for %s" + bash := &Command{ + Use: "bash", + Short: fmt.Sprintf(shortDesc, "bash"), + Long: fmt.Sprintf(`Generate the autocompletion script for the bash shell. + +This script depends on the 'bash-completion' package. +If it is not installed already, you can install it via your OS's package manager. + +To load completions in your current shell session: + + source <(%[1]s completion bash) + +To load completions for every new session, execute once: + +#### Linux: + + %[1]s completion bash > /etc/bash_completion.d/%[1]s + +#### macOS: + + %[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s + +You will need to start a new shell for this setup to take effect. 
+`, c.Root().Name()), + Args: NoArgs, + DisableFlagsInUseLine: true, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + return cmd.Root().GenBashCompletionV2(out, !noDesc) + }, + } + if haveNoDescFlag { + bash.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + zsh := &Command{ + Use: "zsh", + Short: fmt.Sprintf(shortDesc, "zsh"), + Long: fmt.Sprintf(`Generate the autocompletion script for the zsh shell. + +If shell completion is not already enabled in your environment you will need +to enable it. You can execute the following once: + + echo "autoload -U compinit; compinit" >> ~/.zshrc + +To load completions in your current shell session: + + source <(%[1]s completion zsh) + +To load completions for every new session, execute once: + +#### Linux: + + %[1]s completion zsh > "${fpath[1]}/_%[1]s" + +#### macOS: + + %[1]s completion zsh > $(brew --prefix)/share/zsh/site-functions/_%[1]s + +You will need to start a new shell for this setup to take effect. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + if noDesc { + return cmd.Root().GenZshCompletionNoDesc(out) + } + return cmd.Root().GenZshCompletion(out) + }, + } + if haveNoDescFlag { + zsh.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + fish := &Command{ + Use: "fish", + Short: fmt.Sprintf(shortDesc, "fish"), + Long: fmt.Sprintf(`Generate the autocompletion script for the fish shell. + +To load completions in your current shell session: + + %[1]s completion fish | source + +To load completions for every new session, execute once: + + %[1]s completion fish > ~/.config/fish/completions/%[1]s.fish + +You will need to start a new shell for this setup to take effect. +`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + return cmd.Root().GenFishCompletion(out, !noDesc) + }, + } + if haveNoDescFlag { + fish.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + powershell := &Command{ + Use: "powershell", + Short: fmt.Sprintf(shortDesc, "powershell"), + Long: fmt.Sprintf(`Generate the autocompletion script for powershell. + +To load completions in your current shell session: + + %[1]s completion powershell | Out-String | Invoke-Expression + +To load completions for every new session, add the output of the above command +to your powershell profile. 
+`, c.Root().Name()), + Args: NoArgs, + ValidArgsFunction: NoFileCompletions, + RunE: func(cmd *Command, args []string) error { + if noDesc { + return cmd.Root().GenPowerShellCompletion(out) + } + return cmd.Root().GenPowerShellCompletionWithDesc(out) + + }, + } + if haveNoDescFlag { + powershell.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc) + } + + completionCmd.AddCommand(bash, zsh, fish, powershell) +} + +func findFlag(cmd *Command, name string) *pflag.Flag { + flagSet := cmd.Flags() + if len(name) == 1 { + // First convert the short flag into a long flag + // as the cmd.Flag() search only accepts long flags + if short := flagSet.ShorthandLookup(name); short != nil { + name = short.Name + } else { + set := cmd.InheritedFlags() + if short = set.ShorthandLookup(name); short != nil { + name = short.Name + } else { + return nil + } + } + } + return cmd.Flag(name) +} + +// CompDebug prints the specified string to the same file as where the +// completion script prints its logs. +// Note that completion printouts should never be on stdout as they would +// be wrongly interpreted as actual completion choices by the completion script. +func CompDebug(msg string, printToStdErr bool) { + msg = fmt.Sprintf("[Debug] %s", msg) + + // Such logs are only printed when the user has set the environment + // variable BASH_COMP_DEBUG_FILE to the path of some file to be used. + if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" { + f, err := os.OpenFile(path, + os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err == nil { + defer f.Close() + WriteStringAndCheck(f, msg) + } + } + + if printToStdErr { + // Must print to stderr for this not to be read by the completion script. + fmt.Fprint(os.Stderr, msg) + } +} + +// CompDebugln prints the specified string with a newline at the end +// to the same file as where the completion script prints its logs. +// Such logs are only printed when the user has set the environment +// variable BASH_COMP_DEBUG_FILE to the path of some file to be used. +func CompDebugln(msg string, printToStdErr bool) { + CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr) +} + +// CompError prints the specified completion message to stderr. +func CompError(msg string) { + msg = fmt.Sprintf("[Error] %s", msg) + CompDebug(msg, true) +} + +// CompErrorln prints the specified completion message to stderr with a newline at the end. +func CompErrorln(msg string) { + CompError(fmt.Sprintf("%s\n", msg)) +} diff --git a/vendor/github.com/spf13/cobra/fish_completions.go b/vendor/github.com/spf13/cobra/fish_completions.go new file mode 100644 index 0000000000..12d61b6911 --- /dev/null +++ b/vendor/github.com/spf13/cobra/fish_completions.go @@ -0,0 +1,292 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" +) + +func genFishComp(buf io.StringWriter, name string, includeDesc bool) { + // Variables should not contain a '-' or ':' character + nameForVar := name + nameForVar = strings.ReplaceAll(nameForVar, "-", "_") + nameForVar = strings.ReplaceAll(nameForVar, ":", "_") + + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd + } + WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name)) + WriteStringAndCheck(buf, fmt.Sprintf(` +function __%[1]s_debug + set -l file "$BASH_COMP_DEBUG_FILE" + if test -n "$file" + echo "$argv" >> $file + end +end + +function __%[1]s_perform_completion + __%[1]s_debug "Starting __%[1]s_perform_completion" + + # Extract all args except the last one + set -l args (commandline -opc) + # Extract the last arg and escape it in case it is a space + set -l lastArg (string escape -- (commandline -ct)) + + __%[1]s_debug "args: $args" + __%[1]s_debug "last arg: $lastArg" + + # Disable ActiveHelp which is not supported for fish shell + set -l requestComp "%[10]s=0 $args[1] %[3]s $args[2..-1] $lastArg" + + __%[1]s_debug "Calling $requestComp" + set -l results (eval $requestComp 2> /dev/null) + + # Some programs may output extra empty lines after the directive. + # Let's ignore them or else it will break completion. + # Ref: https://github.com/spf13/cobra/issues/1279 + for line in $results[-1..1] + if test (string trim -- $line) = "" + # Found an empty line, remove it + set results $results[1..-2] + else + # Found non-empty line, we have our proper output + break + end + end + + set -l comps $results[1..-2] + set -l directiveLine $results[-1] + + # For Fish, when completing a flag with an = (e.g., -n=) + # completions must be prefixed with the flag + set -l flagPrefix (string match -r -- '-.*=' "$lastArg") + + __%[1]s_debug "Comps: $comps" + __%[1]s_debug "DirectiveLine: $directiveLine" + __%[1]s_debug "flagPrefix: $flagPrefix" + + for comp in $comps + printf "%%s%%s\n" "$flagPrefix" "$comp" + end + + printf "%%s\n" "$directiveLine" +end + +# this function limits calls to __%[1]s_perform_completion, by caching the result behind $__%[1]s_perform_completion_once_result +function __%[1]s_perform_completion_once + __%[1]s_debug "Starting __%[1]s_perform_completion_once" + + if test -n "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "Seems like a valid result already exists, skipping __%[1]s_perform_completion" + return 0 + end + + set --global __%[1]s_perform_completion_once_result (__%[1]s_perform_completion) + if test -z "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "No completions, probably due to a failure" + return 1 + end + + __%[1]s_debug "Performed completions and set __%[1]s_perform_completion_once_result" + return 0 +end + +# this function is used to clear the $__%[1]s_perform_completion_once_result variable after completions are run +function __%[1]s_clear_perform_completion_once_result + __%[1]s_debug "" + __%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable ==========" + set --erase __%[1]s_perform_completion_once_result + __%[1]s_debug "Successfully erased the variable __%[1]s_perform_completion_once_result" +end + +function __%[1]s_requires_order_preservation + __%[1]s_debug "" + __%[1]s_debug "========= checking if order preservation is required ==========" + + __%[1]s_perform_completion_once + if test -z "$__%[1]s_perform_completion_once_result" + 
__%[1]s_debug "Error determining if order preservation is required" + return 1 + end + + set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1]) + __%[1]s_debug "Directive is: $directive" + + set -l shellCompDirectiveKeepOrder %[9]d + set -l keeporder (math (math --scale 0 $directive / $shellCompDirectiveKeepOrder) %% 2) + __%[1]s_debug "Keeporder is: $keeporder" + + if test $keeporder -ne 0 + __%[1]s_debug "This does require order preservation" + return 0 + end + + __%[1]s_debug "This doesn't require order preservation" + return 1 +end + + +# This function does two things: +# - Obtain the completions and store them in the global __%[1]s_comp_results +# - Return false if file completion should be performed +function __%[1]s_prepare_completions + __%[1]s_debug "" + __%[1]s_debug "========= starting completion logic ==========" + + # Start fresh + set --erase __%[1]s_comp_results + + __%[1]s_perform_completion_once + __%[1]s_debug "Completion results: $__%[1]s_perform_completion_once_result" + + if test -z "$__%[1]s_perform_completion_once_result" + __%[1]s_debug "No completion, probably due to a failure" + # Might as well do file completion, in case it helps + return 1 + end + + set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1]) + set --global __%[1]s_comp_results $__%[1]s_perform_completion_once_result[1..-2] + + __%[1]s_debug "Completions are: $__%[1]s_comp_results" + __%[1]s_debug "Directive is: $directive" + + set -l shellCompDirectiveError %[4]d + set -l shellCompDirectiveNoSpace %[5]d + set -l shellCompDirectiveNoFileComp %[6]d + set -l shellCompDirectiveFilterFileExt %[7]d + set -l shellCompDirectiveFilterDirs %[8]d + + if test -z "$directive" + set directive 0 + end + + set -l compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2) + if test $compErr -eq 1 + __%[1]s_debug "Received error directive: aborting." + # Might as well do file completion, in case it helps + return 1 + end + + set -l filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2) + set -l dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2) + if test $filefilter -eq 1; or test $dirfilter -eq 1 + __%[1]s_debug "File extension filtering or directory filtering not supported" + # Do full file completion instead + return 1 + end + + set -l nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2) + set -l nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2) + + __%[1]s_debug "nospace: $nospace, nofiles: $nofiles" + + # If we want to prevent a space, or if file completion is NOT disabled, + # we need to count the number of valid completions. + # To do so, we will filter on prefix as the completions we have received + # may not already be filtered so as to allow fish to match on different + # criteria than the prefix. 
+ if test $nospace -ne 0; or test $nofiles -eq 0 + set -l prefix (commandline -t | string escape --style=regex) + __%[1]s_debug "prefix: $prefix" + + set -l completions (string match -r -- "^$prefix.*" $__%[1]s_comp_results) + set --global __%[1]s_comp_results $completions + __%[1]s_debug "Filtered completions are: $__%[1]s_comp_results" + + # Important not to quote the variable for count to work + set -l numComps (count $__%[1]s_comp_results) + __%[1]s_debug "numComps: $numComps" + + if test $numComps -eq 1; and test $nospace -ne 0 + # We must first split on \t to get rid of the descriptions to be + # able to check what the actual completion will be. + # We don't need descriptions anyway since there is only a single + # real completion which the shell will expand immediately. + set -l split (string split --max 1 \t $__%[1]s_comp_results[1]) + + # Fish won't add a space if the completion ends with any + # of the following characters: @=/:., + set -l lastChar (string sub -s -1 -- $split) + if not string match -r -q "[@=/:.,]" -- "$lastChar" + # In other cases, to support the "nospace" directive we trick the shell + # by outputting an extra, longer completion. + __%[1]s_debug "Adding second completion to perform nospace directive" + set --global __%[1]s_comp_results $split[1] $split[1]. + __%[1]s_debug "Completions are now: $__%[1]s_comp_results" + end + end + + if test $numComps -eq 0; and test $nofiles -eq 0 + # To be consistent with bash and zsh, we only trigger file + # completion when there are no other completions + __%[1]s_debug "Requesting file completion" + return 1 + end + end + + return 0 +end + +# Since Fish completions are only loaded once the user triggers them, we trigger them ourselves +# so we can properly delete any completions provided by another script. +# Only do this if the program can be found, or else fish may print some errors; besides, +# the existing completions will only be loaded if the program can be found. +if type -q "%[2]s" + # The space after the program name is essential to trigger completion for the program + # and not completion of the program name itself. + # Also, we use '> /dev/null 2>&1' since '&>' is not supported in older versions of fish. + complete --do-complete "%[2]s " > /dev/null 2>&1 +end + +# Remove any pre-existing completions for the program since we will be handling all of them. +complete -c %[2]s -e + +# this will get called after the two calls below and clear the $__%[1]s_perform_completion_once_result global +complete -c %[2]s -n '__%[1]s_clear_perform_completion_once_result' +# The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results +# which provides the program's completion choices. +# If this doesn't require order preservation, we don't use the -k flag +complete -c %[2]s -n 'not __%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' +# otherwise we use the -k flag +complete -k -c %[2]s -n '__%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results' +`, nameForVar, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) +} + +// GenFishCompletion generates fish completion file and writes to the passed writer. 
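+// For example, a program can write the script to stdout from one of its own
+// commands (an illustrative sketch, not part of the upstream cobra source;
+// rootCmd and the "completion-fish" command name are hypothetical):
+//
+//	fishCmd := &cobra.Command{
+//		Use:  "completion-fish",
+//		Args: cobra.NoArgs,
+//		RunE: func(cmd *cobra.Command, args []string) error {
+//			// Include descriptions in the generated script.
+//			return cmd.Root().GenFishCompletion(os.Stdout, true)
+//		},
+//	}
+//	rootCmd.AddCommand(fishCmd)
+//
+// A user would then load it in fish with: <program> completion-fish | source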
+func (c *Command) GenFishCompletion(w io.Writer, includeDesc bool) error { + buf := new(bytes.Buffer) + genFishComp(buf, c.Name(), includeDesc) + _, err := buf.WriteTo(w) + return err +} + +// GenFishCompletionFile generates fish completion file. +func (c *Command) GenFishCompletionFile(filename string, includeDesc bool) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.GenFishCompletion(outFile, includeDesc) +} diff --git a/vendor/github.com/spf13/cobra/flag_groups.go b/vendor/github.com/spf13/cobra/flag_groups.go new file mode 100644 index 0000000000..0671ec5f20 --- /dev/null +++ b/vendor/github.com/spf13/cobra/flag_groups.go @@ -0,0 +1,290 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cobra + +import ( + "fmt" + "sort" + "strings" + + flag "github.com/spf13/pflag" +) + +const ( + requiredAsGroup = "cobra_annotation_required_if_others_set" + oneRequired = "cobra_annotation_one_required" + mutuallyExclusive = "cobra_annotation_mutually_exclusive" +) + +// MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors +// if the command is invoked with a subset (but not all) of the given flags. +func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) { + c.mergePersistentFlags() + for _, v := range flagNames { + f := c.Flags().Lookup(v) + if f == nil { + panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v)) + } + if err := c.Flags().SetAnnotation(v, requiredAsGroup, append(f.Annotations[requiredAsGroup], strings.Join(flagNames, " "))); err != nil { + // Only errs if the flag isn't found. + panic(err) + } + } +} + +// MarkFlagsOneRequired marks the given flags with annotations so that Cobra errors +// if the command is invoked without at least one flag from the given set of flags. +func (c *Command) MarkFlagsOneRequired(flagNames ...string) { + c.mergePersistentFlags() + for _, v := range flagNames { + f := c.Flags().Lookup(v) + if f == nil { + panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v)) + } + if err := c.Flags().SetAnnotation(v, oneRequired, append(f.Annotations[oneRequired], strings.Join(flagNames, " "))); err != nil { + // Only errs if the flag isn't found. + panic(err) + } + } +} + +// MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors +// if the command is invoked with more than one flag from the given set of flags. +func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) { + c.mergePersistentFlags() + for _, v := range flagNames { + f := c.Flags().Lookup(v) + if f == nil { + panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v)) + } + // Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed. 
+ if err := c.Flags().SetAnnotation(v, mutuallyExclusive, append(f.Annotations[mutuallyExclusive], strings.Join(flagNames, " "))); err != nil { + panic(err) + } + } +} + +// ValidateFlagGroups validates the mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the +// first error encountered. +func (c *Command) ValidateFlagGroups() error { + if c.DisableFlagParsing { + return nil + } + + flags := c.Flags() + + // groupStatus format is the list of flags as a unique ID, + // then a map of each flag name and whether it is set or not. + groupStatus := map[string]map[string]bool{} + oneRequiredGroupStatus := map[string]map[string]bool{} + mutuallyExclusiveGroupStatus := map[string]map[string]bool{} + flags.VisitAll(func(pflag *flag.Flag) { + processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + }) + + if err := validateRequiredFlagGroups(groupStatus); err != nil { + return err + } + if err := validateOneRequiredFlagGroups(oneRequiredGroupStatus); err != nil { + return err + } + if err := validateExclusiveFlagGroups(mutuallyExclusiveGroupStatus); err != nil { + return err + } + return nil +} + +func hasAllFlags(fs *flag.FlagSet, flagnames ...string) bool { + for _, fname := range flagnames { + f := fs.Lookup(fname) + if f == nil { + return false + } + } + return true +} + +func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annotation string, groupStatus map[string]map[string]bool) { + groupInfo, found := pflag.Annotations[annotation] + if found { + for _, group := range groupInfo { + if groupStatus[group] == nil { + flagnames := strings.Split(group, " ") + + // Only consider this flag group at all if all the flags are defined. + if !hasAllFlags(flags, flagnames...) { + continue + } + + groupStatus[group] = map[string]bool{} + for _, name := range flagnames { + groupStatus[group][name] = false + } + } + + groupStatus[group][pflag.Name] = pflag.Changed + } + } +} + +func validateRequiredFlagGroups(data map[string]map[string]bool) error { + keys := sortedKeys(data) + for _, flagList := range keys { + flagnameAndStatus := data[flagList] + + unset := []string{} + for flagname, isSet := range flagnameAndStatus { + if !isSet { + unset = append(unset, flagname) + } + } + if len(unset) == len(flagnameAndStatus) || len(unset) == 0 { + continue + } + + // Sort values, so they can be tested/scripted against consistently. + sort.Strings(unset) + return fmt.Errorf("if any flags in the group [%v] are set they must all be set; missing %v", flagList, unset) + } + + return nil +} + +func validateOneRequiredFlagGroups(data map[string]map[string]bool) error { + keys := sortedKeys(data) + for _, flagList := range keys { + flagnameAndStatus := data[flagList] + var set []string + for flagname, isSet := range flagnameAndStatus { + if isSet { + set = append(set, flagname) + } + } + if len(set) >= 1 { + continue + } + + // Sort values, so they can be tested/scripted against consistently. 
+ sort.Strings(set) + return fmt.Errorf("at least one of the flags in the group [%v] is required", flagList) + } + return nil +} + +func validateExclusiveFlagGroups(data map[string]map[string]bool) error { + keys := sortedKeys(data) + for _, flagList := range keys { + flagnameAndStatus := data[flagList] + var set []string + for flagname, isSet := range flagnameAndStatus { + if isSet { + set = append(set, flagname) + } + } + if len(set) == 0 || len(set) == 1 { + continue + } + + // Sort values, so they can be tested/scripted against consistently. + sort.Strings(set) + return fmt.Errorf("if any flags in the group [%v] are set none of the others can be; %v were all set", flagList, set) + } + return nil +} + +func sortedKeys(m map[string]map[string]bool) []string { + keys := make([]string, len(m)) + i := 0 + for k := range m { + keys[i] = k + i++ + } + sort.Strings(keys) + return keys +} + +// enforceFlagGroupsForCompletion will do the following: +// - when a flag in a group is present, other flags in the group will be marked required +// - when none of the flags in a one-required group are present, all flags in the group will be marked required +// - when a flag in a mutually exclusive group is present, other flags in the group will be marked as hidden +// This allows the standard completion logic to behave appropriately for flag groups +func (c *Command) enforceFlagGroupsForCompletion() { + if c.DisableFlagParsing { + return + } + + flags := c.Flags() + groupStatus := map[string]map[string]bool{} + oneRequiredGroupStatus := map[string]map[string]bool{} + mutuallyExclusiveGroupStatus := map[string]map[string]bool{} + c.Flags().VisitAll(func(pflag *flag.Flag) { + processFlagForGroupAnnotation(flags, pflag, requiredAsGroup, groupStatus) + processFlagForGroupAnnotation(flags, pflag, oneRequired, oneRequiredGroupStatus) + processFlagForGroupAnnotation(flags, pflag, mutuallyExclusive, mutuallyExclusiveGroupStatus) + }) + + // If a flag that is part of a group is present, we make all the other flags + // of that group required so that the shell completion suggests them automatically + for flagList, flagnameAndStatus := range groupStatus { + for _, isSet := range flagnameAndStatus { + if isSet { + // One of the flags of the group is set, mark the other ones as required + for _, fName := range strings.Split(flagList, " ") { + _ = c.MarkFlagRequired(fName) + } + } + } + } + + // If none of the flags of a one-required group are present, we make all the flags + // of that group required so that the shell completion suggests them automatically + for flagList, flagnameAndStatus := range oneRequiredGroupStatus { + set := 0 + + for _, isSet := range flagnameAndStatus { + if isSet { + set++ + } + } + + // None of the flags of the group are set, mark all flags in the group + // as required + if set == 0 { + for _, fName := range strings.Split(flagList, " ") { + _ = c.MarkFlagRequired(fName) + } + } + } + + // If a flag that is mutually exclusive to others is present, we hide the other + // flags of that group so the shell completion does not suggest them + for flagList, flagnameAndStatus := range mutuallyExclusiveGroupStatus { + for flagName, isSet := range flagnameAndStatus { + if isSet { + // One of the flags of the mutually exclusive group is set, mark the other ones as hidden + // Don't mark the flag that is already set as hidden because it may be an + // array or slice flag and therefore must continue being suggested + for _, fName := range strings.Split(flagList, " ") { + if fName != flagName { + flag 
:= c.Flags().Lookup(fName) + flag.Hidden = true + } + } + } + } + } +} diff --git a/vendor/github.com/spf13/cobra/powershell_completions.go b/vendor/github.com/spf13/cobra/powershell_completions.go new file mode 100644 index 0000000000..5519519394 --- /dev/null +++ b/vendor/github.com/spf13/cobra/powershell_completions.go @@ -0,0 +1,325 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The generated scripts require PowerShell v5.0+ (which comes Windows 10, but +// can be downloaded separately for windows 7 or 8.1). + +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" + "strings" +) + +func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) { + // Variables should not contain a '-' or ':' character + nameForVar := name + nameForVar = strings.Replace(nameForVar, "-", "_", -1) + nameForVar = strings.Replace(nameForVar, ":", "_", -1) + + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd + } + WriteStringAndCheck(buf, fmt.Sprintf(`# powershell completion for %-36[1]s -*- shell-script -*- + +function __%[1]s_debug { + if ($env:BASH_COMP_DEBUG_FILE) { + "$args" | Out-File -Append -FilePath "$env:BASH_COMP_DEBUG_FILE" + } +} + +filter __%[1]s_escapeStringWithSpecialChars { +`+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+` +} + +[scriptblock]${__%[2]sCompleterBlock} = { + param( + $WordToComplete, + $CommandAst, + $CursorPosition + ) + + # Get the current command line and convert into a string + $Command = $CommandAst.CommandElements + $Command = "$Command" + + __%[1]s_debug "" + __%[1]s_debug "========= starting completion logic ==========" + __%[1]s_debug "WordToComplete: $WordToComplete Command: $Command CursorPosition: $CursorPosition" + + # The user could have moved the cursor backwards on the command-line. + # We need to trigger completion from the $CursorPosition location, so we need + # to truncate the command-line ($Command) up to the $CursorPosition location. + # Make sure the $Command is longer then the $CursorPosition before we truncate. + # This happens because the $Command does not include the last space. + if ($Command.Length -gt $CursorPosition) { + $Command=$Command.Substring(0,$CursorPosition) + } + __%[1]s_debug "Truncated command: $Command" + + $ShellCompDirectiveError=%[4]d + $ShellCompDirectiveNoSpace=%[5]d + $ShellCompDirectiveNoFileComp=%[6]d + $ShellCompDirectiveFilterFileExt=%[7]d + $ShellCompDirectiveFilterDirs=%[8]d + $ShellCompDirectiveKeepOrder=%[9]d + + # Prepare the command to request completions for the program. + # Split the command at the first space to separate the program and arguments. 
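For context on the flag_groups.go logic vendored above: applications normally do not set the requiredAsGroup/oneRequired/mutuallyExclusive annotations by hand. They call cobra's public helpers (MarkFlagsRequiredTogether, MarkFlagsOneRequired, MarkFlagsMutuallyExclusive), and ValidateFlagGroups then runs automatically during Execute. The sketch below is illustrative only; the command and flag names are hypothetical and not taken from this patch.

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	// Hypothetical command; the flag names are for illustration only.
	cmd := &cobra.Command{
		Use: "migrate",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("flag groups were validated before RunE ran")
			return nil
		},
	}
	cmd.Flags().String("source-pool", "", "pool to migrate from")
	cmd.Flags().String("target-pool", "", "pool to migrate to")
	cmd.Flags().Bool("dry-run", false, "print planned actions only")
	cmd.Flags().Bool("force", false, "skip safety checks")

	// Both pool flags must be given together (the requiredAsGroup annotation).
	cmd.MarkFlagsRequiredTogether("source-pool", "target-pool")
	// --dry-run and --force may not be combined (the mutuallyExclusive annotation).
	cmd.MarkFlagsMutuallyExclusive("dry-run", "force")

	// Execute parses flags and calls ValidateFlagGroups before running RunE.
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```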
+ $Program,$Arguments = $Command.Split(" ",2) + + $RequestComp="$Program %[3]s $Arguments" + __%[1]s_debug "RequestComp: $RequestComp" + + # we cannot use $WordToComplete because it + # has the wrong values if the cursor was moved + # so use the last argument + if ($WordToComplete -ne "" ) { + $WordToComplete = $Arguments.Split(" ")[-1] + } + __%[1]s_debug "New WordToComplete: $WordToComplete" + + + # Check for flag with equal sign + $IsEqualFlag = ($WordToComplete -Like "--*=*" ) + if ( $IsEqualFlag ) { + __%[1]s_debug "Completing equal sign flag" + # Remove the flag part + $Flag,$WordToComplete = $WordToComplete.Split("=",2) + } + + if ( $WordToComplete -eq "" -And ( -Not $IsEqualFlag )) { + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go method. + __%[1]s_debug "Adding extra empty parameter" + # PowerShell 7.2+ changed the way how the arguments are passed to executables, + # so for pre-7.2 or when Legacy argument passing is enabled we need to use +`+" # `\"`\" to pass an empty argument, a \"\" or '' does not work!!!"+` + if ($PSVersionTable.PsVersion -lt [version]'7.2.0' -or + ($PSVersionTable.PsVersion -lt [version]'7.3.0' -and -not [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -or + (($PSVersionTable.PsVersion -ge [version]'7.3.0' -or [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -and + $PSNativeCommandArgumentPassing -eq 'Legacy')) { +`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+` + } else { + $RequestComp="$RequestComp" + ' ""' + } + } + + __%[1]s_debug "Calling $RequestComp" + # First disable ActiveHelp which is not supported for Powershell + ${env:%[10]s}=0 + + #call the command store the output in $out and redirect stderr and stdout to null + # $Out is an array contains each line per element + Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null + + # get directive from last line + [int]$Directive = $Out[-1].TrimStart(':') + if ($Directive -eq "") { + # There is no directive specified + $Directive = 0 + } + __%[1]s_debug "The completion directive is: $Directive" + + # remove directive (last element) from out + $Out = $Out | Where-Object { $_ -ne $Out[-1] } + __%[1]s_debug "The completions are: $Out" + + if (($Directive -band $ShellCompDirectiveError) -ne 0 ) { + # Error code. No completion. + __%[1]s_debug "Received error from custom completion go code" + return + } + + $Longest = 0 + [Array]$Values = $Out | ForEach-Object { + #Split the output in name and description +`+" $Name, $Description = $_.Split(\"`t\",2)"+` + __%[1]s_debug "Name: $Name Description: $Description" + + # Look for the longest completion so that we can format things nicely + if ($Longest -lt $Name.Length) { + $Longest = $Name.Length + } + + # Set the description to a one space string if there is none set. 
+ # This is needed because the CompletionResult does not accept an empty string as argument + if (-Not $Description) { + $Description = " " + } + @{Name="$Name";Description="$Description"} + } + + + $Space = " " + if (($Directive -band $ShellCompDirectiveNoSpace) -ne 0 ) { + # remove the space here + __%[1]s_debug "ShellCompDirectiveNoSpace is called" + $Space = "" + } + + if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or + (($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) { + __%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported" + + # return here to prevent the completion of the extensions + return + } + + $Values = $Values | Where-Object { + # filter the result + $_.Name -like "$WordToComplete*" + + # Join the flag back if we have an equal sign flag + if ( $IsEqualFlag ) { + __%[1]s_debug "Join the equal sign flag back to the completion value" + $_.Name = $Flag + "=" + $_.Name + } + } + + # we sort the values in ascending order by name if keep order isn't passed + if (($Directive -band $ShellCompDirectiveKeepOrder) -eq 0 ) { + $Values = $Values | Sort-Object -Property Name + } + + if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) { + __%[1]s_debug "ShellCompDirectiveNoFileComp is called" + + if ($Values.Length -eq 0) { + # Just print an empty string here so the + # shell does not start to complete paths. + # We cannot use CompletionResult here because + # it does not accept an empty string as argument. + "" + return + } + } + + # Get the current mode + $Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function + __%[1]s_debug "Mode: $Mode" + + $Values | ForEach-Object { + + # store temporary because switch will overwrite $_ + $comp = $_ + + # PowerShell supports three different completion modes + # - TabCompleteNext (default windows style - on each key press the next option is displayed) + # - Complete (works like bash) + # - MenuComplete (works like zsh) + # You set the mode with Set-PSReadLineKeyHandler -Key Tab -Function + + # CompletionResult Arguments: + # 1) CompletionText text to be used as the auto completion result + # 2) ListItemText text to be displayed in the suggestion list + # 3) ResultType type of completion result + # 4) ToolTip text for the tooltip with details about the object + + switch ($Mode) { + + # bash like + "Complete" { + + if ($Values.Length -eq 1) { + __%[1]s_debug "Only one completion left" + + # insert space after value + [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + + } else { + # Add the proper number of spaces to align the descriptions + while($comp.Name.Length -lt $Longest) { + $comp.Name = $comp.Name + " " + } + + # Check for empty description and only add parentheses if needed + if ($($comp.Description) -eq " " ) { + $Description = "" + } else { + $Description = " ($($comp.Description))" + } + + [System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)") + } + } + + # zsh like + "MenuComplete" { + # insert space after value + # MenuComplete will automatically show the ToolTip of + # the highlighted value at the bottom of the suggestions. 
+ [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } + + # TabCompleteNext and in case we get something unknown + Default { + # Like MenuComplete but we don't want to add a space here because + # the user need to press space anyway to get the completion. + # Description will not be shown because that's not possible with TabCompleteNext + [System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)") + } + } + + } +} + +Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock ${__%[2]sCompleterBlock} +`, name, nameForVar, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name))) +} + +func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error { + buf := new(bytes.Buffer) + genPowerShellComp(buf, c.Name(), includeDesc) + _, err := buf.WriteTo(w) + return err +} + +func (c *Command) genPowerShellCompletionFile(filename string, includeDesc bool) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.genPowerShellCompletion(outFile, includeDesc) +} + +// GenPowerShellCompletionFile generates powershell completion file without descriptions. +func (c *Command) GenPowerShellCompletionFile(filename string) error { + return c.genPowerShellCompletionFile(filename, false) +} + +// GenPowerShellCompletion generates powershell completion file without descriptions +// and writes it to the passed writer. +func (c *Command) GenPowerShellCompletion(w io.Writer) error { + return c.genPowerShellCompletion(w, false) +} + +// GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions. +func (c *Command) GenPowerShellCompletionFileWithDesc(filename string) error { + return c.genPowerShellCompletionFile(filename, true) +} + +// GenPowerShellCompletionWithDesc generates powershell completion file with descriptions +// and writes it to the passed writer. +func (c *Command) GenPowerShellCompletionWithDesc(w io.Writer) error { + return c.genPowerShellCompletion(w, true) +} diff --git a/vendor/github.com/spf13/cobra/shell_completions.go b/vendor/github.com/spf13/cobra/shell_completions.go new file mode 100644 index 0000000000..b035742d39 --- /dev/null +++ b/vendor/github.com/spf13/cobra/shell_completions.go @@ -0,0 +1,98 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cobra + +import ( + "github.com/spf13/pflag" +) + +// MarkFlagRequired instructs the various shell completion implementations to +// prioritize the named flag when performing completion, +// and causes your command to report an error if invoked without the flag. 
+func (c *Command) MarkFlagRequired(name string) error { + return MarkFlagRequired(c.Flags(), name) +} + +// MarkPersistentFlagRequired instructs the various shell completion implementations to +// prioritize the named persistent flag when performing completion, +// and causes your command to report an error if invoked without the flag. +func (c *Command) MarkPersistentFlagRequired(name string) error { + return MarkFlagRequired(c.PersistentFlags(), name) +} + +// MarkFlagRequired instructs the various shell completion implementations to +// prioritize the named flag when performing completion, +// and causes your command to report an error if invoked without the flag. +func MarkFlagRequired(flags *pflag.FlagSet, name string) error { + return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) +} + +// MarkFlagFilename instructs the various shell completion implementations to +// limit completions for the named flag to the specified file extensions. +func (c *Command) MarkFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(c.Flags(), name, extensions...) +} + +// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. +// The bash completion script will call the bash function f for the flag. +// +// This will only work for bash completion. +// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows +// to register a Go function which will work across all shells. +func (c *Command) MarkFlagCustom(name string, f string) error { + return MarkFlagCustom(c.Flags(), name, f) +} + +// MarkPersistentFlagFilename instructs the various shell completion +// implementations to limit completions for the named persistent flag to the +// specified file extensions. +func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(c.PersistentFlags(), name, extensions...) +} + +// MarkFlagFilename instructs the various shell completion implementations to +// limit completions for the named flag to the specified file extensions. +func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { + return flags.SetAnnotation(name, BashCompFilenameExt, extensions) +} + +// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. +// The bash completion script will call the bash function f for the flag. +// +// This will only work for bash completion. +// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows +// to register a Go function which will work across all shells. +func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { + return flags.SetAnnotation(name, BashCompCustom, []string{f}) +} + +// MarkFlagDirname instructs the various shell completion implementations to +// limit completions for the named flag to directory names. +func (c *Command) MarkFlagDirname(name string) error { + return MarkFlagDirname(c.Flags(), name) +} + +// MarkPersistentFlagDirname instructs the various shell completion +// implementations to limit completions for the named persistent flag to +// directory names. +func (c *Command) MarkPersistentFlagDirname(name string) error { + return MarkFlagDirname(c.PersistentFlags(), name) +} + +// MarkFlagDirname instructs the various shell completion implementations to +// limit completions for the named flag to directory names. 
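The MarkFlag* helpers in shell_completions.go only attach bash-completion annotations to pflag flags; the generated completion scripts (and flag validation) read them back. A minimal, hypothetical usage sketch follows; the flag names are illustrative and the zsh generator it calls is the one vendored later in this patch.

```go
package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	// Hypothetical flags, used only to show the completion-marking helpers.
	cmd := &cobra.Command{Use: "example", Run: func(*cobra.Command, []string) {}}
	cmd.Flags().String("config", "", "path to a config file")
	cmd.Flags().String("workdir", "", "working directory")

	// Completion prioritizes --config, and the command errors if it is missing.
	_ = cmd.MarkFlagRequired("config")
	// Limit completions for --config to YAML files and --workdir to directories.
	_ = cmd.MarkFlagFilename("config", "yaml", "yml")
	_ = cmd.MarkFlagDirname("workdir")

	// Emit a zsh completion script that honours the annotations above.
	_ = cmd.GenZshCompletion(os.Stdout)
}
```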
+func MarkFlagDirname(flags *pflag.FlagSet, name string) error { + return flags.SetAnnotation(name, BashCompSubdirsInDir, []string{}) +} diff --git a/vendor/github.com/spf13/cobra/zsh_completions.go b/vendor/github.com/spf13/cobra/zsh_completions.go new file mode 100644 index 0000000000..1856e4c7f6 --- /dev/null +++ b/vendor/github.com/spf13/cobra/zsh_completions.go @@ -0,0 +1,308 @@ +// Copyright 2013-2023 The Cobra Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cobra + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// GenZshCompletionFile generates zsh completion file including descriptions. +func (c *Command) GenZshCompletionFile(filename string) error { + return c.genZshCompletionFile(filename, true) +} + +// GenZshCompletion generates zsh completion file including descriptions +// and writes it to the passed writer. +func (c *Command) GenZshCompletion(w io.Writer) error { + return c.genZshCompletion(w, true) +} + +// GenZshCompletionFileNoDesc generates zsh completion file without descriptions. +func (c *Command) GenZshCompletionFileNoDesc(filename string) error { + return c.genZshCompletionFile(filename, false) +} + +// GenZshCompletionNoDesc generates zsh completion file without descriptions +// and writes it to the passed writer. +func (c *Command) GenZshCompletionNoDesc(w io.Writer) error { + return c.genZshCompletion(w, false) +} + +// MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was +// not consistent with Bash completion. It has therefore been disabled. +// Instead, when no other completion is specified, file completion is done by +// default for every argument. One can disable file completion on a per-argument +// basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp. +// To achieve file extension filtering, one can use ValidArgsFunction and +// ShellCompDirectiveFilterFileExt. +// +// Deprecated +func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error { + return nil +} + +// MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore +// been disabled. +// To achieve the same behavior across all shells, one can use +// ValidArgs (for the first argument only) or ValidArgsFunction for +// any argument (can include the first one also). 
+// +// Deprecated +func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error { + return nil +} + +func (c *Command) genZshCompletionFile(filename string, includeDesc bool) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return c.genZshCompletion(outFile, includeDesc) +} + +func (c *Command) genZshCompletion(w io.Writer, includeDesc bool) error { + buf := new(bytes.Buffer) + genZshComp(buf, c.Name(), includeDesc) + _, err := buf.WriteTo(w) + return err +} + +func genZshComp(buf io.StringWriter, name string, includeDesc bool) { + compCmd := ShellCompRequestCmd + if !includeDesc { + compCmd = ShellCompNoDescRequestCmd + } + WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s +compdef _%[1]s %[1]s + +# zsh completion for %-36[1]s -*- shell-script -*- + +__%[1]s_debug() +{ + local file="$BASH_COMP_DEBUG_FILE" + if [[ -n ${file} ]]; then + echo "$*" >> "${file}" + fi +} + +_%[1]s() +{ + local shellCompDirectiveError=%[3]d + local shellCompDirectiveNoSpace=%[4]d + local shellCompDirectiveNoFileComp=%[5]d + local shellCompDirectiveFilterFileExt=%[6]d + local shellCompDirectiveFilterDirs=%[7]d + local shellCompDirectiveKeepOrder=%[8]d + + local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace keepOrder + local -a completions + + __%[1]s_debug "\n========= starting completion logic ==========" + __%[1]s_debug "CURRENT: ${CURRENT}, words[*]: ${words[*]}" + + # The user could have moved the cursor backwards on the command-line. + # We need to trigger completion from the $CURRENT location, so we need + # to truncate the command-line ($words) up to the $CURRENT location. + # (We cannot use $CURSOR as its value does not work when a command is an alias.) + words=("${=words[1,CURRENT]}") + __%[1]s_debug "Truncated words[*]: ${words[*]}," + + lastParam=${words[-1]} + lastChar=${lastParam[-1]} + __%[1]s_debug "lastParam: ${lastParam}, lastChar: ${lastChar}" + + # For zsh, when completing a flag with an = (e.g., %[1]s -n=) + # completions must be prefixed with the flag + setopt local_options BASH_REMATCH + if [[ "${lastParam}" =~ '-.*=' ]]; then + # We are dealing with a flag with an = + flagPrefix="-P ${BASH_REMATCH}" + fi + + # Prepare the command to obtain completions + requestComp="${words[1]} %[2]s ${words[2,-1]}" + if [ "${lastChar}" = "" ]; then + # If the last parameter is complete (there is a space following it) + # We add an extra empty parameter so we can indicate this to the go completion code. + __%[1]s_debug "Adding extra empty parameter" + requestComp="${requestComp} \"\"" + fi + + __%[1]s_debug "About to call: eval ${requestComp}" + + # Use eval to handle any environment variables and such + out=$(eval ${requestComp} 2>/dev/null) + __%[1]s_debug "completion output: ${out}" + + # Extract the directive integer following a : from the last line + local lastLine + while IFS='\n' read -r line; do + lastLine=${line} + done < <(printf "%%s\n" "${out[@]}") + __%[1]s_debug "last line: ${lastLine}" + + if [ "${lastLine[1]}" = : ]; then + directive=${lastLine[2,-1]} + # Remove the directive including the : and the newline + local suffix + (( suffix=${#lastLine}+2)) + out=${out[1,-$suffix]} + else + # There is no directive specified. Leave $out as is. + __%[1]s_debug "No directive found. 
Setting do default" + directive=0 + fi + + __%[1]s_debug "directive: ${directive}" + __%[1]s_debug "completions: ${out}" + __%[1]s_debug "flagPrefix: ${flagPrefix}" + + if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then + __%[1]s_debug "Completion received error. Ignoring completions." + return + fi + + local activeHelpMarker="%[9]s" + local endIndex=${#activeHelpMarker} + local startIndex=$((${#activeHelpMarker}+1)) + local hasActiveHelp=0 + while IFS='\n' read -r comp; do + # Check if this is an activeHelp statement (i.e., prefixed with $activeHelpMarker) + if [ "${comp[1,$endIndex]}" = "$activeHelpMarker" ];then + __%[1]s_debug "ActiveHelp found: $comp" + comp="${comp[$startIndex,-1]}" + if [ -n "$comp" ]; then + compadd -x "${comp}" + __%[1]s_debug "ActiveHelp will need delimiter" + hasActiveHelp=1 + fi + + continue + fi + + if [ -n "$comp" ]; then + # If requested, completions are returned with a description. + # The description is preceded by a TAB character. + # For zsh's _describe, we need to use a : instead of a TAB. + # We first need to escape any : as part of the completion itself. + comp=${comp//:/\\:} + + local tab="$(printf '\t')" + comp=${comp//$tab/:} + + __%[1]s_debug "Adding completion: ${comp}" + completions+=${comp} + lastComp=$comp + fi + done < <(printf "%%s\n" "${out[@]}") + + # Add a delimiter after the activeHelp statements, but only if: + # - there are completions following the activeHelp statements, or + # - file completion will be performed (so there will be choices after the activeHelp) + if [ $hasActiveHelp -eq 1 ]; then + if [ ${#completions} -ne 0 ] || [ $((directive & shellCompDirectiveNoFileComp)) -eq 0 ]; then + __%[1]s_debug "Adding activeHelp delimiter" + compadd -x "--" + hasActiveHelp=0 + fi + fi + + if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then + __%[1]s_debug "Activating nospace." + noSpace="-S ''" + fi + + if [ $((directive & shellCompDirectiveKeepOrder)) -ne 0 ]; then + __%[1]s_debug "Activating keep order." + keepOrder="-V" + fi + + if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then + # File extension filtering + local filteringCmd + filteringCmd='_files' + for filter in ${completions[@]}; do + if [ ${filter[1]} != '*' ]; then + # zsh requires a glob pattern to do file filtering + filter="\*.$filter" + fi + filteringCmd+=" -g $filter" + done + filteringCmd+=" ${flagPrefix}" + + __%[1]s_debug "File filtering command: $filteringCmd" + _arguments '*:filename:'"$filteringCmd" + elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then + # File completion for directories only + local subdir + subdir="${completions[1]}" + if [ -n "$subdir" ]; then + __%[1]s_debug "Listing directories in $subdir" + pushd "${subdir}" >/dev/null 2>&1 + else + __%[1]s_debug "Listing directories in ." + fi + + local result + _arguments '*:dirname:_files -/'" ${flagPrefix}" + result=$? + if [ -n "$subdir" ]; then + popd >/dev/null 2>&1 + fi + return $result + else + __%[1]s_debug "Calling _describe" + if eval _describe $keepOrder "completions" completions $flagPrefix $noSpace; then + __%[1]s_debug "_describe found some completions" + + # Return the success of having called _describe + return 0 + else + __%[1]s_debug "_describe did not find completions." + __%[1]s_debug "Checking if we should do file completion." 
+ if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then + __%[1]s_debug "deactivating file completion" + + # We must return an error code here to let zsh know that there were no + # completions found by _describe; this is what will trigger other + # matching algorithms to attempt to find completions. + # For example zsh can match letters in the middle of words. + return 1 + else + # Perform file completion + __%[1]s_debug "Activating file completion" + + # We must return the result of this command, so it must be the + # last command, or else we must store its result to return it. + _arguments '*:filename:_files'" ${flagPrefix}" + fi + fi + fi +} + +# don't run the completion function when being source-ed or eval-ed +if [ "$funcstack[1]" = "_%[1]s" ]; then + _%[1]s +fi +`, name, compCmd, + ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp, + ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, + activeHelpMarker)) +} diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml index fbc6df7906..2346df1351 100644 --- a/vendor/go.uber.org/zap/.golangci.yml +++ b/vendor/go.uber.org/zap/.golangci.yml @@ -17,7 +17,7 @@ linters: - unused # Our own extras: - - gofmt + - gofumpt - nolintlint # lints nolint directives - revive diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl index 92aa65d660..4fea3027af 100644 --- a/vendor/go.uber.org/zap/.readme.tmpl +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -1,7 +1,15 @@ # :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +
+ Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+ ## Installation `go get -u go.uber.org/zap` @@ -92,7 +100,7 @@ standard.
-Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md index 11b4659761..6d6cd5f4d7 100644 --- a/vendor/go.uber.org/zap/CHANGELOG.md +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -3,14 +3,30 @@ All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## 1.27.0 (20 Feb 2024) +Enhancements: +* [#1378][]: Add `WithLazy` method for `SugaredLogger`. +* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`. +* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`. +* [#1416][]: Add `WithPanicHook` option for testing panic logs. + +Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release. + +[#1378]: https://github.com/uber-go/zap/pull/1378 +[#1399]: https://github.com/uber-go/zap/pull/1399 +[#1406]: https://github.com/uber-go/zap/pull/1406 +[#1416]: https://github.com/uber-go/zap/pull/1416 + ## 1.26.0 (14 Sep 2023) Enhancements: +* [#1297][]: Add Dict as a Field. * [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured context. * [#1350][]: String encoding is much (~50%) faster now. -Thanks to @jquirke, @cdvr1993 for their contributions to this release. +Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release. +[#1297]: https://github.com/uber-go/zap/pull/1297 [#1319]: https://github.com/uber-go/zap/pull/1319 [#1350]: https://github.com/uber-go/zap/pull/1350 @@ -25,7 +41,7 @@ Enhancements: * [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set. * [#1281][]: Add `zap/exp/expfield` package which contains helper methods `Str` and `Strs` for constructing String-like zap.Fields. -* [#1310][]: Reduce stack size on `Any`. +* [#1310][]: Reduce stack size on `Any`. Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions to this release. @@ -352,7 +368,7 @@ to this release. [#675]: https://github.com/uber-go/zap/pull/675 [#704]: https://github.com/uber-go/zap/pull/704 -## v1.9.1 (06 Aug 2018) +## 1.9.1 (06 Aug 2018) Bugfixes: @@ -360,7 +376,7 @@ Bugfixes: [#614]: https://github.com/uber-go/zap/pull/614 -## v1.9.0 (19 Jul 2018) +## 1.9.0 (19 Jul 2018) Enhancements: * [#602][]: Reduce number of allocations when logging with reflection. @@ -373,7 +389,7 @@ Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and [#572]: https://github.com/uber-go/zap/pull/572 [#606]: https://github.com/uber-go/zap/pull/606 -## v1.8.0 (13 Apr 2018) +## 1.8.0 (13 Apr 2018) Enhancements: * [#508][]: Make log level configurable when redirecting the standard @@ -391,14 +407,14 @@ Thanks to @DiSiqueira and @djui for their contributions to this release. [#577]: https://github.com/uber-go/zap/pull/577 [#574]: https://github.com/uber-go/zap/pull/574 -## v1.7.1 (25 Sep 2017) +## 1.7.1 (25 Sep 2017) Bugfixes: * [#504][]: Store strings when using AddByteString with the map encoder. 
[#504]: https://github.com/uber-go/zap/pull/504 -## v1.7.0 (21 Sep 2017) +## 1.7.0 (21 Sep 2017) Enhancements: @@ -407,7 +423,7 @@ Enhancements: [#487]: https://github.com/uber-go/zap/pull/487 -## v1.6.0 (30 Aug 2017) +## 1.6.0 (30 Aug 2017) Enhancements: @@ -418,7 +434,7 @@ Enhancements: [#490]: https://github.com/uber-go/zap/pull/490 [#491]: https://github.com/uber-go/zap/pull/491 -## v1.5.0 (22 Jul 2017) +## 1.5.0 (22 Jul 2017) Enhancements: @@ -436,7 +452,7 @@ Thanks to @richard-tunein and @pavius for their contributions to this release. [#460]: https://github.com/uber-go/zap/pull/460 [#470]: https://github.com/uber-go/zap/pull/470 -## v1.4.1 (08 Jun 2017) +## 1.4.1 (08 Jun 2017) This release fixes two bugs. @@ -448,7 +464,7 @@ Bugfixes: [#435]: https://github.com/uber-go/zap/pull/435 [#444]: https://github.com/uber-go/zap/pull/444 -## v1.4.0 (12 May 2017) +## 1.4.0 (12 May 2017) This release adds a few small features and is fully backward-compatible. @@ -464,7 +480,7 @@ Enhancements: [#425]: https://github.com/uber-go/zap/pull/425 [#431]: https://github.com/uber-go/zap/pull/431 -## v1.3.0 (25 Apr 2017) +## 1.3.0 (25 Apr 2017) This release adds an enhancement to zap's testing helpers as well as the ability to marshal an AtomicLevel. It is fully backward-compatible. @@ -478,7 +494,7 @@ Enhancements: [#415]: https://github.com/uber-go/zap/pull/415 [#416]: https://github.com/uber-go/zap/pull/416 -## v1.2.0 (13 Apr 2017) +## 1.2.0 (13 Apr 2017) This release adds a gRPC compatibility wrapper. It is fully backward-compatible. @@ -489,7 +505,7 @@ Enhancements: [#402]: https://github.com/uber-go/zap/pull/402 -## v1.1.0 (31 Mar 2017) +## 1.1.0 (31 Mar 2017) This release fixes two bugs and adds some enhancements to zap's testing helpers. It is fully backward-compatible. @@ -510,7 +526,7 @@ Thanks to @moitias for contributing to this release. [#396]: https://github.com/uber-go/zap/pull/396 [#386]: https://github.com/uber-go/zap/pull/386 -## v1.0.0 (14 Mar 2017) +## 1.0.0 (14 Mar 2017) This is zap's first stable release. All exported APIs are now final, and no further breaking changes will be made in the 1.x release series. Anyone using a @@ -569,7 +585,7 @@ contributions to this release. [#365]: https://github.com/uber-go/zap/pull/365 [#372]: https://github.com/uber-go/zap/pull/372 -## v1.0.0-rc.3 (7 Mar 2017) +## 1.0.0-rc.3 (7 Mar 2017) This is the third release candidate for zap's stable release. There are no breaking changes. @@ -595,7 +611,7 @@ Thanks to @ansel1 and @suyash for their contributions to this release. [#353]: https://github.com/uber-go/zap/pull/353 [#311]: https://github.com/uber-go/zap/pull/311 -## v1.0.0-rc.2 (21 Feb 2017) +## 1.0.0-rc.2 (21 Feb 2017) This is the second release candidate for zap's stable release. It includes two breaking changes. @@ -641,7 +657,7 @@ Thanks to @skipor and @chapsuk for their contributions to this release. [#326]: https://github.com/uber-go/zap/pull/326 [#300]: https://github.com/uber-go/zap/pull/300 -## v1.0.0-rc.1 (14 Feb 2017) +## 1.0.0-rc.1 (14 Feb 2017) This is the first release candidate for zap's stable release. There are multiple breaking changes and improvements from the pre-release version. Most notably: @@ -661,7 +677,7 @@ breaking changes and improvements from the pre-release version. Most notably: * Sampling is more accurate, and doesn't depend on the standard library's shared timer heap. 
-## v0.1.0-beta.1 (6 Feb 2017) +## 0.1.0-beta.1 (6 Feb 2017) This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and upgrade at their leisure. Since this is the first tagged release, there are no diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE similarity index 100% rename from vendor/go.uber.org/zap/LICENSE.txt rename to vendor/go.uber.org/zap/LICENSE diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md index 9de08927be..a17035cb6f 100644 --- a/vendor/go.uber.org/zap/README.md +++ b/vendor/go.uber.org/zap/README.md @@ -1,7 +1,16 @@ -# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] +# :zap: zap + + +
Blazing fast, structured, leveled logging in Go. +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+ ## Installation `go get -u go.uber.org/zap` @@ -66,41 +75,44 @@ Log a message and 10 fields: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 1744 ns/op | +0% | 5 allocs/op -| :zap: zap (sugared) | 2483 ns/op | +42% | 10 allocs/op -| zerolog | 918 ns/op | -47% | 1 allocs/op -| go-kit | 5590 ns/op | +221% | 57 allocs/op -| slog | 5640 ns/op | +223% | 40 allocs/op -| apex/log | 21184 ns/op | +1115% | 63 allocs/op -| logrus | 24338 ns/op | +1296% | 79 allocs/op -| log15 | 26054 ns/op | +1394% | 74 allocs/op +| :zap: zap | 656 ns/op | +0% | 5 allocs/op +| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op +| zerolog | 380 ns/op | -42% | 1 allocs/op +| go-kit | 2249 ns/op | +243% | 57 allocs/op +| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op +| slog | 2481 ns/op | +278% | 42 allocs/op +| apex/log | 9591 ns/op | +1362% | 63 allocs/op +| log15 | 11393 ns/op | +1637% | 75 allocs/op +| logrus | 11654 ns/op | +1677% | 79 allocs/op Log a message with a logger that already has 10 fields of context: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 193 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 227 ns/op | +18% | 1 allocs/op -| zerolog | 81 ns/op | -58% | 0 allocs/op -| slog | 322 ns/op | +67% | 0 allocs/op -| go-kit | 5377 ns/op | +2686% | 56 allocs/op -| apex/log | 19518 ns/op | +10013% | 53 allocs/op -| log15 | 19812 ns/op | +10165% | 70 allocs/op -| logrus | 21997 ns/op | +11297% | 68 allocs/op +| :zap: zap | 67 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op +| zerolog | 35 ns/op | -48% | 0 allocs/op +| slog | 193 ns/op | +188% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op +| go-kit | 2460 ns/op | +3572% | 56 allocs/op +| log15 | 9038 ns/op | +13390% | 70 allocs/op +| apex/log | 9068 ns/op | +13434% | 53 allocs/op +| logrus | 10521 ns/op | +15603% | 68 allocs/op Log a static string, without any context or `printf`-style templating: | Package | Time | Time % to zap | Objects Allocated | | :------ | :--: | :-----------: | :---------------: | -| :zap: zap | 165 ns/op | +0% | 0 allocs/op -| :zap: zap (sugared) | 212 ns/op | +28% | 1 allocs/op -| zerolog | 95 ns/op | -42% | 0 allocs/op -| slog | 296 ns/op | +79% | 0 allocs/op -| go-kit | 415 ns/op | +152% | 9 allocs/op -| standard library | 422 ns/op | +156% | 2 allocs/op -| apex/log | 1601 ns/op | +870% | 5 allocs/op -| logrus | 3017 ns/op | +1728% | 23 allocs/op -| log15 | 3469 ns/op | +2002% | 20 allocs/op +| :zap: zap | 63 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op +| zerolog | 32 ns/op | -49% | 0 allocs/op +| standard library | 124 ns/op | +97% | 1 allocs/op +| slog | 196 ns/op | +211% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op +| go-kit | 213 ns/op | +238% | 9 allocs/op +| apex/log | 771 ns/op | +1124% | 5 allocs/op +| logrus | 1439 ns/op | +2184% | 23 allocs/op +| log15 | 2069 ns/op | +3184% | 20 allocs/op ## Development Status: Stable @@ -120,7 +132,7 @@ standard.
-Released under the [MIT License](LICENSE.txt). +Released under the [MIT License](LICENSE). 1 In particular, keep in mind that we may be benchmarking against slightly older versions of other packages. Versions are diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go index 27fb5cd5da..0b8540c213 100644 --- a/vendor/go.uber.org/zap/buffer/buffer.go +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -42,7 +42,7 @@ func (b *Buffer) AppendByte(v byte) { b.bs = append(b.bs, v) } -// AppendBytes writes a single byte to the Buffer. +// AppendBytes writes the given slice of bytes to the Buffer. func (b *Buffer) AppendBytes(v []byte) { b.bs = append(b.bs, v...) } diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go index c8dd3358a9..6743930b82 100644 --- a/vendor/go.uber.org/zap/field.go +++ b/vendor/go.uber.org/zap/field.go @@ -460,6 +460,8 @@ func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error { // - https://github.com/uber-go/zap/pull/1304 // - https://github.com/uber-go/zap/pull/1305 // - https://github.com/uber-go/zap/pull/1308 +// +// See https://github.com/golang/go/issues/62077 for upstream issue. type anyFieldC[T any] func(string, T) Field func (f anyFieldC[T]) Any(key string, val any) Field { diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go index 6205fe48a6..c4d3003239 100644 --- a/vendor/go.uber.org/zap/logger.go +++ b/vendor/go.uber.org/zap/logger.go @@ -43,6 +43,7 @@ type Logger struct { development bool addCaller bool + onPanic zapcore.CheckWriteHook // default is WriteThenPanic onFatal zapcore.CheckWriteHook // default is WriteThenFatal name string @@ -345,27 +346,12 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { // Set up any required terminal behavior. switch ent.Level { case zapcore.PanicLevel: - ce = ce.After(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) case zapcore.FatalLevel: - onFatal := log.onFatal - // nil or WriteThenNoop will lead to continued execution after - // a Fatal log entry, which is unexpected. For example, - // - // f, err := os.Open(..) - // if err != nil { - // log.Fatal("cannot open", zap.Error(err)) - // } - // fmt.Println(f.Name()) - // - // The f.Name() will panic if we continue execution after the - // log.Fatal. - if onFatal == nil || onFatal == zapcore.WriteThenNoop { - onFatal = zapcore.WriteThenFatal - } - ce = ce.After(ent, onFatal) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal)) case zapcore.DPanicLevel: if log.development { - ce = ce.After(ent, zapcore.WriteThenPanic) + ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic)) } } @@ -430,3 +416,20 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { return ce } + +func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook { + // A nil or WriteThenNoop hook will lead to continued execution after + // a Panic or Fatal log entry, which is unexpected. For example, + // + // f, err := os.Open(..) + // if err != nil { + // log.Fatal("cannot open", zap.Error(err)) + // } + // fmt.Println(f.Name()) + // + // The f.Name() will panic if we continue execution after the log.Fatal. 
+ if override == nil || override == zapcore.WriteThenNoop { + return defaultHook + } + return override +} diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go index c4f3bca3d2..43d357ac90 100644 --- a/vendor/go.uber.org/zap/options.go +++ b/vendor/go.uber.org/zap/options.go @@ -132,6 +132,21 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option { }) } +// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs. +// Zap will call this hook after writing a log statement with a Panic/DPanic level. +// +// For example, the following builds a logger that will exit the current +// goroutine after writing a Panic/DPanic log message, but it will not start a panic. +// +// zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit)) +// +// This is useful for testing Panic/DPanic log output. +func WithPanicHook(hook zapcore.CheckWriteHook) Option { + return optionFunc(func(log *Logger) { + log.onPanic = hook + }) +} + // OnFatal sets the action to take on fatal logs. // // Deprecated: Use [WithFatalHook] instead. diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go index 00ac5fe3ac..8904cd0871 100644 --- a/vendor/go.uber.org/zap/sugar.go +++ b/vendor/go.uber.org/zap/sugar.go @@ -115,6 +115,21 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger { return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)} } +// WithLazy adds a variadic number of fields to the logging context lazily. +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. +// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// Similar to [With], fields added to the child don't affect the parent, +// and vice versa. Also, the keys in key-value pairs should be strings. In development, +// passing a non-string key panics, while in production it logs an error and skips the pair. +// Passing an orphaned key has the same behavior. +func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger { + return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)} +} + // Level reports the minimum enabled level for this logger. // // For NopLoggers, this is [zapcore.InvalidLevel]. @@ -122,6 +137,12 @@ func (s *SugaredLogger) Level() zapcore.Level { return zapcore.LevelOf(s.base.core) } +// Log logs the provided arguments at provided level. +// Spaces are added between arguments when neither is a string. +func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) { + s.log(lvl, "", args, nil) +} + // Debug logs the provided arguments at [DebugLevel]. // Spaces are added between arguments when neither is a string. func (s *SugaredLogger) Debug(args ...interface{}) { @@ -165,6 +186,12 @@ func (s *SugaredLogger) Fatal(args ...interface{}) { s.log(FatalLevel, "", args, nil) } +// Logf formats the message according to the format specifier +// and logs it at provided level. +func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) { + s.log(lvl, template, args, nil) +} + // Debugf formats the message according to the format specifier // and logs it at [DebugLevel]. 
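The zap 1.27.0 additions vendored here include the WithPanicHook option (applied through terminalHookOverride above) and the level-parameterized SugaredLogger helpers Log, Logf, Logw, and Logln. A minimal sketch, assuming a standard production logger; it does not reflect how this repository wires its own loggers.

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Panic/DPanic writes run WriteThenGoexit instead of panicking,
	// which is mainly useful when testing panic-level output.
	logger, err := zap.NewProduction(zap.WithPanicHook(zapcore.WriteThenGoexit))
	if err != nil {
		panic(err)
	}
	defer func() { _ = logger.Sync() }()

	sugar := logger.Sugar()
	// Level-parameterized logging helpers added in zap 1.27.0.
	sugar.Log(zapcore.InfoLevel, "plain message at a dynamic level")
	sugar.Logw(zapcore.WarnLevel, "message with context", "attempt", 3)
	sugar.Logf(zapcore.DebugLevel, "formatted %s", "message")
}
```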
func (s *SugaredLogger) Debugf(template string, args ...interface{}) { @@ -208,6 +235,12 @@ func (s *SugaredLogger) Fatalf(template string, args ...interface{}) { s.log(FatalLevel, template, args, nil) } +// Logw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) { + s.log(lvl, msg, nil, keysAndValues) +} + // Debugw logs a message with some additional context. The variadic key-value // pairs are treated as they are in With. // @@ -255,6 +288,12 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) { s.log(FatalLevel, msg, nil, keysAndValues) } +// Logln logs a message at provided level. +// Spaces are always added between arguments. +func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) { + s.logln(lvl, args, nil) +} + // Debugln logs a message at [DebugLevel]. // Spaces are always added between arguments. func (s *SugaredLogger) Debugln(args ...interface{}) { diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go index 8ca0bfaf56..cc2b4e07b9 100644 --- a/vendor/go.uber.org/zap/zapcore/console_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -77,7 +77,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, // If this ever becomes a performance bottleneck, we can implement // ArrayEncoder for our plain-text format. arr := getSliceEncoder() - if c.TimeKey != "" && c.EncodeTime != nil { + if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() { c.EncodeTime(ent.Time, arr) } if c.LevelKey != "" && c.EncodeLevel != nil { diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go index 5769ff3e4e..0446254156 100644 --- a/vendor/go.uber.org/zap/zapcore/encoder.go +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -37,6 +37,9 @@ const DefaultLineEnding = "\n" const OmitKey = "" // A LevelEncoder serializes a Level to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type LevelEncoder func(Level, PrimitiveArrayEncoder) // LowercaseLevelEncoder serializes a Level to a lowercase string. For example, @@ -90,6 +93,9 @@ func (e *LevelEncoder) UnmarshalText(text []byte) error { } // A TimeEncoder serializes a time.Time to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type TimeEncoder func(time.Time, PrimitiveArrayEncoder) // EpochTimeEncoder serializes a time.Time to a floating-point number of seconds @@ -219,6 +225,9 @@ func (e *TimeEncoder) UnmarshalJSON(data []byte) error { } // A DurationEncoder serializes a time.Duration to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type DurationEncoder func(time.Duration, PrimitiveArrayEncoder) // SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed. @@ -262,6 +271,9 @@ func (e *DurationEncoder) UnmarshalText(text []byte) error { } // A CallerEncoder serializes an EntryCaller to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. 
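The comments added to encoder.go above spell out a contract: a LevelEncoder, TimeEncoder, DurationEncoder, or CallerEncoder must make exactly one Append* call on the PrimitiveArrayEncoder it is handed. Below is a small sketch of a custom TimeEncoder that honours that contract; the encoder name and output format are made up for illustration.

```go
package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// compactTime is a hypothetical TimeEncoder. It makes exactly one Append*
// call, as the encoder documentation above requires.
func compactTime(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
	enc.AppendString(t.UTC().Format("15:04:05.000"))
}

func main() {
	cfg := zap.NewProductionEncoderConfig()
	cfg.EncodeTime = compactTime

	core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg), zapcore.Lock(os.Stdout), zapcore.InfoLevel)
	zap.New(core).Info("encoder contract example")
}
```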
type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder) // FullCallerEncoder serializes a caller in /full/path/to/package/file:line @@ -292,6 +304,9 @@ func (e *CallerEncoder) UnmarshalText(text []byte) error { // A NameEncoder serializes a period-separated logger name to a primitive // type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. type NameEncoder func(string, PrimitiveArrayEncoder) // FullNameEncoder serializes the logger name as-is. diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go index 95bdb0a126..308c9781ed 100644 --- a/vendor/go.uber.org/zap/zapcore/field.go +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -47,7 +47,7 @@ const ( ByteStringType // Complex128Type indicates that the field carries a complex128. Complex128Type - // Complex64Type indicates that the field carries a complex128. + // Complex64Type indicates that the field carries a complex64. Complex64Type // DurationType indicates that the field carries a time.Duration. DurationType diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go index c8ab86979b..9685169b2e 100644 --- a/vendor/go.uber.org/zap/zapcore/json_encoder.go +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -372,7 +372,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, final.AppendString(ent.Level.String()) } } - if final.TimeKey != "" { + if final.TimeKey != "" && !ent.Time.IsZero() { final.AddTime(final.TimeKey, ent.Time) } if ent.LoggerName != "" && final.NameKey != "" { diff --git a/vendor/modules.txt b/vendor/modules.txt index cd85c7d26e..29feff1a96 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -19,6 +19,16 @@ github.com/cenkalti/backoff/v3 # github.com/ceph/ceph-csi/api v0.0.0-20240322131550-063319f6e516 ## explicit; go 1.21 github.com/ceph/ceph-csi/api/deploy/ocp +# github.com/ceph/go-ceph v0.26.0 +## explicit; go 1.19 +github.com/ceph/go-ceph/internal/callbacks +github.com/ceph/go-ceph/internal/cutil +github.com/ceph/go-ceph/internal/errutil +github.com/ceph/go-ceph/internal/log +github.com/ceph/go-ceph/internal/retry +github.com/ceph/go-ceph/internal/timespec +github.com/ceph/go-ceph/rados +github.com/ceph/go-ceph/rbd # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 @@ -191,6 +201,9 @@ github.com/hashicorp/vault/api/auth/kubernetes # github.com/imdario/mergo v0.3.16 ## explicit; go 1.13 github.com/imdario/mergo +# github.com/inconshreveable/mousetrap v1.1.0 +## explicit; go 1.18 +github.com/inconshreveable/mousetrap # github.com/josharian/intern v1.0.0 ## explicit; go 1.5 github.com/josharian/intern @@ -392,6 +405,11 @@ github.com/red-hat-storage/ocs-client-operator/api/v1alpha1 ## explicit; go 1.21 github.com/red-hat-storage/ocs-operator/api/v4/v1 github.com/red-hat-storage/ocs-operator/api/v4/v1alpha1 +# github.com/rook/rook v1.14.3 +## explicit; go 1.21 +github.com/rook/rook/pkg/client/clientset/versioned +github.com/rook/rook/pkg/client/clientset/versioned/scheme +github.com/rook/rook/pkg/client/clientset/versioned/typed/ceph.rook.io/v1 # github.com/rook/rook/pkg/apis v0.0.0-20240529164429-48b657099d3c ## explicit; go 1.21 github.com/rook/rook/pkg/apis/ceph.rook.io @@ -402,6 +420,9 @@ github.com/ryanuber/go-glob # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus +# github.com/spf13/cobra v1.8.0 +## explicit; go 1.15 +github.com/spf13/cobra # github.com/spf13/pflag 
v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag @@ -422,7 +443,7 @@ go.mongodb.org/mongo-driver/x/bsonx/bsoncore # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr -# go.uber.org/zap v1.26.0 +# go.uber.org/zap v1.27.0 ## explicit; go 1.19 go.uber.org/zap go.uber.org/zap/buffer @@ -676,7 +697,7 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.29.2 +# k8s.io/apiextensions-apiserver v0.29.3 ## explicit; go 1.21 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 @@ -740,7 +761,7 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.29.2 +# k8s.io/apiserver v0.29.3 ## explicit; go 1.21 k8s.io/apiserver/pkg/authentication/serviceaccount k8s.io/apiserver/pkg/authentication/user @@ -891,7 +912,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/component-base v0.29.2 +# k8s.io/component-base v0.29.3 ## explicit; go 1.21 k8s.io/component-base/config k8s.io/component-base/config/v1alpha1