diff --git a/internal/containers/provider/docker.go b/internal/containers/provider/docker.go index 65f79e9..ca5bd69 100644 --- a/internal/containers/provider/docker.go +++ b/internal/containers/provider/docker.go @@ -23,6 +23,7 @@ import ( "github.com/docker/docker/api/types/registry" "github.com/docker/docker/client" "github.com/docker/docker/pkg/stdcopy" + "github.com/docker/go-connections/nat" "github.com/google/go-containerregistry/pkg/authn" ) @@ -155,6 +156,11 @@ func (p *DockerProvider) Start(ctx context.Context) error { return fmt.Errorf("creating network: %w", err) } + exposedPorts := make(nat.PortSet) + for port := range p.req.Ports { + exposedPorts[port] = struct{}{} + } + config := &container.Config{ Image: p.req.Ref.Name(), User: p.req.User, @@ -164,6 +170,7 @@ func (p *DockerProvider) Start(ctx context.Context) error { AttachStdout: true, AttachStderr: true, Labels: p.labels, + ExposedPorts: exposedPorts, } hostConfig := &container.HostConfig{ @@ -180,6 +187,7 @@ func (p *DockerProvider) Start(ctx context.Context) error { // mirroring what's done in Docker CLI: https://github.com/docker/cli/blob/0ad1d55b02910f4b40462c0d01aac2934eb0f061/cli/command/container/update.go#L117 NanoCPUs: p.req.Resources.CpuRequest.Value(), }, + PortBindings: p.req.Ports, } if err := p.pull(ctx); err != nil { diff --git a/internal/containers/provider/provider.go b/internal/containers/provider/provider.go index 31a3d1b..9305b16 100644 --- a/internal/containers/provider/provider.go +++ b/internal/containers/provider/provider.go @@ -8,6 +8,7 @@ import ( "io" "path/filepath" + "github.com/docker/go-connections/nat" "github.com/google/go-containerregistry/pkg/name" "k8s.io/apimachinery/pkg/api/resource" ) @@ -38,6 +39,7 @@ type ContainerRequest struct { // An abstraction over common memory/cpu/disk resources requests and limits Resources ContainerResourcesRequest Labels map[string]string + Ports nat.PortMap } type ContainerResourcesRequest struct { diff --git 
a/internal/harnesses/k3s/k3s.go b/internal/harnesses/k3s/k3s.go index adf7894..e2d0a0a 100644 --- a/internal/harnesses/k3s/k3s.go +++ b/internal/harnesses/k3s/k3s.go @@ -13,10 +13,12 @@ import ( "github.com/chainguard-dev/terraform-provider-imagetest/internal/log" "github.com/chainguard-dev/terraform-provider-imagetest/internal/types" "github.com/docker/docker/api/types/mount" + "github.com/docker/go-connections/nat" "github.com/google/go-containerregistry/pkg/name" "golang.org/x/sync/errgroup" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/clientcmd" ) const ( @@ -110,6 +112,15 @@ func New(id string, cli *provider.DockerClient, opts ...Option) (types.Harness, return nil, fmt.Errorf("creating k3s registries config: %w", err) } + ports := nat.PortMap{} + if harnessOptions.HostPort > 0 { + ports = nat.PortMap{ + "6443/tcp": []nat.PortBinding{ + {HostIP: "0.0.0.0", HostPort: fmt.Sprintf("%d", harnessOptions.HostPort)}, + }, + } + } + service := provider.NewDocker(id, cli, provider.DockerRequest{ ContainerRequest: provider.ContainerRequest{ Ref: harnessOptions.ImageRef, @@ -129,6 +140,7 @@ func New(id string, cli *provider.DockerClient, opts ...Option) (types.Harness, }, }, Resources: harnessOptions.Resources, + Ports: ports, }, ManagedVolumes: []mount.Mount{ { @@ -199,6 +211,36 @@ KUBECONFIG=/etc/rancher/k3s/k3s.yaml k3s kubectl config set-cluster default --se return fmt.Errorf("creating kubeconfig: %w", err) } + if h.opt.HostKubeconfigPath != "" { + log.Info(ctx, "Writing kubeconfig to host", "path", h.opt.HostKubeconfigPath) + kr, err := h.service.Exec(ctx, provider.ExecConfig{ + Command: `KUBECONFIG=/etc/rancher/k3s/k3s.yaml kubectl config view --raw 2>/dev/null`, + }) + if err != nil { + return fmt.Errorf("writing kubeconfig to host: %w", err) + } + + data, err := io.ReadAll(kr) + if err != nil { + return fmt.Errorf("reading kubeconfig from host: %w", err) + } + + cfg, err := clientcmd.Load(data) + if err != 
nil { + return fmt.Errorf("loading kubeconfig: %w", err) + } + + _, ok := cfg.Clusters["default"] + if !ok { + return fmt.Errorf("no default cluster found in kubeconfig") + } + cfg.Clusters["default"].Server = fmt.Sprintf("https://127.0.0.1:%d", h.opt.HostPort) + + if err := clientcmd.WriteToFile(*cfg, h.opt.HostKubeconfigPath); err != nil { + return fmt.Errorf("writing kubeconfig to host: %w", err) + } + } + // Run the post start hooks for _, hook := range h.opt.Hooks.PostStart { log.Info(ctx, "K3S Running post start hook", "hook", hook) diff --git a/internal/harnesses/k3s/opts.go b/internal/harnesses/k3s/opts.go index 8f050dc..380b86e 100644 --- a/internal/harnesses/k3s/opts.go +++ b/internal/harnesses/k3s/opts.go @@ -20,6 +20,13 @@ type Opt struct { Resources provider.ContainerResourcesRequest Hooks Hooks + // HostPort exposes the clusters apiserver on a given port when set + HostPort int + + // HostKubeconfigPath writes the clusters kubeconfig to a given path on the + // host, this is optional and does nothing if not set + HostKubeconfigPath string + Registries map[string]*RegistryOpt Mirrors map[string]*RegistryMirrorOpt @@ -243,3 +250,18 @@ return nil } } + +// WithHostPort exposes the clusters apiserver on a given port. 
+func WithHostPort(port int) Option { + return func(o *Opt) error { + o.HostPort = port + return nil + } +} + +func WithHostKubeconfigPath(path string) Option { + return func(o *Opt) error { + o.HostKubeconfigPath = path + return nil + } +} diff --git a/internal/provider/harness_k3s_resource.go b/internal/provider/harness_k3s_resource.go index 2b9a61f..7537077 100644 --- a/internal/provider/harness_k3s_resource.go +++ b/internal/provider/harness_k3s_resource.go @@ -3,6 +3,8 @@ package provider import ( "context" "fmt" + "math/rand" + "net" "os" "path/filepath" @@ -265,6 +267,32 @@ func (r *HarnessK3sResource) Create(ctx context.Context, req resource.CreateRequ } } + // if set, configure the harness to expose the k3s api server on some random, + // unused port, and copy the clusters kubeconfig to the host + if os.Getenv("IMAGETEST_K3S_KUBECONFIG") != "" { + kubeconfigPath := os.Getenv("IMAGETEST_K3S_KUBECONFIG") + + // find an unused exposed port + // NOTE: This isn't concurrency safe, but if we're in this path we're + // already assumed to not support concurrency + var port int + for { + port = rand.Intn(65535-1024) + 1024 + l, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port)) + if err == nil { + // port is free; release it so the container can bind it + l.Close() + break + } + } + + resp.Diagnostics.AddWarning("Using k3s harness dev mode, a single (random) k3s harness is exposed to the host and accessible via the kubeconfig file. This works best if only a single k3s harness is created.",
fmt.Sprintf(`You have used IMAGETEST_K3S_KUBECONFIG to toggle the k3s harness dev mode.
The k3s harness will expose the apiserver to the host on port "%d", and write the configured kubeconfig to "%s".
You can access the cluster with something like: "KUBECONFIG=%s kubectl get po -A"`, port, kubeconfigPath, kubeconfigPath))

+ kopts = append(kopts, k3s.WithHostPort(port), k3s.WithHostKubeconfigPath(kubeconfigPath)) + } + id := data.Id.ValueString() configVolumeName := id + "-config"