diff --git a/images/multus-cni/main.tf b/images/multus-cni/main.tf
index 13e317ac0c..f166820b9a 100644
--- a/images/multus-cni/main.tf
+++ b/images/multus-cni/main.tf
@@ -19,8 +19,9 @@ module "multus-cni" {
 }
 
 module "test" {
-  source = "./tests"
-  digest = module.multus-cni.image_ref
+  source            = "./tests"
+  digest            = module.multus-cni.image_ref
+  target_repository = var.target_repository
 }
 
 resource "oci_tag" "latest" {
diff --git a/images/multus-cni/tests/deploy.sh b/images/multus-cni/tests/deploy.sh
deleted file mode 100755
index 8644cc6196..0000000000
--- a/images/multus-cni/tests/deploy.sh
+++ /dev/null
@@ -1,346 +0,0 @@
-#!/bin/bash
-
-# Function to check pod status
-check_pod_status() {
-  local pod_name="$1"
-  local namespace="$2"
-  kubectl get pod "$pod_name" -n "$namespace" &>/dev/null
-}
-
-# Function to delete the pod if it exists
-delete_pod_if_exists() {
-  local pod_name="$1"
-  local namespace="$2"
-  if check_pod_status "$pod_name" "$namespace"; then
-    kubectl delete pod "$pod_name" -n "$namespace" &>/dev/null
-  fi
-}
-
-# Apply the Multus CNI DaemonSet manifest
-cat <<EOF > multus-values.yaml
-# Note:
-#   This deployment file is designed for 'quickstart' of multus, easy installation to test it,
-#   hence this deployment yaml does not care about following things intentionally.
-#     - various configuration options
-#     - minor deployment scenario
-#     - upgrade/update/uninstall scenario
-#   Multus team understand users deployment scenarios are diverse, hence we do not cover
-#   comprehensive deployment scenario. We expect that it is covered by each platform deployment.
----
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  name: network-attachment-definitions.k8s.cni.cncf.io
-spec:
-  group: k8s.cni.cncf.io
-  scope: Namespaced
-  names:
-    plural: network-attachment-definitions
-    singular: network-attachment-definition
-    kind: NetworkAttachmentDefinition
-    shortNames:
-      - net-attach-def
-  versions:
-    - name: v1
-      served: true
-      storage: true
-      schema:
-        openAPIV3Schema:
-          description: 'NetworkAttachmentDefinition is a CRD schema specified by the Network Plumbing
-            Working Group to express the intent for attaching pods to one or more logical or physical
-            networks. More information available at: https://github.com/k8snetworkplumbingwg/multi-net-spec'
-          type: object
-          properties:
-            apiVersion:
-              description: 'APIVersion defines the versioned schema of this represen
-                tation of an object. Servers should convert recognized schemas to the
-                latest internal value, and may reject unrecognized values. More info:
-                https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
-              type: string
-            kind:
-              description: 'Kind is a string value representing the REST resource this
-                object represents. Servers may infer this from the endpoint the client
-                submits requests to. Cannot be updated. In CamelCase. More info:
-                https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
-              type: string
-            metadata:
-              type: object
-            spec:
-              description: 'NetworkAttachmentDefinition spec defines the desired state of a network attachment'
-              type: object
-              properties:
-                config:
-                  description: 'NetworkAttachmentDefinition config is a JSON-formatted CNI configuration'
-                  type: string
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: multus
-rules:
-  - apiGroups: ["k8s.cni.cncf.io"]
-    resources:
-      - '*'
-    verbs:
-      - '*'
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-      - pods/status
-    verbs:
-      - get
-      - list
-      - update
-      - watch
-  - apiGroups:
-      - ""
-      - events.k8s.io
-    resources:
-      - events
-    verbs:
-      - create
-      - patch
-      - update
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: multus
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: multus
-subjects:
-  - kind: ServiceAccount
-    name: multus
-    namespace: kube-system
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: multus
-  namespace: kube-system
----
-kind: ConfigMap
-apiVersion: v1
-metadata:
-  name: multus-daemon-config
-  namespace: kube-system
-  labels:
-    tier: node
-    app: multus
-data:
-  daemon-config.json: |
-    {
-      "chrootDir": "/hostroot",
-      "cniVersion": "0.3.1",
-      "logLevel": "verbose",
-      "logToStderr": true,
-      "cniConfigDir": "/host/etc/cni/net.d",
-      "multusAutoconfigDir": "/host/etc/cni/net.d",
-      "multusConfigFile": "auto",
-      "socketDir": "/host/run/multus/"
-    }
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
-  name: kube-multus-ds
-  namespace: kube-system
-  labels:
-    tier: node
-    app: multus
-    name: multus
-spec:
-  selector:
-    matchLabels:
-      name: multus
-  updateStrategy:
-    type: RollingUpdate
-  template:
-    metadata:
-      labels:
-        tier: node
-        app: multus
-        name: multus
-    spec:
-      hostNetwork: true
-      hostPID: true
-      tolerations:
-        - operator: Exists
-          effect: NoSchedule
-        - operator: Exists
-          effect: NoExecute
-      serviceAccountName: multus
-      containers:
-        - name: kube-multus
-          image: ${IMAGE_NAME}
-          command: [ "/usr/src/multus-cni/bin/multus-daemon" ]
-          resources:
-            requests:
-              cpu: "100m"
-              memory: "50Mi"
-            limits:
-              cpu: "100m"
-              memory: "50Mi"
-          securityContext:
-            privileged: true
-          volumeMounts:
-            - name: cni
-              mountPath: /host/etc/cni/net.d
-            # multus-daemon expects that cnibin path must be identical between pod and container host.
-            # e.g. if the cni bin is in '/opt/cni/bin' on the container host side, then it should be mount to '/opt/cni/bin' in multus-daemon,
-            # not to any other directory, like '/opt/bin' or '/usr/bin'.
-            - name: cnibin
-              mountPath: /opt/cni/bin
-            - name: host-run
-              mountPath: /host/run
-            - name: host-var-lib-cni-multus
-              mountPath: /var/lib/cni/multus
-            - name: host-var-lib-kubelet
-              mountPath: /var/lib/kubelet
-            - name: host-run-k8s-cni-cncf-io
-              mountPath: /run/k8s.cni.cncf.io
-            - name: host-run-netns
-              mountPath: /run/netns
-              mountPropagation: HostToContainer
-            - name: multus-daemon-config
-              mountPath: /etc/cni/net.d/multus.d
-              readOnly: true
-            - name: hostroot
-              mountPath: /hostroot
-              mountPropagation: HostToContainer
-          env:
-            - name: MULTUS_NODE_NAME
-              valueFrom:
-                fieldRef:
-                  fieldPath: spec.nodeName
-      initContainers:
-        - name: install-multus-binary
-          image: ${IMAGE_NAME}
-          command:
-            - "cp"
-            - "/usr/src/multus-cni/bin/multus-shim"
-            - "/host/opt/cni/bin/multus-shim"
-          resources:
-            requests:
-              cpu: "10m"
-              memory: "15Mi"
-          securityContext:
-            privileged: true
-          volumeMounts:
-            - name: cnibin
-              mountPath: /host/opt/cni/bin
-              mountPropagation: Bidirectional
-      terminationGracePeriodSeconds: 10
-      volumes:
-        - name: cni
-          hostPath:
-            path: /etc/cni/net.d
-        - name: cnibin
-          hostPath:
-            path: /opt/cni/bin
-        - name: hostroot
-          hostPath:
-            path: /
-        - name: multus-daemon-config
-          configMap:
-            name: multus-daemon-config
-            items:
-              - key: daemon-config.json
-                path: daemon-config.json
-        - name: host-run
-          hostPath:
-            path: /run
-        - name: host-var-lib-cni-multus
-          hostPath:
-            path: /var/lib/cni/multus
-        - name: host-var-lib-kubelet
-          hostPath:
-            path: /var/lib/kubelet
-        - name: host-run-k8s-cni-cncf-io
-          hostPath:
-            path: /run/k8s.cni.cncf.io
-        - name: host-run-netns
-          hostPath:
-            path: /run/netns/
-EOF
-kubectl apply -f multus-values.yml &>/dev/null
-
-NAMESPACE="multus"
-kubectl create namespace "$NAMESPACE" &>/dev/null
-kubectl create serviceaccount multus -n "$NAMESPACE" &>/dev/null
-
-cat <<EOF > multus-pod.yaml
-apiVersion: v1
-kind: Pod
-metadata:
-  name: multus-test-pod
-  namespace: $NAMESPACE
-  annotations:
-    k8s.v1.cni.cncf.io/networks: '[{"name":"flannel-network"},{"name":"default-network"}]'
-spec:
-  serviceAccountName: multus
-  containers:
-    - name: multus-test-container
-      image: $IMAGE_NAME
-      command:
-        - sleep
-        - "3600"
-      stdin: true
-      tty: true
-      resources:
-        requests:
-          memory: "64Mi"
-          cpu: "250m"
-        limits:
-          memory: "128Mi"
-          cpu: "500m"
-EOF
-
-kubectl apply -f multus-pod.yaml &>/dev/null
-
-# Total time to wait in seconds (3 minutes)
-TOTAL_WAIT_TIME=$((3 * 60))
-
-# Waiting time between checks in seconds (5 seconds)
-WAIT_INTERVAL=5
-
-# Total expected time
-TOTAL_WAITED=0
-
-# pod name
-POD_NAME="multus-test-pod"
-
-while [ $TOTAL_WAITED -lt $TOTAL_WAIT_TIME ]; do
-  # Get pod status
-  POD_STATUS=$(kubectl get pod "$POD_NAME" -n "$NAMESPACE" -o jsonpath='{.status.phase}')
-
-  # Check if the status is 'Running'
-  if [ "$POD_STATUS" == "Running" ]; then
-    echo "The pod is in Running state."
-
-    # Test connectivity from the pod to another resource within the cluster
-    kubectl exec "$POD_NAME" -n "$NAMESPACE" -- sh -c "ping -c 3 google.com"
-
-    # Check if the pod still exists before trying to delete it
-    if check_pod_status "$POD_NAME" "$NAMESPACE"; then
-      delete_pod_if_exists "$POD_NAME" "$NAMESPACE"
-    fi
-
-    # Delete the Multus CNI DaemonSet
-    kubectl delete -f https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/master/deployments/multus-daemonset-thick.yml &>/dev/null
-
-    exit 0
-  fi
-
-  # Increase total expected time
-  TOTAL_WAITED=$((TOTAL_WAITED + WAIT_INTERVAL))
-
-  # Wait before next check
-  sleep "$WAIT_INTERVAL"
-done
-
-# If the pod is not in 'Running' state after 3 minutes, exit with exit code 1
-echo "The pod is not in Running state after 3 minutes."
-exit 1
diff --git a/images/multus-cni/tests/main.tf b/images/multus-cni/tests/main.tf
index db96838b7c..a55d1642e0 100644
--- a/images/multus-cni/tests/main.tf
+++ b/images/multus-cni/tests/main.tf
@@ -1,35 +1,83 @@
 terraform {
   required_providers {
-    oci = { source = "chainguard-dev/oci" }
+    oci       = { source = "chainguard-dev/oci" }
+    imagetest = { source = "chainguard-dev/imagetest" }
   }
 }
 
+variable "target_repository" {}
+
 variable "digest" {
   description = "The image digest to run tests over."
 }
 
-data "oci_exec_test" "manifest" {
-  digest      = var.digest
-  script      = "./deploy.sh"
-  working_dir = path.module
+locals { parsed = provider::oci::parse(var.digest) }
+
+data "imagetest_inventory" "this" {}
+
+module "cluster_harness" {
+  source = "../../../tflib/imagetest/harnesses/k3s/"
+
+  inventory         = data.imagetest_inventory.this
+  name              = basename(path.module)
+  target_repository = var.target_repository
+  cwd               = path.module
 }
 
-resource "random_pet" "suffix" {}
+module "helm" {
+  source = "../../../tflib/imagetest/helm"
+
+  repo      = "https://rke2-charts.rancher.io"
+  chart     = "rke2-multus"
+  namespace = "kube-system"
 
-resource "helm_release" "helm" {
-  name             = "multus-cni-${random_pet.suffix.id}"
-  namespace        = "multus-cni-${random_pet.suffix.id}"
-  repository       = "https://startechnica.github.io/apps"
-  chart            = "multus-cni"
-  version          = "0.1.4"
-  create_namespace = true
+  values = {
+    image = {
+      repository = local.parsed.registry_repo
+      tag        = local.parsed.pseudo_tag
+    }
 
-  values = [file("${path.module}/values.yaml")]
+    config = {
+      cni_conf = {
+        confDir        = "/var/lib/rancher/k3s/agent/etc/cni/net.d"
+        clusterNetwork = "/var/lib/rancher/k3s/agent/etc/cni/net.d/10-flannel.conflist"
+        binDir         = "/var/lib/rancher/k3s/data/current/bin/"
+        kubeconfig     = "/var/lib/rancher/k3s/agent/etc/cni/net.d/multus.d/multus.kubeconfig"
+      }
+    }
+  }
 }
 
-module "helm_cleanup" {
-  source     = "../../../tflib/helm-cleanup"
-  name       = helm_release.helm.id
-  namespace  = helm_release.helm.namespace
-  depends_on = [helm_release.helm]
+resource "imagetest_feature" "basic" {
+  name        = "basic"
+  description = "Basic installation"
+  harness     = module.cluster_harness.harness
+
+  steps = [
+    {
+      name = "Helm Install"
+      cmd  = module.helm.install_cmd
+    },
+    {
+      name = "Apply multus-test-pod using macvlan network interface"
+      cmd  = <
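The "Apply multus-test-pod using macvlan network interface" step is cut off above. For illustration only, a macvlan attachment of that kind usually pairs a NetworkAttachmentDefinition with a pod that requests it via the k8s.v1.cni.cncf.io/networks annotation, roughly as sketched below; the attachment name (macvlan-conf), the master interface (eth0), the IPAM subnet, and the test image are assumptions, not values taken from the actual test.

# Hypothetical sketch (not part of this change): a macvlan NetworkAttachmentDefinition
# and a pod that attaches to it through the Multus networks annotation.
apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
  name: macvlan-conf        # assumed name
  namespace: kube-system
spec:
  config: '{
      "cniVersion": "0.3.1",
      "type": "macvlan",
      "master": "eth0",
      "mode": "bridge",
      "ipam": {
        "type": "host-local",
        "subnet": "192.168.1.0/24"
      }
    }'
---
apiVersion: v1
kind: Pod
metadata:
  name: multus-test-pod
  namespace: kube-system
  annotations:
    k8s.v1.cni.cncf.io/networks: macvlan-conf
spec:
  containers:
    - name: test
      image: cgr.dev/chainguard/busybox:latest   # placeholder image
      command: ["sleep", "3600"]

If the attachment succeeds, Multus adds a secondary interface to the pod (named net1 by default), which can be checked with: kubectl exec multus-test-pod -n kube-system -- ip addr show net1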