From e88618b0d3754e381a4082091d72899664200019 Mon Sep 17 00:00:00 2001
From: Chris
Date: Mon, 25 Nov 2024 16:52:24 +0800
Subject: [PATCH] fix(robot): fix Test Longhorn dynamic provisioned RWX volume recovery

longhorn/longhorn-9822

Signed-off-by: Chris
---
 e2e/keywords/sharemanager.resource            |  8 ++--
 e2e/libs/keywords/sharemanager_keywords.py    | 38 ++++++++++++++-----
 e2e/tests/negative/component_resilience.robot |  4 +-
 3 files changed, 34 insertions(+), 16 deletions(-)

diff --git a/e2e/keywords/sharemanager.resource b/e2e/keywords/sharemanager.resource
index 6fe84fda83..3e8026de2d 100644
--- a/e2e/keywords/sharemanager.resource
+++ b/e2e/keywords/sharemanager.resource
@@ -21,12 +21,12 @@ Check sharemanager ${condition} using headless service
 Wait for all sharemanager to be deleted
     wait_for_sharemanagers_deleted
 
-Delete sharemanager of deployment ${deployment_id} and wait for recreation
+Delete sharemanager pod of deployment ${deployment_id} and wait for recreation
     ${deployment_name} =    generate_name_with_suffix    deployment    ${deployment_id}
     ${volume_name} =    get_workload_volume_name    ${deployment_name}
-    delete_sharemanager_and_wait_for_recreation    ${volume_name}
+    delete_sharemanager_pod_and_wait_for_recreation    ${volume_name}
 
-Wait for sharemanager of deployment ${deployment_id} running
+Wait for sharemanager pod of deployment ${deployment_id} running
    ${deployment_name} =    generate_name_with_suffix    deployment    ${deployment_id}
     ${volume_name} =    get_workload_volume_name    ${deployment_name}
-    wait_for_share_manager_running    ${volume_name}
+    wait_for_share_manager_pod_running    ${volume_name}
diff --git a/e2e/libs/keywords/sharemanager_keywords.py b/e2e/libs/keywords/sharemanager_keywords.py
index f9d501f349..b541f5b26b 100644
--- a/e2e/libs/keywords/sharemanager_keywords.py
+++ b/e2e/libs/keywords/sharemanager_keywords.py
@@ -7,7 +7,7 @@
 from utility.utility import get_retry_count_and_interval
 from utility.utility import logging
-
+from utility.utility import get_pod, delete_pod
 
 
 class sharemanager_keywords:
@@ -48,14 +48,32 @@ def wait_for_sharemanagers_deleted(self, name=[]):
 
         assert AssertionError, f"Failed to wait for all sharemanagers to be deleted"
 
-    def delete_sharemanager(self, name):
-        return self.sharemanager.delete(name)
 
-    def delete_sharemanager_and_wait_for_recreation(self, name):
-        sharemanager = self.sharemanager.get(name)
-        last_creation_time = sharemanager["metadata"]["creationTimestamp"]
-        self.sharemanager.delete(name)
-        self.sharemanager.wait_for_restart(name, last_creation_time)
+    def delete_sharemanager_pod_and_wait_for_recreation(self, name):
+        sharemanager_pod_name = "share-manager-" + name
+        sharemanager_pod = get_pod(sharemanager_pod_name, "longhorn-system")
+        last_creation_time = sharemanager_pod.metadata.creation_timestamp
+        delete_pod(sharemanager_pod_name, "longhorn-system")
+
+        retry_count, retry_interval = get_retry_count_and_interval()
+        for i in range(retry_count):
+            time.sleep(retry_interval)
+            sharemanager_pod = get_pod(sharemanager_pod_name, "longhorn-system")
+            if sharemanager_pod == None:
+                continue
+            creation_time = sharemanager_pod.metadata.creation_timestamp
+            if creation_time > last_creation_time:
+                return
+
+        assert False, f"sharemanager pod {sharemanager_pod_name} not recreated"
+
+
+    def wait_for_share_manager_pod_running(self, name):
+        sharemanager_pod_name = "share-manager-" + name
+        retry_count, retry_interval = get_retry_count_and_interval()
+        for i in range(retry_count):
+            sharemanager_pod = get_pod(sharemanager_pod_name, "longhorn-system")
+            if sharemanager_pod.status.phase == "Running":
+                return
 
-    def wait_for_share_manager_running(self, name):
-        return self.sharemanager.wait_for_running(name)
+        assert False, f"sharemanager pod {sharemanager_pod_name} not running"
diff --git a/e2e/tests/negative/component_resilience.robot b/e2e/tests/negative/component_resilience.robot
index 4c5cc50596..fa45633760 100644
--- a/e2e/tests/negative/component_resilience.robot
+++ b/e2e/tests/negative/component_resilience.robot
@@ -174,8 +174,8 @@ Test Longhorn dynamic provisioned RWX volume recovery
         And Wait until volume of deployment 0 replica rebuilding started on replica node
         Then Delete instance-manager of deployment 0 volume and wait for recover
 
-        When Delete sharemanager of deployment 0 and wait for recreation
-        And Wait for sharemanager of deployment 0 running
+        When Delete sharemanager pod of deployment 0 and wait for recreation
+        And Wait for sharemanager pod of deployment 0 running
         And Wait for deployment 0 pods stable
         And Check deployment 0 data in file data.txt is intact
     END
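
Note: the new keywords rely on get_pod and delete_pod from utility.utility, which are imported but not shown in this patch. The recreation loop above only works if get_pod returns None while the pod is absent, so the loop keeps retrying until the share manager controller recreates the pod with a newer creationTimestamp. A minimal sketch of what such helpers could look like with the Kubernetes Python client is below; it is an assumption for illustration and the actual implementations in e2e/libs/utility/utility.py may differ.

    # Hypothetical sketch of the utility.utility helpers this patch imports.
    # Assumes the Kubernetes Python client and that kube config is already
    # loaded elsewhere (e.g. config.load_kube_config()).
    from kubernetes import client
    from kubernetes.client.rest import ApiException


    def get_pod(name, namespace="longhorn-system"):
        # Return the V1Pod object, or None if the pod does not exist.
        # delete_sharemanager_pod_and_wait_for_recreation depends on the
        # None return to keep polling until the pod is recreated.
        try:
            return client.CoreV1Api().read_namespaced_pod(name=name, namespace=namespace)
        except ApiException as e:
            if e.status == 404:
                return None
            raise


    def delete_pod(name, namespace="longhorn-system"):
        # Delete the pod; the share manager controller is expected to
        # recreate it with a newer metadata.creation_timestamp.
        client.CoreV1Api().delete_namespaced_pod(name=name, namespace=namespace)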