fix(robot): fix Test Longhorn dynamic provisioned RWX volume recovery
longhorn/longhorn-9822

Signed-off-by: Chris <chris.chien@suse.com>
chriscchien authored and yangchiu committed Nov 26, 2024
Commit e88618b (1 parent: 1e777ba)
Showing 3 changed files with 34 additions and 16 deletions.
8 changes: 4 additions & 4 deletions e2e/keywords/sharemanager.resource
@@ -21,12 +21,12 @@ Check sharemanager ${condition} using headless service
 Wait for all sharemanager to be deleted
     wait_for_sharemanagers_deleted
 
-Delete sharemanager of deployment ${deployment_id} and wait for recreation
+Delete sharemanager pod of deployment ${deployment_id} and wait for recreation
     ${deployment_name} =    generate_name_with_suffix    deployment    ${deployment_id}
     ${volume_name} =    get_workload_volume_name    ${deployment_name}
-    delete_sharemanager_and_wait_for_recreation    ${volume_name}
+    delete_sharemanager_pod_and_wait_for_recreation    ${volume_name}
 
-Wait for sharemanager of deployment ${deployment_id} running
+Wait for sharemanager pod of deployment ${deployment_id} running
     ${deployment_name} =    generate_name_with_suffix    deployment    ${deployment_id}
     ${volume_name} =    get_workload_volume_name    ${deployment_name}
-    wait_for_share_manager_running    ${volume_name}
+    wait_for_share_manager_pod_running    ${volume_name}
38 changes: 28 additions & 10 deletions e2e/libs/keywords/sharemanager_keywords.py
@@ -7,7 +7,7 @@

 from utility.utility import get_retry_count_and_interval
 from utility.utility import logging
+from utility.utility import get_pod, delete_pod
 
 class sharemanager_keywords:

@@ -48,14 +48,32 @@ def wait_for_sharemanagers_deleted(self, name=[]):

         assert AssertionError, f"Failed to wait for all sharemanagers to be deleted"
 
-    def delete_sharemanager(self, name):
-        return self.sharemanager.delete(name)
-
-    def delete_sharemanager_and_wait_for_recreation(self, name):
-        sharemanager = self.sharemanager.get(name)
-        last_creation_time = sharemanager["metadata"]["creationTimestamp"]
-        self.sharemanager.delete(name)
-        self.sharemanager.wait_for_restart(name, last_creation_time)
+    def delete_sharemanager_pod_and_wait_for_recreation(self, name):
+        sharemanager_pod_name = "share-manager-" + name
+        sharemanager_pod = get_pod(sharemanager_pod_name, "longhorn-system")
+        last_creation_time = sharemanager_pod.metadata.creation_timestamp
+        delete_pod(sharemanager_pod_name, "longhorn-system")
+
+        retry_count, retry_interval = get_retry_count_and_interval()
+        for i in range(retry_count):
+            time.sleep(retry_interval)
+            sharemanager_pod = get_pod(sharemanager_pod_name, "longhorn-system")
+            if sharemanager_pod == None:
+                continue
+            creation_time = sharemanager_pod.metadata.creation_timestamp
+            if creation_time > last_creation_time:
+                return
+
+        assert False, f"sharemanager pod {sharemanager_pod_name} not recreated"
+
+    def wait_for_share_manager_pod_running(self, name):
+        sharemanager_pod_name = "share-manager-" + name
+        retry_count, retry_interval = get_retry_count_and_interval()
+        for i in range(retry_count):
+            sharemanager_pod = get_pod(sharemanager_pod_name, "longhorn-system")
+            if sharemanager_pod.status.phase == "Running":
+                return
 
-    def wait_for_share_manager_running(self, name):
-        return self.sharemanager.wait_for_running(name)
+        assert False, f"sharemanager pod {sharemanager_pod_name} not running"
4 changes: 2 additions & 2 deletions e2e/tests/negative/component_resilience.robot
@@ -174,8 +174,8 @@ Test Longhorn dynamic provisioned RWX volume recovery
         And Wait until volume of deployment 0 replica rebuilding started on replica node
         Then Delete instance-manager of deployment 0 volume and wait for recover
 
-        When Delete sharemanager of deployment 0 and wait for recreation
-        And Wait for sharemanager of deployment 0 running
+        When Delete sharemanager pod of deployment 0 and wait for recreation
+        And Wait for sharemanager pod of deployment 0 running
         And Wait for deployment 0 pods stable
         And Check deployment 0 data in file data.txt is intact
     END
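
For reference, the recovery scenario exercised by this test can also be walked through by hand. The sketch below is a rough standalone reproduction, not part of the test suite or this commit, assuming a hypothetical RWX volume named vol-0, the default share-manager-<volume> pod naming in the longhorn-system namespace, and a local kubeconfig.

# Standalone reproduction sketch: delete the share-manager pod of an RWX
# volume and wait for a newer pod to reach Running.
import time

from kubernetes import client, config
from kubernetes.client.rest import ApiException

VOLUME_NAME = "vol-0"                      # hypothetical volume name
POD_NAME = f"share-manager-{VOLUME_NAME}"  # default Longhorn naming
NAMESPACE = "longhorn-system"

config.load_kube_config()
core = client.CoreV1Api()

old_created = core.read_namespaced_pod(POD_NAME, NAMESPACE).metadata.creation_timestamp
core.delete_namespaced_pod(POD_NAME, NAMESPACE)

for _ in range(60):
    time.sleep(5)
    try:
        pod = core.read_namespaced_pod(POD_NAME, NAMESPACE)
    except ApiException as e:
        if e.status == 404:
            continue  # pod not recreated yet
        raise
    if pod.metadata.creation_timestamp > old_created and pod.status.phase == "Running":
        print(f"{POD_NAME} recreated and running")
        break
else:
    raise AssertionError(f"{POD_NAME} was not recreated within the timeout")

Comparing creation timestamps instead of only waiting for a Running pod avoids racing with the old pod while it is still terminating, which is the same reasoning behind delete_sharemanager_pod_and_wait_for_recreation above.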
