From 5b18b15716973aeb4280f166886b91019c535295 Mon Sep 17 00:00:00 2001
From: Rewant Soni
Date: Mon, 4 Nov 2024 21:11:02 +0530
Subject: [PATCH] add server side implementation

Signed-off-by: Rewant Soni
---
 controllers/util/k8sutil.go        | 26 ++++
 rbac/provider-role.yaml            |  1 +
 services/provider/server/server.go | 99 ++++++++++++++++++++++++++++--
 3 files changed, 121 insertions(+), 5 deletions(-)

diff --git a/controllers/util/k8sutil.go b/controllers/util/k8sutil.go
index fb1e5504ea..87103da9d1 100644
--- a/controllers/util/k8sutil.go
+++ b/controllers/util/k8sutil.go
@@ -166,3 +166,29 @@ func GenerateNameForNonResilientCephBlockPoolSC(initData *ocsv1.StorageCluster)
 	}
 	return fmt.Sprintf("%s-ceph-non-resilient-rbd", initData.Name)
 }
+
+func GetStorageClusterInNamespace(ctx context.Context, cl client.Client, namespace string) (*ocsv1.StorageCluster, error) {
+	storageClusterList := &ocsv1.StorageClusterList{}
+	err := cl.List(ctx, storageClusterList, client.InNamespace(namespace))
+	if err != nil {
+		return nil, fmt.Errorf("unable to list storageCluster(s) in namespace %s: %v", namespace, err)
+	}
+
+	var foundSc *ocsv1.StorageCluster
+	for i := range storageClusterList.Items {
+		sc := &storageClusterList.Items[i]
+		if sc.Status.Phase == PhaseIgnored {
+			continue // Skip Ignored storage cluster
+		}
+		if foundSc != nil {
+			// This means we have already found one storage cluster, so this is a second one
+			return nil, fmt.Errorf("multiple storageClusters found in namespace %s, expected: 1 actual: %v", namespace, len(storageClusterList.Items))
+		}
+		foundSc = sc
+	}
+
+	if foundSc == nil {
+		return nil, fmt.Errorf("no storageCluster found in namespace %s, expected: 1", namespace)
+	}
+	return foundSc, nil
+}
diff --git a/rbac/provider-role.yaml b/rbac/provider-role.yaml
index dd1c692975..74ed3fa301 100644
--- a/rbac/provider-role.yaml
+++ b/rbac/provider-role.yaml
@@ -16,6 +16,7 @@ rules:
   resources:
   - cephfilesystemsubvolumegroups
   - cephblockpoolradosnamespaces
+  - cephblockpools
   verbs:
   - get
   - list
diff --git a/services/provider/server/server.go b/services/provider/server/server.go
index f12e7e021f..a32781c06d 100644
--- a/services/provider/server/server.go
+++ b/services/provider/server/server.go
@@ -53,11 +53,12 @@ import (
 )
 
 const (
-	TicketAnnotation          = "ocs.openshift.io/provider-onboarding-ticket"
-	ProviderCertsMountPoint   = "/mnt/cert"
-	onboardingTicketKeySecret = "onboarding-ticket-key"
-	storageRequestNameLabel   = "ocs.openshift.io/storagerequest-name"
-	notAvailable              = "N/A"
+	TicketAnnotation                 = "ocs.openshift.io/provider-onboarding-ticket"
+	ProviderCertsMountPoint          = "/mnt/cert"
+	onboardingTicketKeySecret        = "onboarding-ticket-key"
+	storageRequestNameLabel          = "ocs.openshift.io/storagerequest-name"
+	notAvailable                     = "N/A"
+	rbdMirrorBootstrapPeerSecretName = "rbdMirrorBootstrapPeerSecretName"
 )
 
 const (
@@ -949,3 +950,91 @@ func extractMonitorIps(data string) ([]string, error) {
 func (s *OCSProviderServer) PeerStorageCluster(_ context.Context, _ *pb.PeerStorageClusterRequest) (*pb.PeerStorageClusterResponse, error) {
 	return &pb.PeerStorageClusterResponse{}, nil
 }
+
+func (s *OCSProviderServer) GetClientsInfo(ctx context.Context, req *pb.GetClientsInfoRequest) (*pb.GetClientsInfoResponse, error) {
+	storageCluster, err := util.GetStorageClusterInNamespace(ctx, s.client, s.namespace)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "failed to get storageCluster. %v", err)
+	}
+
+	if storageCluster.UID != types.UID(req.StorageClusterUID) {
+		return nil, status.Errorf(codes.Internal, "storage cluster UID does not match expected UID")
+	}
+
+	var clientsInfo []*pb.ClientInfo
+
+	for i := range req.ClientIDs {
+		consumer, err := s.consumerManager.Get(ctx, req.ClientIDs[i])
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "failed to get consumer. %v", err)
+		}
+		rnsList := &rookCephv1.CephBlockPoolRadosNamespaceList{}
+		err = s.client.List(ctx, rnsList, client.InNamespace(s.namespace), client.MatchingLabels{controllers.StorageConsumerNameLabel: consumer.Name}, client.Limit(1))
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "failed to list Rados namespace. %v", err)
+		}
+		if len(rnsList.Items) == 0 {
+			return nil, status.Errorf(codes.Internal, "rados namespace for the client not found: %v", consumer.UID)
+		}
+		clientsInfo = append(clientsInfo, &pb.ClientInfo{ClientID: req.ClientIDs[i], RadosNamespace: rnsList.Items[0].Name})
+	}
+
+	return &pb.GetClientsInfoResponse{ClientsInfo: clientsInfo}, nil
+}
+
+func (s *OCSProviderServer) GetBlockPoolsInfo(ctx context.Context, req *pb.GetBlockPoolsInfoRequest) (*pb.GetBlockPoolsInfoResponse, error) {
+	storageCluster, err := util.GetStorageClusterInNamespace(ctx, s.client, s.namespace)
+	if err != nil {
+		return nil, status.Errorf(codes.Internal, "failed to get storageCluster. %v", err)
+	}
+
+	if storageCluster.UID != types.UID(req.StorageClusterUID) {
+		return nil, status.Errorf(codes.Internal, "storage cluster UID does not match expected UID")
+	}
+
+	var blockPoolsInfo []*pb.BlockPoolInfo
+	for i := range req.BlockPoolNames {
+		cephBlockPool := &rookCephv1.CephBlockPool{}
+		cephBlockPool.Name = req.BlockPoolNames[i]
+		cephBlockPool.Namespace = s.namespace
+		err = s.client.Get(ctx, client.ObjectKeyFromObject(cephBlockPool), cephBlockPool)
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "failed to get CephBlockPool: %v. %v", cephBlockPool.Name, err)
+		}
+
+		// if the blockPool mirroring is not enabled return an error
+		if !cephBlockPool.Spec.Mirroring.Enabled {
+			errMsg := fmt.Sprintf("Mirroring is not enabled for CephBlockPool: %v", cephBlockPool.Name)
+			klog.Error(errMsg)
+			return nil, status.Errorf(codes.Internal, errMsg)
+		}
+
+		if cephBlockPool.Status.Info != nil &&
+			cephBlockPool.Status.Info[rbdMirrorBootstrapPeerSecretName] != "" {
+			secret := &corev1.Secret{}
+			secret.Name = cephBlockPool.Status.Info[rbdMirrorBootstrapPeerSecretName]
+			secret.Namespace = s.namespace
+			err := s.client.Get(ctx, client.ObjectKeyFromObject(secret), secret)
+			if err != nil {
+				errMsg := fmt.Sprintf("Error fetching bootstrap secret %s for CephBlockPool %s: %v",
+					cephBlockPool.Status.Info[rbdMirrorBootstrapPeerSecretName],
+					cephBlockPool.Name,
+					err)
+				klog.Error(errMsg)
+				return nil, status.Errorf(codes.Internal, errMsg)
+			}
+
+			blockPoolsInfo = append(blockPoolsInfo, &pb.BlockPoolInfo{
+				BlockPoolName:  cephBlockPool.Name,
+				MirroringToken: string(secret.Data["token"]),
+			})
+		} else {
+			errMsg := fmt.Sprintf("Bootstrap secret for CephBlockPool %s is not generated", cephBlockPool.Name)
+			klog.Error(errMsg)
+			return nil, status.Errorf(codes.Internal, errMsg)
+		}
+
+	}
+
+	return &pb.GetBlockPoolsInfoResponse{BlockPoolsInfo: blockPoolsInfo}, nil
+}