How to deploy a MongoDB cluster and Ops Manager on Kubernetes leveraging Minikube, MongoDB Kubernetes Operator, Helm and a single AWS EC2 instance.
- https://www.mongodb.com/docs/kubernetes-operator/master/kind-quick-start/
- https://www.mongodb.com/blog/post/running-mongodb-ops-manager-in-kubernetes
- https://www.mongodb.com/blog/post/tutorial-part-2-ops-manager-in-kubernetes
Launch an AWS EC2 t3.xlarge instance on Ubuntu Server 22.04 LTS with a 30GB root volume. Edit the associated Security Group to allow all inbound traffic to simplify the exercise (do not do this in production).
# SSH into the EC2 instance (substitute your key file and the instance's public DNS name).
ssh -i "<pem-file>.pem" ubuntu@ec2-XX-XXX-XXX-XXX.eu-west-3.compute.amazonaws.com
# Install the latest stable kubectl (the inner curl resolves the version string).
curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
# Install Docker from the Ubuntu archive; it is the container runtime minikube will use.
sudo apt-get update && \
sudo apt-get install docker.io -y
# Install the minikube binary.
curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/
minikube version
# conntrack is a prerequisite for running minikube with the 'none' driver.
sudo apt install conntrack
# Install cri-dockerd, the CRI shim that lets Kubernetes talk to the Docker
# daemon (needed since dockershim was removed from Kubernetes).
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.2.0/cri-dockerd-v0.2.0-linux-amd64.tar.gz
tar xvf cri-dockerd-v0.2.0-linux-amd64.tar.gz
sudo mv ./cri-dockerd /usr/local/bin/
cri-dockerd --help
# Fetch the upstream systemd units so cri-dockerd starts on boot.
wget https://raw.githubusercontent.com/Mirantis/cri-dockerd/master/packaging/systemd/cri-docker.service
wget https://raw.githubusercontent.com/Mirantis/cri-dockerd/master/packaging/systemd/cri-docker.socket
sudo mv cri-docker.socket cri-docker.service /etc/systemd/system/
# The packaged unit expects the binary in /usr/bin; point it at /usr/local/bin instead.
sudo sed -i -e 's,/usr/bin/cri-dockerd,/usr/local/bin/cri-dockerd,' /etc/systemd/system/cri-docker.service
sudo systemctl daemon-reload
sudo systemctl enable cri-docker.service
sudo systemctl enable --now cri-docker.socket
sudo systemctl status cri-docker.socket
# Relax fs.protected_regular — presumably to avoid permission errors when
# minikube runs as root with the 'none' driver; TODO confirm it is still needed.
sudo sysctl fs.protected_regular=0
# Install crictl (CLI for CRI runtimes), another 'none'-driver prerequisite.
VERSION="v1.24.1"
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-$VERSION-linux-amd64.tar.gz
sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin
rm -f crictl-$VERSION-linux-amd64.tar.gz
# Become root: the 'none' driver runs the Kubernetes components directly on
# the host (no VM) and therefore requires root privileges.
sudo -i
# --vm-driver is deprecated; --driver is the current flag name. The 'none'
# driver reuses the host's Docker/cri-dockerd installed above.
minikube start --driver=none
# Confirm host, kubelet, apiserver and kubeconfig are all reported as Running/Configured.
minikube status
# Install Helm from its official apt repository, keyed via a dedicated keyring.
curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null
sudo apt-get install apt-transport-https --yes
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
sudo apt-get update
sudo apt-get install helm
# apt-key is deprecated on Ubuntu 22.04; store the MongoDB release key in a
# dedicated keyring and reference it with signed-by instead.
wget -qO - https://www.mongodb.org/static/pgp/server-6.0.asc | sudo gpg --dearmor -o /usr/share/keyrings/mongodb-server-6.0.gpg
# NOTE(review): this uses the 'focal' (20.04) repo although the instance runs
# jammy (22.04); mongosh installs fine from it, but a jammy repo also exists.
echo "deb [ arch=amd64,arm64 signed-by=/usr/share/keyrings/mongodb-server-6.0.gpg ] https://repo.mongodb.org/apt/ubuntu focal/mongodb-org/6.0 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-6.0.list
sudo apt-get update
# Only the shell (mongosh) is needed on the host; the servers run in Kubernetes.
sudo apt-get install -y mongodb-mongosh
# Start the Kubernetes dashboard and note the port in the URL it prints
# (it listens only on localhost of the EC2 instance).
minikube dashboard --url
# From your workstation, open an SSH tunnel so the dashboard is reachable
# locally on port 8081.
ssh -i "<pem-file>.pem" -L 8081:localhost:[remote port of minikube dashboard] ubuntu@[ec2 public ip]
# Then browse the dashboard through the tunnel:
http://127.0.0.1:8081/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/#/error?namespace=_all
# Register the MongoDB Helm repository and install the Enterprise Kubernetes
# Operator into its own 'mongodb' namespace.
helm repo add mongodb https://mongodb.github.io/helm-charts
helm install enterprise-operator mongodb/enterprise-operator --namespace mongodb --create-namespace
# Make 'mongodb' the default namespace for subsequent kubectl commands.
kubectl config set-context $(kubectl config current-context) --namespace=mongodb
# Credentials for the first Ops Manager admin user; referenced by the
# MongoDBOpsManager resource created next.
kubectl create secret generic ops-manager-admin-secret --from-literal=Username="<email-for-login>" --from-literal=Password="<complex-password>" --from-literal=FirstName="firstname" --from-literal=LastName="lastname" -n mongodb
# Write the Ops Manager custom resource. NOTE: the original listing had lost
# its indentation, leaving metadata/spec fields at top level — an invalid
# manifest. Fields must be nested under their parent keys as below.
cat << END_OF_FILE > ops-manager.yaml
apiVersion: mongodb.com/v1
kind: MongoDBOpsManager
metadata:
  name: ops-manager
  namespace: mongodb
spec:
  # Single Ops Manager application instance (not highly available).
  replicas: 1
  version: "6.0.3"
  # Secret created earlier holding the first admin user's credentials.
  adminCredentials: ops-manager-admin-secret
  # Expose the Ops Manager UI outside the cluster via a NodePort service.
  externalConnectivity:
    type: NodePort
  # Backing application database: a 3-member replica set.
  applicationDatabase:
    members: 3
    version: "5.0.5-ent"
END_OF_FILE
kubectl apply -f ops-manager.yaml
# Watch the MongoDBOpsManager resource until its status reaches Running
# (AppDB first, then the Ops Manager application — this takes several minutes).
kubectl get om -n mongodb
kubectl get om -o yaml -w
# Find the NodePort exposed by ops-manager-svc-ext for the web portal.
kubectl get svc
- Add the proper inbound traffic rule at Security Group level
- Report exposed port for ops-manager-svc-ext to access web portal (ex: http://[ec2 public ip]:31618)
- Fill the Ops Manager admin form and browse the ops-manager-db organization to check the AppDB
- At Access Manager/Organization level, create a new API Key (role = Organization Owner)
- Save both the public and private keys, and add the Kubernetes Operator pod's IP address to the API Access List
- At Project/Deployment level, hit the Setup Kubernetes button and use the existing API Key
- Generate YAML
# Save as secret.yaml — the operator's API credentials. NOTE: per the
# operator documentation the (counterintuitive) mapping is
# user = PUBLIC key, publicApiKey = PRIVATE key; the original listing had
# them swapped, and had also lost all nesting (invalid manifests).
apiVersion: v1
kind: Secret
metadata:
  name: organization-secret
  namespace: mongodb
stringData:
  user: <publicKey>
  publicApiKey: <privateKey>
---
# Save as config-map.yaml — points the operator at the Ops Manager project.
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-project
  namespace: mongodb
data:
  # In-cluster URL of the Ops Manager service.
  baseUrl: http://ops-manager-svc.mongodb.svc.cluster.local:8080
  # Optional Parameters
  projectName: <projectName>
  orgId: <orgId>
# Apply the two files generated above (Secret + ConfigMap).
kubectl apply -f secret.yaml -f config-map.yaml
# Write the MongoDB replica-set resource. NOTE: the original listing had lost
# its indentation — 'configMapRef.name' must be nested under 'opsManager',
# and 'credentials' sits at the spec level, per the MongoDB CRD.
cat << END_OF_FILE > replica-set.yaml
apiVersion: mongodb.com/v1
kind: MongoDB
metadata:
  name: my-cluster
  namespace: mongodb
spec:
  # 3-member replica set.
  members: 3
  version: "5.0.5-ent"
  type: ReplicaSet
  # Project ConfigMap created above.
  opsManager:
    configMapRef:
      name: my-project
  # API-key Secret created above.
  credentials: organization-secret
END_OF_FILE
kubectl apply -f replica-set.yaml -n mongodb
# Watch the MongoDB resource until its phase reaches Running.
kubectl get mdb -n mongodb -w
kubectl get mdb -o yaml -w
- Check the provisioning
- Check at Ops Manager level
# To scale the replica set, edit replica-set.yaml and raise the member count:
members: 5
# Re-apply; the operator performs the reconfiguration.
kubectl apply -f replica-set.yaml -n mongodb
# Connect with mongosh using the primary pod's IP.
mongosh "mongodb://<Primary-Node-POD-IP>:27017"