# Setup permanent autocompletion for `k` alias:
echo "alias k='kubectl'" >> ~/.bashrc
echo "kubectl completion bash" >> ~/.bashrc
echo "complete -F __start_kubectl k" >> ~/.bashrc
# Confirm it is OK:
tail ~/.bashrc
# Source it:
source ~/.bashrc
# Setup autocompletion for `kubectl`, only for the current session:
source <(kubectl completion bash)
# List namespaces without the output headers:
k get ns --no-headers
# Switch default namespace to `dev`:
k config set-context $(k config current-context) --namespace=dev
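# To verify the change (optional check):
k config view --minify | grep namespace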
# Start a single Pod (the generated spec has an empty resources: {} section by default, which can be filled with requests and limits):
k run redis --image=redis --labels=tier=db --namespace=prod
# Easily generate YAML file for the Deployment:
k create deployment --image=nginx nginx --dry-run=client -o yaml > nginx-deployment.yml
# Scale the deployment and record the command in the rollout history:
k scale deployment nginx --replicas=5 --record
# Create service, expose Pod or Deployment:
k expose pod redis --port=6379 --name db-service
k expose deployment nginx --port=80 --name web-service
# If you don't remember apiVersion:
k api-resources | grep -i <resource>
# For new line, add {"\n"}:
k get pods -o=jsonpath='{...}{"\n"}'
# Important queries:
[?(@.image == "redis:alpine")].restartCount
$.[*].metadata.name
# Sometimes you can just use columns:
k get nodes -o=custom-columns=<>:<> --sort-by=<>
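# For example (the column names are arbitrary):
k get nodes -o=custom-columns=NODE:.metadata.name,CPU:.status.capacity.cpu --sort-by=.metadata.name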
# If no scheduler exists and you are creating a new object:
spec:
  nodeName: node02
# If no scheduler exists and the Pod is already running, POST a Binding object (YAML shown here, sent as a JSON payload):
apiVersion: v1
kind: Binding
metadata:
  name: nginx
target:
  apiVersion: v1
  kind: Node
  name: node02
{ "apiVersion": "v1", "kind": "Binding", "metadata": { "name": "nginx" }, "target": { "apiVersion": "v1", "kind": "Node", "name": "node02" } }
# Selector (label query) to filter on:
k get pods -l env=dev,colour=red
# Taint on node is like "bug repellent":
k taint nodes node01 app=blue:NoSchedule
# There are 3 types:
# 1. NoSchedule
# 2. PreferNoSchedule
# 3. NoExecute
# To remove taints, add minus (`-`) at the end:
k taint nodes node01 app=blue:NoSchedule-
# We must also specify which Pods are tolerant to this taint/bug repellent:
spec:
  tolerations:
  - key: "app"
    operator: "Equal"
    value: "blue"
    effect: "NoSchedule"
# Either use https://kubernetes.io/docs or `k explain pod --recursive | grep -A5 tolerations` if you don't remember it.
- Label nodes: `k label nodes node02 disktype=ssd`, confirm with `k get nodes --show-labels`.
- Use `nodeSelector` in the Pod specification, as in the sketch below.
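A minimal `nodeSelector` sketch (assuming the `disktype=ssd` label from above):
spec:
  nodeSelector:
    disktype: ssd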
Type | During Scheduling | During Execution |
---|---|---|
`requiredDuringSchedulingIgnoredDuringExecution` | Required | Ignored |
`preferredDuringSchedulingIgnoredDuringExecution` | Preferred | Ignored |
Valid operators:
- In
- NotIn
- Exists
- DoesNotExist
- Gt
- Lt
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: disktype
            operator: In
            values:
            - ssd
To completely dedicate nodes ONLY for certain Pods, you will need both taints/tolerations and node affinity.
`LimitRange` = per-namespace defaults for limits & requests.
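A minimal sketch (the name and values are illustrative):
apiVersion: v1
kind: LimitRange
metadata:
  name: cpu-limit-range
spec:
  limits:
  - default:
      cpu: "1"
    defaultRequest:
      cpu: "0.5"
    type: Container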
`kubelet` automatically applies Pod YAMLs from the default path `/etc/kubernetes/manifests`.
The path can be customized, see `--pod-manifest-path` in `kubelet.service` or `staticPodPath` in the kubelet `config.yaml`:
- `ps -ef | grep kubelet` - look for the value of `--config` to see where `config.yaml` is.
- `grep -i static /var/lib/kubelet/config.yaml`
Static Pods have an automatic suffix based on where they reside (e.g. `-master`).
Copy & edit /etc/kubernetes/manifests/kube-scheduler
, but change the following:
--leader-elect=false
--port=10282
- default is10259
--scheduler-name=my-scheduler
--secure-port=0
- disable HTTPS to avoid cert problems
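A Pod then opts into the custom scheduler (a sketch, using the scheduler name from above):
spec:
  schedulerName: my-scheduler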
# Show resource usage (requires metrics-server):
k top nodes
k top pods
# See the status of the Deployment rollout:
k rollout status deployment <name>
# Change image for the deployment:
k set image deployment <name> <container>=image:version --record
# Undo the Deployment:
k rollout undo deployment <name>
# Strategy:
# 1. Recreate
# 2. Rolling update (default)
In a `Dockerfile`, there are `ENTRYPOINT` and `CMD`. In Kubernetes, it is a little confusing:
Dockerfile | Kubernetes specification |
---|---|
 | spec: |
 | containers: |
 | - name: Ubuntu |
FROM Ubuntu | image: Ubuntu |
ENTRYPOINT ["sleep"] | command: ["sleep2.0"] |
CMD ["5"] | args: ["10"] |
`command` overwrites `ENTRYPOINT` and `args` overwrites `CMD`.
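Putting it together, a minimal sketch (the Pod name is illustrative; the container sleeps for 10 seconds):
apiVersion: v1
kind: Pod
metadata:
  name: ubuntu-sleeper
spec:
  containers:
  - name: ubuntu
    image: ubuntu
    command: ["sleep"]
    args: ["10"]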
# Plain value:
env:
- name: APP_COLOR
  value: pink
# From a ConfigMap:
env:
- name: APP_COLOR
  valueFrom:
    configMapKeyRef:
# From a Secret:
env:
- name: APP_COLOR
  valueFrom:
    secretKeyRef:
k create cm foo --from-literal=APP_COLOR=blue
k create cm foo --from-file=cm.txt
# Then in the Pod definition (envFrom sits at the container level):
spec:
  containers:
  - envFrom:
    - configMapRef:
        name: foo
# Or mount it as a volume:
volumes:
- name: foo
  configMap:
    name: foo
# Pods on an unreachable node are considered dead after 5 minutes by default:
kube-controller-manager --pod-eviction-timeout=5m0s
# Evict all pods and make node unschedulable:
k drain node01
# Only make node unschedulable:
k cordon node01
# Make the node schedulable again:
k uncordon node01
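# If drain is blocked by DaemonSet-managed Pods, add:
k drain node01 --ignore-daemonsets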
# You can only move one MINOR version up at a time:
k cluster-info
# On the control plane node:
apt-get install -y kubeadm=1.18.0-00 kubelet=1.18.0-00 kubectl=1.18.0-00
kubeadm upgrade plan
k drain controlnode
kubeadm upgrade apply v1.18.0
k uncordon controlnode
# On the workers (drain each one from the control plane first: k drain node01):
apt-get install -y kubeadm=1.18.0-00 kubelet=1.18.0-00 kubectl=1.18.0-00
kubeadm upgrade node
systemctl restart kubelet
k uncordon node01
export ETCDCTL_API=3
etcdctl snapshot save /root/etcdbackup.db --cacert <> --cert <> --key <> --endpoints <>
# Restore works locally; point etcd at the new data dir afterwards:
etcdctl snapshot restore /root/etcdbackup.db --data-dir <new-data-dir>
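# Inspect a snapshot (shows hash, revision, total keys, size):
etcdctl snapshot status /root/etcdbackup.db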
# See the details about the certificate:
openssl x509 -in <cert.crt> -text -noout
# Certificate Authority (CA)
# Generate keys:
openssl genrsa -out ca.key 2048
# Generate CSR:
openssl req -new -key ca.key -subj "/CN=KUBERNETES-CA" -out ca.csr
# Sign certificate:
openssl x509 -req -in ca.csr -signkey ca.key -out ca.crt
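To sign other component certificates with this CA, the usual pattern is (file names and CN are illustrative):
openssl genrsa -out server.key 2048
openssl req -new -key server.key -subj "/CN=my-component" -out server.csr
openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt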
`kubeconfig` is by default in `$HOME/.kube/config`. It contains:
- Clusters - e.g. Production
- Contexts - e.g. Admin@Production
- Users - e.g. Admin
k config view
k config view --kubeconfig <>
k config use-context prod-user@production
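Before creating the CertificateSigningRequest below, generate the user's key and CSR (a sketch; file names are illustrative):
openssl genrsa -out user.key 2048
openssl req -new -key user.key -subj "/CN=john" -out user.csr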
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: john-developer
spec:
  # `cat user.csr | base64 | tr -d "\n"` to get the value below
  request: <base64 of the CSR file>
  signerName: kubernetes.io/kube-apiserver-client
  usages:
  - client auth
# After applying CertificateSigningRequest, see it pending:
k get csr
# Approve the request (sign with Kube CA):
k certificate approve john-developer
# Create Role:
k create role developer --resource=pods --verb=create,list,get,update,delete -n development
# Create RoleBinding:
k create rolebinding developer-role-binding --role=developer --user=john -n development
# Test if it works:
k auth can-i update pods --as=john-developer -n development
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: developer
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "create", "update"]
  resourceNames: ["blue", "orange"]
# Namespaced resources:
k api-resources --namespaced=true
# Cluster-scoped resources:
k api-resources --namespaced=false
Role vs ClusterRole, RoleBinding vs ClusterRoleBinding: Role and RoleBinding are namespaced; ClusterRole and ClusterRoleBinding are cluster-wide.
spec:
  serviceAccountName:
  imagePullSecrets:
  # The following can be under each container as well! If specified in both, the container spec has higher priority than the Pod spec.
  securityContext:
    runAsUser:
    # In fact, capabilities are only supported on the container level
    capabilities:
      add:
First, select the Pods this policy will apply to:
spec:
  podSelector:
    matchLabels:
      role: db
Next, specify the direction - Ingress/Egress:
spec:
  policyTypes:
  - Ingress
Define the specifics:
spec:
  ingress:
  - from:
    - podSelector:
        matchLabels:
          name: api-pod
    ports:
    - protocol: TCP
      port: 3306
If you want to limit the namespace too, add a `namespaceSelector` (no dash: combined with the `podSelector` above):
ingress:
- from:
  - podSelector:
      matchLabels:
        name: api-pod
    namespaceSelector:
      matchLabels:
        # Must be labeled on the namespace!
        name: prod
There is also IP address blocking:
- ipBlock:
    cidr: 192.168.5.10/31
No dash (`-`) at the beginning of a selector changes the logic (AND - combined with the previous selector). With a dash, it is a separate rule (OR)!
Example of the egress rule:
policyTypes:
- Egress
egress:
- to:
  - ipBlock:
      cidr: 192.168.5.10/31
  ports:
  - protocol: TCP
    port: 80
- to:
  - podSelector:
      matchLabels:
        name: payroll
  ports:
  - protocol: TCP
    port: 8080
Example of a rule that allows traffic from ANYWHERE:
spec:
  ingress:
  # No "from:" here means it is allowed from ALL sources
  - ports:
    - port: 80
      protocol: TCP
  podSelector:
    matchLabels:
      run: np-test-1
  policyTypes:
  - Ingress
Once you create the PV and PVC, use it in a Pod definition file by specifying the PVC name under `persistentVolumeClaim` in the `volumes` section:
# The volumeMounts name must match the volume name!
spec:
  containers:
  - volumeMounts:
    - mountPath:
      name:
  volumes:
  - name:
    persistentVolumeClaim:
      claimName:
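A filled-in sketch (the claim name, image, and path are illustrative):
spec:
  containers:
  - name: app
    image: nginx
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: data
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: myclaim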
`/etc/cni/net.d/` - this is where the configuration file is located
`/opt/cni/bin` - this is based on the `--cni-bin-dir` kubelet argument
Pod IP range: `k logs weave-net-XXXX -c weave -n kube-system`
The weave-net installation YAML is only in the k8s v1.17 documentation and below.
Service IP range: look for the `kube-apiserver` argument `--service-cluster-ip-range` (default is 10.0.0.0/24), or check `/etc/kubernetes/manifests/kube-apiserver.yaml`.
DNS records for Services:
web-service
web-service.apps
web-service.apps.svc
web-service.apps.svc.cluster.local
For Pods, replace `svc` with `pod` and use the IP address with dashes instead of dots (this may however not be enabled by default):
10-44-2-5.apps.pod.cluster.local
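# Test resolution from inside a Pod (assuming the image ships nslookup):
k exec -it <pod> -- nslookup web-service.apps.svc.cluster.local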
`/etc/coredns/Corefile` - mounted from the ConfigMap `coredns`
k create ingress ingress-wear-watch --rule="wear.my-store.com/wear*=wear-service:80"
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: ingress-wear-watch
spec:
  rules:
  - http:
      paths:
      - path: /wear
        backend:
          serviceName: wear-service
          servicePort: 80
      - path: /watch
        backend:
          serviceName: watch-service
          servicePort: 80
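On newer clusters (`extensions/v1beta1` was removed in v1.22), the same Ingress in `networking.k8s.io/v1` looks roughly like this:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: ingress-wear-watch
spec:
  rules:
  - http:
      paths:
      - path: /wear
        pathType: Prefix
        backend:
          service:
            name: wear-service
            port:
              number: 80
      - path: /watch
        pathType: Prefix
        backend:
          service:
            name: watch-service
            port:
              number: 80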
Rewrite annotations (e.g. map the Ingress path `/pay` back to `/` on the service):
nginx.ingress.kubernetes.io/rewrite-target: /
nginx.ingress.kubernetes.io/ssl-redirect: "false"
Typical process:
- Have the VMs ready.
- Install Docker.
- Install `kubeadm` everywhere, use a specific version for all components (`kubectl`, `kubelet`).
- Run `kubeadm init` with a minimum of:
  - `--apiserver-advertise-address <>`
  - `--apiserver-cert-extra-sans <>`
  - `--pod-network-cidr <>`
- Copy `kubeconfig` from `/etc/kubernetes/admin.conf` to a regular user (see the commands below).
- Run the `kubeadm join` output on all of the workers. If lost, run `kubeadm token list` or `kubeadm token create --print-join-command`.
- Install a network plugin like Flannel/Calico/Weave, use https://v1-17.docs.kubernetes.io
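For the `kubeconfig` copy step, `kubeadm init` prints these commands:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config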
# Control plane logs when components run as OS services:
sudo journalctl -u kube-apiserver
sudo journalctl -u kubelet
# Control plane logs when components run as Pods:
k logs kube-controller-manager-<node> -n kube-system
k describe node node01
cat /var/log/kubelet.log /var/log/kube-proxy.log
Important `kubelet` files:
/etc/kubernetes/kubelet.conf
/etc/systemd/system/kubelet.service.d/10-kubeadm.conf
/var/lib/kubelet/config.yaml
/var/lib/kubelet/kubeadm-flags.env
Last updated: Mon Sep 13 01:20:33 UTC 2021
Disclaimer: This is not a comprehensive guide, only a few notes from the Udemy course.