Control plane (Master)
- kube apiserver
- kube controller manager
- kube scheduler
- etcd
Nodes
- kubelet
- kube proxy
Create Self-signed Root SSL certificate (CA master node)
openssl genrsa -out ca.key 2048
openssl req -new -key ca.key -subj "/CN=KUBERNETES-CA" -out ca.csr
openssl x509 -req -in ca.csr -signkey ca.key -out ca.crt
openssl rsa -in ca.key -pubout > ca.pem
First create the openssl.cnf
which describes the certificate with DNS names ...
openssl genrsa -out apiserver.key 2048
openssl req -new -key apiserver.key -subj "/CN=kube-apiserver" -out apiserver.csr -config openssl.cnf
openssl x509 -req -in apiserver.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out apiserver.crt
Create signed certificate for admin user with admin privileges
openssl genrsa -out admin.key 2048
# Signing certificate using root cert
openssl req -new -key admin.key -subj "/CN=kube-admin/O=system:masters" -out admin.csr
# Signed certificate
openssl x509 -req -in admin.csr -CA ca.crt -CAkey ca.key -out admin.crt -CAcreateserial
TIP : curl API using certificates
curl https://${KUBERNETES_PUBLIC_ADDRESS}:6443/version --key admin.key --cert admin.crt --cacert ca.crt
CertificateSigningRequest
DOC : Reference / API Access Control / Certificate Signing Requests
# Help
kubectl explain csr
kubectl explain csr.spec
# Create
cat <<EOF > john-csr.yaml
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
name: john
spec:
groups:
- system:authenticated
request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFV........
signerName: kubernetes.io/kube-apiserver-client
usages:
- client auth
EOF
k apply -f john-csr.yaml
Manage CertificateSigningRequest (csr)
k get csr
kubectl get csr/john -o yaml
k certificate approve john
k certificate deny johny
Avoid specifying server URLs + credential files on every kubectl command line :
Ex: without kubeconfig
kubectl get pods \
--server my-api-server:6443 \
--client-key admin.key \
--client-certificate admin.crt \
--certificate-authority ca.crt
Kubeconfig file structure = clusters + users + contexts
kind: Config
apiVersion: v1
current-context: admin@cluster1
clusters:
- name: cluster1
cluster:
server: https://my-api-server:6443
certificate-authority-data: BASE64_ENCODED_CA_CRT...
contexts:
- name: admin@cluster1
context:
user: admin
cluster: cluster1
namespace: staging
users:
- name: admin
user:
client-certificate-data: BASE64_ENCODED_CRT...
client-key-data: BASE64_ENCODED_CRT...
- name: minikube
user:
client-certificate: /home/user/.minikube/client.crt
client-key: /home/user/.minikube/client.key
KubeConfig infos | |
---|---|
Get kubectl config help | kubectl config -h |
Default location | $HOME/.kube/config |
Define custom config for kubectl | export KUBECONFIG=/path/to/config/file |
Show current config command | kubectl config view |
Specify in kubectl | kubectl get pod --kubeconfig=path/to/config/file |
Create context | kubectl config set-context admin@cluster1-prod --user admin --namespace prod --cluster cluster1 |
Change current context | kubectl config use-context dev@cluster2 |
Resources actions :
# Impersonate :
kubectl auth can-i create pod --as mike --namespace production
# List namespaced / cluster-scoped resources
kubectl api-resources --namespaced=false
kubectl explain NetworkPolicy
HOW : Create policy rules (Ingress: from, Egress: to) using label matching
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: db-policy
spec:
podSelector:
matchLabels:
role: db
policyTypes:
- Ingress
ingress:
- from:
- podSelector:
matchLabels:
name: api-pod
ports:
- protocol: TCP
port: 3306
kubectl proxy &
curl http://localhost:8001/api -k
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -text -noout
\n
cat my-cert.csr | base64 -w 0
```sh
* Create a kubeconfig from admin certs
```sh
{
kubectl config set-cluster kubernetes-the-hard-way \
--certificate-authority=ca.crt \
--embed-certs=true \
--server=https://127.0.0.1:6443 \
--kubeconfig=admin.kubeconfig
kubectl config set-credentials admin \
--client-certificate=admin.crt \
--client-key=admin.key \
--embed-certs=true \
--kubeconfig=admin.kubeconfig
kubectl config set-context default \
--cluster=kubernetes-the-hard-way \
--user=admin \
--kubeconfig=admin.kubeconfig
kubectl config use-context default --kubeconfig=admin.kubeconfig
}
https://kubernetes.io/docs/reference/access-authn-authz/bootstrap-tokens/
sudo kubectl config --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig set-cluster bootstrap --server="https://${KUBERNETES_LB_IP}:6443" --certificate-authority=/var/lib/kubernetes/ca.crt
sudo kubectl config --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig set-credentials kubelet-bootstrap --token=07401b.f395accd246ae52d
sudo kubectl config --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig set-context bootstrap --user=kubelet-bootstrap --cluster=bootstrap
sudo kubectl config --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig use-context bootstrap
TUTORIALS :
//...
# Service status / restart
sudo service myservice status
sudo service myservice restart
# Check service configuration
cat /etc/systemd/system/myservice.service
# Reload systemctl
sudo systemctl daemon-reload
# Show logs
sudo journalctl -u myservice
sudo ETCDCTL_API=3 etcdctl member list \
--endpoints=https://127.0.0.1:2379 \
--cacert=/etc/etcd/ca.crt \
--cert=/etc/etcd/etcd-server.crt \
--key=/etc/etcd/etcd-server.key
kubectl get componentstatuses --kubeconfig admin.kubeconfig
Cluster version
kubectl version --short
kubectl get nodes -o wide
Free a node before maintenance
k drain node01
# After maintenance make node01 available to be scheduled
k uncordon node01
Check if master can be scheduled for workload pods
k describe node master | grep -i taint
Make node unschedulable (critical resources)
k cordon master
Get next stable release version available
kubeadm upgrade plan
kubectl drain master
kubeadm version
# Upgrade kubeadm to targeted version
sudo apt install kubeadm=1.18.0-00
kubeadm upgrade apply v1.18.0
kubectl version --short
# Upgrade kubelet to targeted version
sudo apt install kubelet=1.18.0-00
kubectl uncordon master
kubectl get nodes
kubectl drain node01
ssh node01
sudo apt install kubeadm=1.18.0-00
kubeadm upgrade node
sudo apt install kubelet=1.18.0-00
kubectl uncordon node01
kubectl get nodes
Get ETCD version
k describe pod -n kube-system etcd-master-pod | grep Image
Test ETCDCTL command parameters: etcdctl member list
ETCDCTL_API=3 etcdctl member list \
--cacert=/etc/kubernetes/pki/etcd/ca.crt \
--cert=/etc/kubernetes/pki/etcd/server.crt \
--key=/etc/kubernetes/pki/etcd/server.key \
--endpoints=127.0.0.1:2379
Take ETCD snapshot
ETCDCTL_API=3 etcdctl snapshot save -h
SNAPSHOT_LOCATION=/tmp/snapshot.db
ETCDCTL_API=3 etcdctl snapshot save \
--cacert=/etc/kubernetes/pki/etcd/ca.crt \
--cert=/etc/kubernetes/pki/etcd/server.crt \
--key=/etc/kubernetes/pki/etcd/server.key \
--endpoints=127.0.0.1:2379 $SNAPSHOT_LOCATION
Restore backup
ETCDCTL_API=3 etcdctl snapshot restore -h
SNAPSHOT_LOCATION=/tmp/snapshot.db
ETCDCTL_API=3 etcdctl snapshot restore \
--cacert=/etc/kubernetes/pki/etcd/ca.crt \
--cert=/etc/kubernetes/pki/etcd/server.crt \
--key=/etc/kubernetes/pki/etcd/server.key \
--endpoints=127.0.0.1:2379 \
--name=master \
--initial-cluster="master=https://127.0.0.1:2380" \
--initial-advertise-peer-urls="https://127.0.0.1:2380" \
--initial-cluster-token="etcd-cluster-001" \
--data-dir=/var/lib/etcd-from-backup $SNAPSHOT_LOCATION
# IMPORTANT : Edit etcd config and set parameters (initial-cluster-token, data-dir) :
vi /etc/kubernetes/manifests/etcd.yaml
# --initial-cluster-token="etcd-cluster-001"
# --data-dir=/var/lib/etcd-from-backup (+ volumeMounts.mountPath + volumes.path )
The ETCD static pod is automatically recreated. Listing the members should display a new member
ETCDCTL_API=3 etcdctl member list --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt --key=/etc/kubernetes/pki/etcd/server.key --endpoints=127.0.0.1:2379
k get pod,svc,deployment
# list interfaces
ip link
# ip associated to inet's
ip addr
ip addr add 192.168.1.10/24 dev eth0
# List or add routes
ip r
ip route
ip route add 192.168.2.10/24 via 192.168.1.1
ifconfig ens3
ip link
Add local DNS entries
echo "192.168.1.3" >> /etc/hosts
Show DNS server
cat /etc/resolv.conf
ip netns add blue
# list interfaces inside blue net-ns
ip netns exec blue ip link
Exec in a netns : `ip netns exec <ns>` == `ip -n <ns>`
ip netns exec blue ip link
# or
ip -n blue link
Add virtual links between net namespace interfaces (veth)
# Create veth and link two ns
ip link add veth-red type veth peer name veth-blue
ip link set veth-red netns red
ip link set veth-blue netns blue
# Give ip addr to veth's
ip -n red addr add 192.168.15.1 dev veth-red
ip -n blue addr add 192.168.15.2 dev veth-blue
# Activate
ip -n red link set veth-red up
ip -n blue link set veth-blue up
Make a ping to a netns ip : ip netns exec
ip -n red ping 192.168.15.2
Check if machine is allowed to forward ip trafic :
cat /proc/sys/net/ipv4/ip_forward
A specification to simplify container network management
Ex K8S CNI : https://kubernetes.io/docs/concepts/cluster-administration/addons/
NB : Docker has its own network-management implementation (CNM : Container Network Model), different from CNI. So when Kubernetes uses Docker, it disables Docker's networking and adds the CNI layer itself.
docker run --network=none nginx
bridge add xaxaxa /var/run/netns/xaxaxa
k describe service myservice
k get pod --show-labels
k describe pod mypod
k logs mypod
k logs mypod --previous
# Kubernetes hard way
sudo journalctl -u etcd.service -l
sudo journalctl -u kube-apiserver
# Continers installation (kubeadm)
kubectl logs etcd-master -n kube-system
kubectl logs kube-apiserver-master -n kube-system
docker logs 8c1a23efd
top
df -h
sudo journalctl -u kubelet
openssl x509 -in /var/lib/kubelet/worker-1.crt -text
Public-key encryption example using OpenSSL : https://gist.github.com/thinkerbot/706137
--service-cluster-ip-range
param on kube-apiserver service configuration. Test access to a service via generated DNS records :
# Call from same namespace
curl http://my-service
# Call from another NS
curl http://my-service.mynamespace
curl http://my-service.mynamespace.svc
# Fully qualified domain (Root = cluster.local)
curl http://my-service.mynamespace.svc.cluster.local
# Pod IP based generated name (pods live under the "pod" subdomain, not "svc")
curl http://10-244-2-3.mynamespace.pod.cluster.local
Show ip tables entries for a service
iptables -L -t nat | grep db-service
cat /var/log/kube-proxy.log
Get Pod config yaml using --dry-run
kubectl run my-pod --image=busybox --dry-run=client -o yaml --command -- sleep 3000 > pod.yaml
Patch the configuration to specify node:
# ...
spec:
nodeName: node03
image: busybox
Create the pod:
kubectl apply -f pod.yaml
SSH into pod container then display default gateway
kubectl exec -it my-pod -- sh
ip route
CoreDNS configuration file: /etc/coredns/Corefile
On the pod: see the kubernetes plugin section in the Corefile; the cluster DNS server is written to the pod's /etc/resolv.conf
Kubelet sets this for each pod it creates; see /var/lib/kubelet/config.yaml
#!/bin/bash
# Generate a private key + CSR for a Kubernetes user and wrap the CSR in a
# CertificateSigningRequest manifest ready for `kubectl apply`.
#
# Usage: signing-request.sh <user>
# IMPORTANT : $USER must be a valid FQDN
USER=$1
# certificates.k8s.io/v1beta1 was removed in Kubernetes 1.22 — default to v1.
K8S_CSR_VERSION=certificates.k8s.io/v1
#K8S_CSR_VERSION=certificates.k8s.io/v1beta1
echo
echo "Certificate Signing Request Generator v0.1.0"
echo "--------------------------------------------"
echo
if [ -z "$USER" ]
then
  echo "ERROR: Missing USER param eg 'john': IMPORTANT USER must be a valid FQDN"
  exit 1
fi
# Refuse to clobber an existing private key for this user.
if [ -f "${USER}.key" ]
then
  echo "ERROR: A file with name ${USER}.key (private key) was found. Operation aborted !"
  exit 1
fi
echo ">>> Create private key for CN $USER"
echo
openssl genrsa -out "${USER}.key" 2048
openssl req -new -subj "/CN=${USER}" -key "${USER}.key" -out "${USER}.csr"
echo "SUCCESS: Private key and CSR files created at: ${USER}.key, ${USER}.csr"
echo
openssl req -noout -text -verify -in "./${USER}.csr"
# Base64-encode the CSR on a single line (-w 0) for the manifest's request field.
BASE64_CSR=$(base64 -w 0 "${USER}.csr")
echo
echo ">>> Create CertificateSigningRequest yaml"
echo
cat <<EOF > "${USER}-csr.yaml"
apiVersion: $K8S_CSR_VERSION
kind: CertificateSigningRequest
metadata:
  name: ${USER}
spec:
  groups:
  - system:authenticated
  request: $BASE64_CSR
  signerName: kubernetes.io/kube-apiserver-client
  usages:
  - client auth
EOF
echo "You can now submit the CSR for cluster CA signature using command:"
echo "  >> 'kubectl apply -f ${USER}-csr.yaml'"
echo
echo "Other management commands :"
echo
echo "  List CSR (view status)  >> kubectl get csr"
echo "  Approve pending CSR     >> kubectl certificate approve ${USER}"
echo "  Download signed CERT    >> kubectl get csr/${USER} -o jsonpath='{.status.certificate}' | base64 --decode > ${USER}.crt"
echo "  Verify certificate      >> openssl x509 -noout -text -in ${USER}.crt"
echo
We use the previously created bash script signing-request.sh
:
bash signing-request.sh mike
kubectl get csr
kubectl certificate approve mike
kubectl get csr/mike -o jsonpath='{.status.certificate}' | base64 --decode > mike.crt
kubectl create role pod-reader --verb=get --verb=list --verb=watch --resource=pods --namespace default
kubectl create rolebinding pod-reader-rb --role=pod-reader --user=mike --namespace default
We suppose that cluster name is my-cluster
find by calling kubectl config view
k config set-credentials mike --client-certificate mike.crt --client-key mike.key --embed-certs=true
k config set-context mike@my-cluster --cluster my-cluster --user mike --namespace default
# Authorized call
kubectl get pods my-cluster --context mike@my-cluster
# Forbidden call : User "mike" cannot list resource "pods" in API group "" in the namespace "kube-system"
kubectl get pods my-cluster --context mike@my-cluster --namespace kube-system
kubectl auth can-i list nodes --as system:serviceaccount:namespace:service_account_name
kubectl auth can-i get nodes --namespace demonamespace --as system:serviceaccount:demonamespace:service_account_name
kubectl auth can-i get pods --all-namespaces --as system:serviceaccount:namespace:service_account_name
kubectl auth can-i list services --all-namespaces --as system:serviceaccount:namespace:service_account_name
kubectl get events --sort-by=".metadata.creationTimestamp" | tail -8
kubectl run nginx --image=nginx --port=80 --record
kubectl set image deployment nginx nginx=nginx:1.2
kubectl rollout history deployment nginx
kubectl rollout status deployment nginx
kubectl rollout undo deployment nginx --to-revision=2
kubectl autoscale deployment nginx --cpu-percent=50 --min=1 --max=2
kubectl run nginx3 --image=nginx --requests=cpu=200m --limits=cpu=300m --requests=memory=1Gi --limits=memory=2Gi
kubectl run hello --schedule="*/1 * * * *" --restart=OnFailure --image=busybox -- /bin/sh -c "date; echo Hello from the kubernetes cluster"
kubectl port-forward redis-master-765d459796-258hz 6379:6379
kubectl get pods redis-master-765d459796-258hz -o yaml
kubectl create secret docker-registry --dry-run=true registryhttps --docker-server=https://example.com:5000 --docker-username=username --docker-password=password --docker-email=docker@docker.com -o yaml
kubectl create secret generic db-user-pass --from-file=./username.txt --from-file=./password.txt (echo -n 'username' > ./username.txt, echo -n 'password' > ./pass)
kubectl get secrets -o yaml
kubectl create secret generic db-pass --from-literal=username=<username> --from-literal=password=<somebase64password>
kubectl top node NODE_NAME
kubectl top pod --namespace=<namespace>
kubectl top pod POD_NAME --containers
kubectl top pod -l name=myLabel
kubectl rollout resume deploy/nginx
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep username | awk '{print $1}')
Kubernetes Architecture Diagram