# Snapshot the node list once, then keep only lines mentioning "sobusy".
kubectl get nodes > all_nodes.txt
# Filter with grep instead of `cat | while read`: avoids the useless cat,
# the unquoted `echo $line` (word-splitting/globbing, collapsed whitespace),
# and a shell iteration per line. -- guards against patterns starting with -.
grep -- 'sobusy' all_nodes.txt > filtered_nodes.txt
kubectl logs <pod> | grep <search_criteria> > some_file.txt
e.g. kubectl logs busybox | grep fail > logs.txt
ps aux
kubectl cluster-info
- get cluster info
kubectl create namespace [namespace]
- create a namespace
kubectl create -f [yaml file] --namespace [namespace]
- create a pod in a specific namespace using yaml file
kubectl config get-contexts
- get contexts
kubectl config use-context [context name]
- use different context
kubectl get componentstatus
- status of all components
kubectl get pods -n testing [pod name] -o jsonpath={.spec.containers[*].name}
- get container names from a pod
kubectl get --watch pod [pod name] -n testing
- watch for changes to a pod
kubectl get events
- get events on the cluster (events are namespaced)
kubectl get pods -l person=kevin
- get resources by label
kubectl get node <node> -o wide
- get more info from resource
kubectl get pods --show-labels
- show all labels
kubectl describe pod -n testing [pod name]
- describe pod
kubectl logs -f -n testing [pod name]
- get logs from a pod
kubectl logs <pod_name> | grep <search_term>
- filter logs from a pod
kubectl logs -f -n testing [pod name] -c [container name]
- get logs from a container
kubectl top pods
- get usage info for pods
kubectl top pod [pod name] --containers -n prod
- get usage info for containers in a pod
kubectl top node [node name]
- get top info for a node
kubectl top pod --namespace=C --selector=A=B
kubectl scale rc production --replicas=6
kubectl create secret generic kevin-secret --from-file=my_secret.txt --dry-run -o yaml | kubectl apply -f -
kubectl create secret generic my-secret --from-literal=key1=supersecret
- secret from literal
kubectl run my-nginx --image=nginx --replicas=2 --port=80
- make simple deployment with two replicas
kubectl run ubuntu-pod --image=gcr.io/google_containers/ubuntu:14.04 --port=8080
kubectl expose deployment ubuntu-pod --type=NodePort
- create a service for an existing deployment
kubectl set image deployment/nginx-deployment nginx=nginx:1.9.1 --record
kubectl rollout history deployment/nginx-deployment
- check history
kubectl rollout status deployment nginx-deployment
- check rollout status
kubectl label pods --all person=kevin
- attach label to resource
kubectl label pods --all person-
- remove label from resource
kubectl drain $node --delete-local-data=true --force
(add & to throw into the background)
$ kubectl run nginx-is-dumb --image=nginx --replicas=2 --port=80
deployment "nginx-is-dumb" created
$ kubectl expose deployment nginx-is-dumb
service "nginx-is-dumb" exposed
$ kubectl run busybox --image=busybox --rm --restart=OnFailure -ti -- /bin/nslookup nginx-is-dumb.default
Server: 10.11.240.10
Address 1: 10.11.240.10 kube-dns.kube-system.svc.cluster.local
In order to get the pod’s IP address, look up the ip for the pod by running kubectl describe pod nginx-is-dumb
, and then, inside the busybox temp pod, run:
kubectl run busybox --image=busybox --rm --restart=OnFailure -ti -- /bin/nslookup <pod-ip>.default.pod.cluster.local
which will give you four lines of output. We want the bottom two lines.
create nginx pod and expose it
k run -it --rm --restart=Never --image=infoblox/dnstools:latest dnstools
nslookup nginx.default.svc.cluster.local
sudo chown -R student:student /opt
ssh onto etcd node
sudo su -
cat /proc/$(pgrep etcd)/environ | xargs -0 -n1 echo | grep ETCD_DATA
that will give you the data directory
etcdctl backup --data-dir <data directory> --backup-dir <target dir>
ssh onto the node you want to setup
scp hk8s-node-1:/etc/systemd/system/kubelet.service .
scp hk8s-node-1:/etc/systemd/system/kube-proxy.service .
scp kubelet.service ik8s-node-0:
scp kube-proxy.service ik8s-node-0:
ssh ik8s-node-0
manually edit over there, put into /etc/systemd/system, use systemctl, etc.
- ssh onto worker node
sudo su -
- Edit /etc/systemd/system/kubelet.service
- Add
--pod-manifest-path /etc/kubernetes/manifests
mkdir -p /etc/kubernetes/manifests
make sure the path exists. Add the pod manifest file (this is the yaml file) to
/etc/kubernetes/manifests
systemctl daemon-reload
systemctl restart kubelet.service
or whatever the kubelet service is called; on juju it was: sudo systemctl restart snap.kubelet.daemon.service