Launch a kubeadm-bootstrapped single-node k8s cluster in a Fedora Linux VM. (Optionally, create an AWS Fedora VM and add k8s etcd encryption via the AWS KMS plugin.)
#!/bin/bash
sudo dnf group install -y "Container Management"
sudo dnf install -y golang
go version
sudo yum install -y \
  containers-common \
  device-mapper-devel \
  git \
  glib2-devel \
  glibc-devel \
  glibc-static \
  go \
  gpgme-devel \
  libassuan-devel \
  libgpg-error-devel \
  libseccomp-devel \
  libselinux-devel \
  pkgconfig \
  make \
  runc
# remove swap (Fedora enables swap-on-zram by default; dropping zram-generator-defaults disables it after a reboot)
sudo dnf remove -y zram-generator-defaults
sudo reboot
curl -O https://raw.githubusercontent.com/cri-o/cri-o/main/scripts/get
sudo bash ./get -t v1.27.0 # may error for runc: Fedora installs runc at /usr/bin/runc, not /usr/local/bin/runc
sudo chcon -u system_u -r object_r -t container_runtime_exec_t /usr/bin/runc
sudo systemctl enable crio --now
crio --version
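# (optional) quick sanity checks: CRI-O service state and the relabelled runc binary
sudo systemctl is-active crio
ls -Z /usr/bin/runc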
# ipv4 forward, net overlay
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
sudo modprobe overlay
sudo modprobe br_netfilter
# sysctl params required by setup, params persist across reboots
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
# Apply sysctl params without reboot
sudo sysctl --system
lsmod | grep br_netfilter
lsmod | grep overlay
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF
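# the legacy packages.cloud.google.com repos above have since been deprecated in favor of
# the community-owned pkgs.k8s.io repos; an equivalent definition (pinned to v1.27 to match
# the CRI-O version installed above) would look like:
# cat <<EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
# [kubernetes]
# name=Kubernetes
# baseurl=https://pkgs.k8s.io/core:/stable:/v1.27/rpm/
# enabled=1
# gpgcheck=1
# gpgkey=https://pkgs.k8s.io/core:/stable:/v1.27/rpm/repodata/repomd.xml.key
# exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
# EOF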
# disable selinux (not recommended in production, use kubelet with selinux instead)
# sudo setenforce 0
# sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
sudo yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
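# (optional) confirm the installed tool versions before bootstrapping
kubeadm version -o short
kubectl version --client
kubelet --version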
# turn off swap
# sudo swapoff -a
# free -h
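# custom SELinux policy module: allow rules (likely collected audit2allow-style from AVC
# denials) needed to run kubelet and containers with SELinux left enforcing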
cat << EOF > kubelet-e2e.te
module kubelet-e2e 1.0;

require {
    type iptables_t;
    type cgroup_t;
    type user_tmp_t;
    type init_t;
    type container_t;
    class dir { ioctl create open write };
    class file { append create lock map open read rename unlink write };
}

#============= init_t ==============
allow init_t user_tmp_t:file { append create lock map open read rename unlink write };

#============= container_t ==============
allow container_t user_tmp_t:dir { create open write };

#============= iptables_t ==============
allow iptables_t cgroup_t:dir ioctl;
EOF
sudo checkmodule -M -m -o /root/kubelet-e2e.mod ./kubelet-e2e.te
sudo semodule_package -o /root/kubelet-e2e.pp -m /root/kubelet-e2e.mod
sudo semodule -i /root/kubelet-e2e.pp
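# (optional) confirm the custom policy module is now loaded
sudo semodule -l | grep kubelet-e2e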
sudo mkdir -p /var/lib/kubelet
sudo chcon -R -u system_u -r object_r -t var_lib_t /var/lib/kubelet
sudo systemctl enable --now kubelet
sudo kubeadm init # this step fails if swap was not disabled earlier
mkdir -p ~/.kube
sudo cp -i /etc/kubernetes/admin.conf ~/.kube/config
sudo chown $(id -u):$(id -g) ~/.kube/config
kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.1/manifests/tigera-operator.yaml
kubectl taint nodes --all node-role.kubernetes.io/control-plane-
# kubectl taint nodes --all node-role.kubernetes.io/master-
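# note: tigera-operator.yaml above installs only the Calico operator; if the node stays
# NotReady, the Installation custom resource from the same release (custom-resources.yaml)
# likely still needs to be applied as well, e.g.
# kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.1/manifests/custom-resources.yaml
# (optional) basic sanity checks once the CNI is up
kubectl get nodes -o wide
kubectl get pods -A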
# KMS ARN: xxxx
cat << EOF | sudo tee /etc/kubernetes/manifests/cloud-kms-plugin.yaml
apiVersion: v1
kind: Pod
metadata:
  name: aws-encryption-provider
  namespace: kube-system
spec:
  securityContext:
    runAsNonRoot: false
  hostNetwork: true
  containers:
  - image: quay.io/swghosh/aws-cloud-kms
    name: aws-encryption-provider
    command:
    - /aws-encryption-provider
    - --key=arn:aws:kms:ap-south-1:xxxxxxxxxxx:key/xxxxxxxxxxxxxxxxxx
    - --region=ap-south-1
    - --listen=/var/run/kmsplugin/socket.sock
    ports:
    - containerPort: 8080
      protocol: TCP
    livenessProbe:
      httpGet:
        path: /healthz
        port: 8080
    volumeMounts:
    - mountPath: /var/run/kmsplugin
      name: var-run-kmsplugin
  volumes:
  - name: var-run-kmsplugin
    hostPath:
      path: /var/run/kmsplugin
      type: DirectoryOrCreate
EOF
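# (optional) the kubelet picks up the static pod manifest above and should create the
# plugin's listen socket; rough checks (the static pod name gets the node name suffixed)
ls -l /var/run/kmsplugin/
kubectl -n kube-system get pods | grep aws-encryption-provider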
sudo mkdir -p /etc/kubernetes/kms
cat << EOF | sudo tee /etc/kubernetes/kms/encryption-provider-config.yaml
apiVersion: apiserver.config.k8s.io/v1
kind: EncryptionConfiguration
resources:
- resources:
  - secrets
  providers:
  - kms:
      name: aws-encryption-provider
      endpoint: unix:///var/run/kmsplugin/socket.sock
      cachesize: 1000
      timeout: 3s
  - identity: {}
EOF
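# next, edit /etc/kubernetes/manifests/kube-apiserver.yaml to wire the KMS provider in:
# add the --encryption-provider-config / --encryption-provider-config-automatic-reload
# flags and hostPath-mount both /etc/kubernetes/kms and /var/run/kmsplugin into the
# kube-apiserver container; the resulting manifest is reproduced below for reference.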
# [fedora@ip-172-31-14-139 ~]$ sudo cat /etc/kubernetes/manifests/kube-apiserver.yaml
# apiVersion: v1
# kind: Pod
# metadata:
#   annotations:
#     kubeadm.kubernetes.io/kube-apiserver.advertise-address.endpoint: 172.31.14.139:6443
#   creationTimestamp: null
#   labels:
#     component: kube-apiserver
#     tier: control-plane
#   name: kube-apiserver
#   namespace: kube-system
# spec:
#   containers:
#   - command:
#     - kube-apiserver
#     - --advertise-address=172.31.14.139
#     - --allow-privileged=true
#     - --authorization-mode=Node,RBAC
#     - --client-ca-file=/etc/kubernetes/pki/ca.crt
#     - --enable-admission-plugins=NodeRestriction
#     - --enable-bootstrap-token-auth=true
#     - --etcd-cafile=/etc/kubernetes/pki/etcd/ca.crt
#     - --etcd-certfile=/etc/kubernetes/pki/apiserver-etcd-client.crt
#     - --etcd-keyfile=/etc/kubernetes/pki/apiserver-etcd-client.key
#     - --etcd-servers=https://127.0.0.1:2379
#     - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
#     - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
#     - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
#     - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
#     - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
#     - --requestheader-allowed-names=front-proxy-client
#     - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
#     - --requestheader-extra-headers-prefix=X-Remote-Extra-
#     - --requestheader-group-headers=X-Remote-Group
#     - --requestheader-username-headers=X-Remote-User
#     - --secure-port=6443
#     - --service-account-issuer=https://kubernetes.default.svc.cluster.local
#     - --service-account-key-file=/etc/kubernetes/pki/sa.pub
#     - --service-account-signing-key-file=/etc/kubernetes/pki/sa.key
#     - --service-cluster-ip-range=10.96.0.0/12
#     - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
#     - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
#     - --encryption-provider-config=/etc/kubernetes/kms/encryption-provider-config.yaml
#     - --encryption-provider-config-automatic-reload
#     image: registry.k8s.io/kube-apiserver:v1.27.2
#     imagePullPolicy: IfNotPresent
#     livenessProbe:
#       failureThreshold: 8
#       httpGet:
#         host: 172.31.14.139
#         path: /livez
#         port: 6443
#         scheme: HTTPS
#       initialDelaySeconds: 10
#       periodSeconds: 10
#       timeoutSeconds: 15
#     name: kube-apiserver
#     readinessProbe:
#       failureThreshold: 3
#       httpGet:
#         host: 172.31.14.139
#         path: /readyz
#         port: 6443
#         scheme: HTTPS
#       periodSeconds: 1
#       timeoutSeconds: 15
#     resources:
#       requests:
#         cpu: 250m
#     startupProbe:
#       failureThreshold: 24
#       httpGet:
#         host: 172.31.14.139
#         path: /livez
#         port: 6443
#         scheme: HTTPS
#       initialDelaySeconds: 10
#       periodSeconds: 10
#       timeoutSeconds: 15
#     volumeMounts:
#     - mountPath: /etc/ssl/certs
#       name: ca-certs
#       readOnly: true
#     - mountPath: /etc/pki
#       name: etc-pki
#       readOnly: true
#     - mountPath: /etc/kubernetes/pki
#       name: k8s-certs
#       readOnly: true
#     - mountPath: /var/run/kmsplugin
#       name: kms-plugin
#     - mountPath: /etc/kubernetes/kms
#       name: etc-kms
#   hostNetwork: true
#   priority: 2000001000
#   priorityClassName: system-node-critical
#   securityContext:
#     seccompProfile:
#       type: RuntimeDefault
#   volumes:
#   - hostPath:
#       path: /etc/ssl/certs
#       type: DirectoryOrCreate
#     name: ca-certs
#   - hostPath:
#       path: /etc/pki
#       type: DirectoryOrCreate
#     name: etc-pki
#   - hostPath:
#       path: /etc/kubernetes/pki
#       type: DirectoryOrCreate
#     name: k8s-certs
#   - hostPath:
#       path: /etc/kubernetes/kms
#       type: DirectoryOrCreate
#     name: etc-kms
#   - hostPath:
#       path: /var/run/kmsplugin
#       type: DirectoryOrCreate
#     name: kms-plugin
# status: {}
sudo systemctl restart kubelet
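# (optional) wait for the api-server static pod to come back and report ready after the restart
until kubectl get --raw='/readyz' >/dev/null 2>&1; do sleep 5; done
kubectl -n kube-system get pods -l component=kube-apiserver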
kubectl create -f - << EOF
apiVersion: v1
stringData:
  credentials: |-
    This is a dummy secret, not actual credentials, but nvm.
kind: Secret
metadata:
  name: web-creds
type: Opaque
EOF
curl -L -O https://github.com/etcd-io/etcd/releases/download/v3.5.9/etcd-v3.5.9-linux-amd64.tar.gz
tar -xvzf etcd-v3.5.9-linux-amd64.tar.gz
sudo etcd-v3.5.9-linux-amd64/etcdctl \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key \
  get /registry/secrets/default/web-creds | hexdump -C
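# with the KMS provider active, the stored value should begin with the
# "k8s:enc:kms:v1:aws-encryption-provider:" prefix rather than plaintext.
# (optional) re-encrypt any secrets that were written before encryption was enabled:
kubectl get secrets --all-namespaces -o json | kubectl replace -f -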
AWS API Calls to create a Fedora VM
1. API call: RunInstances

{
  "MaxCount": 1,
  "MinCount": 1,
  "ImageId": "ami-0f6a5497862cc5fc1",
  "InstanceType": "m5a.2xlarge",
  "KeyName": "swghosh-2503",
  "EbsOptimized": true,
  "BlockDeviceMappings": [
    {
      "DeviceName": "/dev/sda1",
      "Ebs": {
        "Encrypted": false,
        "DeleteOnTermination": true,
        "Iops": 3000,
        "SnapshotId": "snap-01ae00184018c8289",
        "VolumeSize": 128,
        "VolumeType": "gp3",
        "Throughput": 125
      }
    }
  ],
  "NetworkInterfaces": [
    {
      "SubnetId": "subnet-05e78b0ba4624ee6b",
      "AssociatePublicIpAddress": true,
      "DeviceIndex": 0,
      "Groups": [
        "<groupId of the new security group created below>"
      ]
    }
  ],
  "TagSpecifications": [
    {
      "ResourceType": "instance",
      "Tags": [
        {
          "Key": "Name",
          "Value": "swghosh-fedora-k8s-vm"
        }
      ]
    }
  ],
  "PrivateDnsNameOptions": {
    "HostnameType": "ip-name",
    "EnableResourceNameDnsARecord": false,
    "EnableResourceNameDnsAAAARecord": false
  }
}
2. API call: CreateSecurityGroup

{
  "GroupName": "launch-wizard",
  "Description": "launch-wizard created 2023-06-15T07:06:00.860Z",
  "VpcId": "vpc-043138d0dfca8147e"
}
3. API call: AuthorizeSecurityGroupIngress

{
  "GroupId": "<groupId of the security group created above>",
  "IpPermissions": [
    {
      "IpProtocol": "tcp",
      "FromPort": 22,
      "ToPort": 22,
      "IpRanges": [
        {
          "CidrIp": "0.0.0.0/0"
        }
      ]
    }
  ]
}
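For reference, the same sequence can be driven from the AWS CLI by saving each JSON body above to a file and passing it via --cli-input-json (the file names below are illustrative):

# order matters: create the security group first, authorize ingress, then launch the instance
aws ec2 create-security-group --cli-input-json file://create-security-group.json
aws ec2 authorize-security-group-ingress --cli-input-json file://authorize-ingress.json
aws ec2 run-instances --cli-input-json file://run-instances.json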