@NassK
Last active June 12, 2021 14:17
aws-auth-cm.yaml.tpl
apiVersion: v1
kind: ConfigMap
metadata:
  name: aws-auth
  namespace: kube-system
data:
  mapRoles: |
    - rolearn: ${arn_instance_role}
      username: system:node:{{EC2PrivateDNSName}}
      groups:
        - system:bootstrappers
        - system:nodes
cluster-autoscaler.yaml.tpl
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
  name: cluster-autoscaler
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cluster-autoscaler
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
rules:
  - apiGroups: [""]
    resources: ["events", "endpoints"]
    verbs: ["create", "patch"]
  - apiGroups: [""]
    resources: ["pods/eviction"]
    verbs: ["create"]
  - apiGroups: [""]
    resources: ["pods/status"]
    verbs: ["update"]
  - apiGroups: [""]
    resources: ["endpoints"]
    resourceNames: ["cluster-autoscaler"]
    verbs: ["get", "update"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["watch", "list", "get", "update"]
  - apiGroups: [""]
    resources:
      - "pods"
      - "services"
      - "replicationcontrollers"
      - "persistentvolumeclaims"
      - "persistentvolumes"
    verbs: ["watch", "list", "get"]
  - apiGroups: ["extensions"]
    resources: ["replicasets", "daemonsets"]
    verbs: ["watch", "list", "get"]
  - apiGroups: ["policy"]
    resources: ["poddisruptionbudgets"]
    verbs: ["watch", "list"]
  - apiGroups: ["apps"]
    resources: ["statefulsets", "replicasets", "daemonsets"]
    verbs: ["watch", "list", "get"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses", "csinodes"]
    verbs: ["watch", "list", "get"]
  - apiGroups: ["batch", "extensions"]
    resources: ["jobs"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["create"]
  - apiGroups: ["coordination.k8s.io"]
    resourceNames: ["cluster-autoscaler"]
    resources: ["leases"]
    verbs: ["get", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: cluster-autoscaler
  namespace: kube-system
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["create", "list", "watch"]
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["cluster-autoscaler-status", "cluster-autoscaler-priority-expander"]
    verbs: ["delete", "get", "update", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cluster-autoscaler
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-autoscaler
subjects:
  - kind: ServiceAccount
    name: cluster-autoscaler
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: cluster-autoscaler
  namespace: kube-system
  labels:
    k8s-addon: cluster-autoscaler.addons.k8s.io
    k8s-app: cluster-autoscaler
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cluster-autoscaler
subjects:
  - kind: ServiceAccount
    name: cluster-autoscaler
    namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cluster-autoscaler
  namespace: kube-system
  labels:
    app: cluster-autoscaler
spec:
  replicas: 1
  selector:
    matchLabels:
      app: cluster-autoscaler
  template:
    metadata:
      labels:
        app: cluster-autoscaler
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "8085"
        cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
    spec:
      serviceAccountName: cluster-autoscaler
      containers:
        - image: k8s.gcr.io/autoscaling/cluster-autoscaler:v1.17.3
          name: cluster-autoscaler
          resources:
            limits:
              cpu: 100m
              memory: 300Mi
            requests:
              cpu: 100m
              memory: 300Mi
          command:
            - ./cluster-autoscaler
            - --v=4
            - --stderrthreshold=info
            - --cloud-provider=aws
            - --skip-nodes-with-local-storage=false
            - --expander=least-waste
            - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/${cluster_name}
            - --balance-similar-node-groups
            - --skip-nodes-with-system-pods=false
          volumeMounts:
            - name: ssl-certs
              mountPath: /etc/ssl/certs/ca-certificates.crt
              readOnly: true
          imagePullPolicy: "Always"
      volumes:
        - name: ssl-certs
          hostPath:
            path: "/etc/ssl/certs/ca-bundle.crt"
step_6.tf
# Step 6: Adding the worker nodes + CNI + Kubernetes Cluster Autoscaler
resource "null_resource" "install_calico" { # The node won't enter the ready state without a CNI initialized
provisioner "local-exec" {
command = "kubectl apply -f https://docs.projectcalico.org/v3.8/manifests/calico.yaml"
}
depends_on = [null_resource.generate_kubeconfig]
}
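# For context: null_resource.generate_kubeconfig is defined in an earlier step of
# this series and is not part of this gist. A minimal sketch of what it might look
# like (an assumption, not the original code), using the AWS CLI's
# "aws eks update-kubeconfig" command to write the kubeconfig that every
# kubectl-based provisioner below depends on:
resource "null_resource" "generate_kubeconfig_sketch" { # Hypothetical name; the real resource is "generate_kubeconfig"
  provisioner "local-exec" {
    command = "aws eks update-kubeconfig --name ${aws_eks_cluster.cluster.name} --region ${var.region}"
  }
  depends_on = [aws_eks_cluster.cluster]
}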
data "template_file" "aws_auth_configmap" { # Generates the aws-auth, otherwise, worker node can't join. Use this cm to add users/role to your cluster
template = file("${path.module}/aws-auth-cm.yaml.tpl")
vars = {
arn_instance_role = aws_iam_role.node_group.arn
}
}
resource "null_resource" "apply_aws_auth_configmap" { # Apply the aws-auth config map
provisioner "local-exec" {
command = "echo '${data.template_file.aws_auth_configmap.rendered}' > aws-auth-cm.yaml && kubectl apply -f aws-auth-cm.yaml && rm aws-auth-cm.yaml"
}
depends_on = [null_resource.generate_kubeconfig]
}
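# A variation (a sketch, not part of the original gist): since Terraform 0.12 the
# template_file data source is superseded by the built-in templatefile() function,
# and kubectl can read the manifest from stdin, which avoids the temporary file.
# The resource name "apply_aws_auth_configmap_alt" is illustrative.
resource "null_resource" "apply_aws_auth_configmap_alt" {
  provisioner "local-exec" {
    command = "echo '${templatefile("${path.module}/aws-auth-cm.yaml.tpl", { arn_instance_role = aws_iam_role.node_group.arn })}' | kubectl apply -f -"
  }
  depends_on = [null_resource.generate_kubeconfig]
}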
resource "aws_eks_node_group" "node_group" { # One node group per AZ (each AZ has its own private subnet)
count = length(module.vpc.private_subnets)
cluster_name = aws_eks_cluster.cluster.name
node_group_name = "fnode_group-${substr(module.vpc.private_subnets[count.index], 7, length(module.vpc.private_subnets[count.index]))}"
node_role_arn = aws_iam_role.node_group.arn
subnet_ids = [module.vpc.private_subnets[count.index]]
scaling_config {
desired_size = 1
max_size = 3
min_size = 1
}
depends_on = [null_resource.apply_aws_auth_configmap]
}
resource "aws_iam_role" "node_group" {
name = "eks_node_group_role"
assume_role_policy = jsonencode({
Statement = [{
Action = "sts:AssumeRole"
Effect = "Allow"
Principal = {
Service = "ec2.amazonaws.com"
}
}]
Version = "2012-10-17"
})
}
resource "aws_iam_role_policy_attachment" "policy-AmazonEKSWorkerNodePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = aws_iam_role.node_group.name
}
resource "aws_iam_role_policy_attachment" "policy-AmazonEC2ContainerRegistryReadOnly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = aws_iam_role.node_group.name
}
data "template_file" "cluster_autoscaler_yaml" { # Generate the cluster autoscaler from a template
template = file("${path.module}/cluster-autoscaler.yaml.tpl")
vars = {
cluster_name = aws_eks_cluster.cluster.name
}
}
resource "null_resource" "cluster_autoscaler_install" { # Install the cluster autoscaler
provisioner "local-exec" {
command = "echo '${data.template_file.cluster_autoscaler_yaml.rendered}' > cluster_autoscaler.yaml && kubectl apply -f cluster_autoscaler.yaml && rm cluster_autoscaler.yaml"
}
depends_on = [aws_eks_cluster.cluster, null_resource.generate_kubeconfig]
}
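# An optional verification step (a sketch, not part of the original gist): block
# until the autoscaler Deployment finishes rolling out, so a failed image pull or
# RBAC mistake surfaces during "terraform apply" instead of later. The resource
# name is hypothetical.
resource "null_resource" "cluster_autoscaler_wait" {
  provisioner "local-exec" {
    command = "kubectl -n kube-system rollout status deployment/cluster-autoscaler --timeout=120s"
  }
  depends_on = [null_resource.cluster_autoscaler_install]
}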
variable "region" {
description = "The AWS region"
default = "eu-west-1"
}
variable "cluster_name" {
description = "The name of the Amazon EKS cluster."
default = "my-eks-cluster"
}