(OPTIONAL): This is already done by the Ansible scripts if using https://github.com/lukassup/libvirt-istiolab-tf
NOTE: the kube-proxy configuration can also be passed to `kubeadm init` at cluster creation time.
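For reference, a minimal sketch of providing the same settings at install time via `kubeadm init --config`; the file name and the bare ClusterConfiguration document are illustrative, not taken from the lab scripts:

```yaml
# kubeadm-config.yaml -- hypothetical example; API versions may differ by kubeadm release
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
---
# kubeadm forwards this document to kube-proxy
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
ipvs:
  strictARP: true
```

```sh
kubeadm init --config kubeadm-config.yaml
```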
```sh
kubectl edit configmap -n kube-system kube-proxy
```

Set the following fields in the embedded configuration:

```yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
ipvs:
  strictARP: true
```
Restart kube-proxy so the new configuration takes effect:

```sh
kubectl delete pod -n kube-system -l k8s-app=kube-proxy
```
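To confirm the proxier actually switched, one option (assuming shell access to a node and the default kube-proxy metrics port) is to query kube-proxy directly or inspect the IPVS tables:

```sh
# kube-proxy reports its active mode on the metrics port (10249 by default)
curl -s http://localhost:10249/proxyMode   # expect: ipvs
# or list the IPVS virtual servers on the node
sudo ipvsadm -Ln
```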
(OPTIONAL): The `.10` IP is the default and this step can be skipped if using https://github.com/lukassup/libvirt-istiolab-tf
Set a predefined clusterIP for kube-dns:

```sh
kubectl patch svc/kube-dns -n kube-system --patch '{"spec":{"clusterIP":"10.1.1.10"}}'
```
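A quick check that the service ended up at the intended address:

```sh
kubectl get svc -n kube-system kube-dns -o jsonpath='{.spec.clusterIP}{"\n"}'
```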
- install `calicoctl` on the Kubernetes control plane nodes (see the install sketch below)
- install the Calico manifest using `kubectl apply -f calico.yaml`
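A minimal sketch for installing `calicoctl` as a standalone binary; the version below is a placeholder and should be pinned to the same release as `calico.yaml`:

```sh
# CALICO_VERSION is a placeholder -- match it to your Calico manifest
CALICO_VERSION=v3.26.0
curl -L -o /usr/local/bin/calicoctl \
  "https://github.com/projectcalico/calico/releases/download/${CALICO_VERSION}/calicoctl-linux-amd64"
chmod +x /usr/local/bin/calicoctl
```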
Switch node IP autodetection to the Kubernetes-reported internal IP, disable IPIP and VXLAN encapsulation (pod traffic is routed natively over BGP in this lab), keep outgoing NAT, and wait for calico-node to become Ready:

```sh
kubectl set env daemonset/calico-node -n kube-system IP_AUTODETECTION_METHOD=kubernetes-internal-ip
kubectl set env daemonset/calico-node -n kube-system CALICO_IPV4POOL_IPIP=Never
kubectl set env daemonset/calico-node -n kube-system CALICO_IPV4POOL_VXLAN=Never
kubectl set env daemonset/calico-node -n kube-system CALICO_IPV4POOL_NAT_OUTGOING=true
kubectl wait pods -n kube-system -l k8s-app=calico-node --for condition=Ready --timeout=60s
```
Apply the same settings to the existing default IP pool (the `CALICO_IPV4POOL_*` variables only take effect when a pool is first created):

```sh
calicoctl patch pool default-ipv4-ippool --patch='{"spec":{"ipipMode":"Never"}}'
calicoctl patch pool default-ipv4-ippool --patch='{"spec":{"vxlanMode":"Never"}}'
calicoctl patch pool default-ipv4-ippool --patch='{"spec":{"natOutgoing":true}}'
```
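To verify the pool now routes natively with no encapsulation:

```sh
calicoctl get ippool default-ipv4-ippool -o yaml
```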
NOTE: the Istio subzone label is used here; for an on-prem deployment a subzone typically corresponds to a rack.
```sh
calicoctl apply -f - <<EOF
---
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: rack-10-0-1-tor
spec:
  asNumber: 64513
  nodeSelector: topology.istio.io/subzone == 'rack-10-0-1'
  peerIP: 10.0.1.254
---
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: rack-10-0-2-tor
spec:
  asNumber: 64514
  nodeSelector: topology.istio.io/subzone == 'rack-10-0-2'
  peerIP: 10.0.2.254
---
apiVersion: projectcalico.org/v3
kind: BGPPeer
metadata:
  name: rack-10-0-3-tor
spec:
  asNumber: 64515
  nodeSelector: topology.istio.io/subzone == 'rack-10-0-3'
  peerIP: 10.0.3.254
EOF
```
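The resulting peers can be listed to confirm all three racks are covered:

```sh
calicoctl get bgppeer
```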
Label the nodes with their rack subzone:

```sh
kubectl label node kube-ctrl01 topology.istio.io/subzone=rack-10-0-1
kubectl label node kube-node01 topology.istio.io/subzone=rack-10-0-1
kubectl label node kube-ctrl02 topology.istio.io/subzone=rack-10-0-2
kubectl label node kube-node02 topology.istio.io/subzone=rack-10-0-2
kubectl label node kube-ctrl03 topology.istio.io/subzone=rack-10-0-3
kubectl label node kube-node03 topology.istio.io/subzone=rack-10-0-3
```
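The labels can be checked with the `-L` column flag:

```sh
kubectl get nodes -L topology.istio.io/subzone
```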
Assign each node the ASN of its rack:

```sh
calicoctl patch node kube-ctrl01 --patch='{"spec":{"bgp":{"asNumber":64513}}}'
calicoctl patch node kube-node01 --patch='{"spec":{"bgp":{"asNumber":64513}}}'
calicoctl patch node kube-ctrl02 --patch='{"spec":{"bgp":{"asNumber":64514}}}'
calicoctl patch node kube-node02 --patch='{"spec":{"bgp":{"asNumber":64514}}}'
calicoctl patch node kube-ctrl03 --patch='{"spec":{"bgp":{"asNumber":64515}}}'
calicoctl patch node kube-node03 --patch='{"spec":{"bgp":{"asNumber":64515}}}'
```
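The per-node ASNs should then show up in the wide node listing:

```sh
calicoctl get nodes -o wide
```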
Finally, disable the full node-to-node BGP mesh (peering now runs through the ToRs) and advertise the service cluster IP range to the fabric:

```sh
calicoctl apply -f - <<EOF
---
apiVersion: projectcalico.org/v3
kind: BGPConfiguration
metadata:
  name: default
spec:
  nodeToNodeMeshEnabled: false
  logSeverityScreen: Info
  bindMode: NodeIP
  serviceClusterIPs:
    - cidr: 10.1.1.0/24
EOF
```
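With the mesh disabled, pod and service routes are exchanged through the ToRs and installed into each node's kernel routing table by Calico's BIRD daemon; a quick way to inspect them on a node:

```sh
# routes installed by Calico/BIRD carry "proto bird"
ip route show proto bird
```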
Verify the BGP session to the rack's ToR from a node:

```console
# calicoctl node status
Calico process is running.

IPv4 BGP status
+--------------+---------------+-------+----------+-------------+
| PEER ADDRESS |   PEER TYPE   | STATE |  SINCE   |    INFO     |
+--------------+---------------+-------+----------+-------------+
| 10.0.1.254   | node specific | up    | 09:20:33 | Established |
+--------------+---------------+-------+----------+-------------+
```
On the leaf (ToR) switch, the sessions and learned prefixes look like this:

```console
leaf01# show bgp vrf vrf-main ipv4 unicast summary
BGP router identifier 10.0.0.1, local AS number 64513 vrf-id 7
BGP table version 18
RIB entries 29, using 5568 bytes of memory
Peers 4, using 2896 KiB of memory
Peer groups 2, using 128 bytes of memory

Neighbor       V    AS MsgRcvd MsgSent TblVer InQ OutQ  Up/Down State/PfxRcd PfxSnt Desc
*10.0.1.1      4 64513     211     218      0   0    0 00:10:22            2     12 N/A
*10.0.1.2      4 64513     209     216      0   0    0 00:10:17            2     12 N/A
spine01(swp1)  4 64512    1512    1513      0   0    0 01:14:51           10     15 N/A
spine02(swp2)  4 64512    1512    1514      0   0    0 01:14:51           10     15 N/A

Total number of neighbors 4

* - dynamic neighbor
2 dynamic neighbor(s), limit 100
```
```console
leaf01# show bgp vrf vrf-main ipv4 unicast neighbors 10.0.1.1 routes
BGP table version is 18, local router ID is 10.0.0.1, vrf id 7
Default local pref 100, local AS 64513
Status codes:  s suppressed, d damped, h history, * valid, > best, = multipath,
               i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes:  i - IGP, e - EGP, ? - incomplete
RPKI validation codes: V valid, I invalid, N Not found

   Network          Next Hop            Metric LocPrf Weight Path
*>i10.1.1.0/24      10.0.1.1                      100      0 i
*>i10.2.107.192/26  10.0.1.1                      100      0 i

Displayed 2 routes and 26 total paths

leaf01# show bgp vrf vrf-main ipv4 unicast neighbors 10.0.1.2 routes
BGP table version is 18, local router ID is 10.0.0.1, vrf id 7
Default local pref 100, local AS 64513
Status codes:  s suppressed, d damped, h history, * valid, > best, = multipath,
               i internal, r RIB-failure, S Stale, R Removed
Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self
Origin codes:  i - IGP, e - EGP, ? - incomplete
RPKI validation codes: V valid, I invalid, N Not found

   Network          Next Hop            Metric LocPrf Weight Path
*=i10.1.1.0/24      10.0.1.2                      100      0 i
*>i10.2.0.128/26    10.0.1.2                      100      0 i

Displayed 2 routes and 26 total paths
```
Deploy a test workload and inspect the pod and service addresses:

```console
# kubectl create deployment httpbin --image=docker.io/kong/httpbin --replicas=3
# kubectl expose deploy/httpbin --port=80 --target-port=80
# kubectl wait pods -l app=httpbin --for condition=Ready --timeout=60s
# kubectl get pod -l=app=httpbin -o wide -w
NAME                      READY   STATUS    RESTARTS   AGE   IP           NODE          ...
httpbin-dd48785fc-5n8bj   1/1     Running   0          86s   10.2.161.4   kube-node03   ...
httpbin-dd48785fc-f25xd   1/1     Running   0          86s   10.2.238.1   kube-node02   ...
httpbin-dd48785fc-lvnsn   1/1     Running   0          86s   10.2.0.129   kube-node01   ...
# kubectl get svc -l=app=httpbin
NAME      TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
httpbin   ClusterIP   10.1.1.190   <none>        80/TCP    52s
```
```console
## test pod icmp ping
# for ip in $(kubectl get pod -l=app=httpbin -o jsonpath='{..status.podIP}'); do ping -c1 $ip; done
## test pod tcp connectivity
# for ip in $(kubectl get pod -l=app=httpbin -o jsonpath='{..status.podIP}'); do nc -zv $ip 80; done
## test pod http
# POD_URLS=$(kubectl get pod -l=app=httpbin -o=jsonpath='{ range .items[*] }http://{ .status.podIP }:80{"\n"}{ end }')
# for url in ${POD_URLS[@]}; do curl -sSLD/dev/stdout -o/dev/null "$url"; done
## test svc icmp ping, this should fail with an ICMP "Destination Port Unreachable" error
# SVC_IP=$(kubectl get svc/httpbin -o=jsonpath='{.spec.clusterIP}')
# ping -c1 $SVC_IP
## test svc tcp connectivity
# SVC_PORT=$(kubectl get svc/httpbin -o=jsonpath='{.spec.ports[0].targetPort}')
# nc -zv $SVC_IP $SVC_PORT
## test svc http
# SVC_URL=http://$SVC_IP:$SVC_PORT
# curl -sSLD/dev/stdout -o/dev/null "$SVC_URL"
```
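Because `10.1.1.0/24` is advertised to the fabric via `serviceClusterIPs`, the service should also answer from hosts outside the cluster that learn the route from the leaf/spine network. A hypothetical check from such a host (the IP is taken from the `kubectl get svc` output above):

```sh
# run on a lab host routed through the fabric, not on a cluster node
curl -sSLD/dev/stdout -o/dev/null http://10.1.1.190:80/
```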