We will be creating a multi-master Kubernetes setup on bare-metal servers, but the same steps can be used on any cloud provider. We will be using the following servers to set up the cluster.
Node info:
| Node Name | IP | Purpose |
| --- | --- | --- |
| kub01 | 172.21.48.60 | k8s master / etcd node |
| kub02 | 172.21.48.62 | k8s master / etcd node |
| kub03 | 172.21.48.80 | k8s master / etcd node |
| kubminion01 | 172.21.48.92 | k8s minion |
| kublb01 | 172.21.48.93 | nginx proxy/lb |
Set the hostname on each of the servers in the following way:
root@iZt4n2tnaspjdn255mv8bzZ:~# hostnamectl set-hostname kub01
root@iZt4n2tnaspjdn255mv8bzZ:~# logout
Connection to 172.21.48.60 closed.
MacBook-Air:~ ravigadgil-air$ ssh -A root@172.21.48.60
Welcome to Ubuntu 18.04.2 LTS (GNU/Linux 4.15.0-52-generic x86_64)
* Documentation: https://help.ubuntu.com
* Management: https://landscape.canonical.com
* Support: https://ubuntu.com/advantage
Last login: Mon Aug 19 13:34:22 2019 from 192.168.254.182
root@kub01:~#
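The remaining hostnames can be set the same way without logging in to each server interactively, assuming root SSH access from your workstation (adjust users and IPs to your environment):
MacBook-Air:~ ravigadgil-air$ ssh root@172.21.48.62 "hostnamectl set-hostname kub02"
MacBook-Air:~ ravigadgil-air$ ssh root@172.21.48.80 "hostnamectl set-hostname kub03"
MacBook-Air:~ ravigadgil-air$ ssh root@172.21.48.92 "hostnamectl set-hostname kubminion01"
MacBook-Air:~ ravigadgil-air$ ssh root@172.21.48.93 "hostnamectl set-hostname kublb01"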
Update /etc/hosts on all the servers so that each server can resolve the others by their Node Names.
root@kub01:~# cat /etc/hosts
127.0.0.1 localhost
# The following lines are desirable for IPv6 capable hosts
::1 localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.21.48.60 iZt4n2tnaspjdn255mv8bzZ iZt4n2tnaspjdn255mv8bzZ
172.21.48.60 kub01
172.21.48.62 kub02
172.21.48.80 kub03
172.21.48.92 kubminion01
172.21.48.93 kublb01
You can ping the servers by their Node Names to check that everything resolves correctly.
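For example, a quick loop confirms that every Node Name resolves and answers:
root@kub01:~# for host in kub01 kub02 kub03 kubminion01 kublb01; do ping -c 1 $host > /dev/null && echo "$host reachable" || echo "$host NOT reachable"; done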
Turn off swap on all the Nodes.
root@kub01:~# sudo swapoff -a
root@kub01:~# sudo sed -i 's/^.*swap/#&/' /etc/fstab
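To confirm that swap is disabled, swapon should print nothing and free should report 0 for swap:
root@kub01:~# swapon --show
root@kub01:~# free -h | grep -i swap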
Now run the following commands on all the Nodes to install the required packages.
root@kub01:~# apt-get update && apt-get install -y curl apt-transport-https
Install the latest version of Docker on each of the Nodes.
root@kub01:~# curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
root@kub01:~# cat <<EOF >/etc/apt/sources.list.d/docker.list
deb https://download.docker.com/linux/$(lsb_release -si | tr '[:upper:]' '[:lower:]') $(lsb_release -cs) stable
EOF
root@kub01:~# apt-get update && apt-get install -y docker-ce
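Optionally, verify the installation and switch Docker to the systemd cgroup driver, which kubeadm recommends (a minimal sketch; /etc/docker/daemon.json is the standard Docker configuration path, create it if it does not exist):
root@kub01:~# docker info | grep -i "cgroup driver"
root@kub01:~# cat <<EOF >/etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
root@kub01:~# systemctl restart docker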
Next, configure the kernel parameters on all the Nodes so that bridged traffic passes through iptables, and set the default FORWARD policy to ACCEPT.
root@kub01:~# cat <<__EOF__ > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
__EOF__
root@kub01:~# sysctl --system
root@kub01:~# sysctl -p /etc/sysctl.d/k8s.conf
root@kub01:~# iptables -P FORWARD ACCEPT
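Verify that the settings took effect; both keys should report 1 (if sysctl complains that the keys do not exist, load the br_netfilter module first with modprobe br_netfilter):
root@kub01:~# sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables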
Install kubeadm, kubelet and kubectl.
root@kub01:~# curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
root@kub01:~# cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF
root@kub01:~# apt-get update
root@kub01:~# apt-get install -y kubelet kubeadm kubectl
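Optionally, hold the packages so that an unattended upgrade does not move the cluster to a new version behind your back, and confirm what was installed:
root@kub01:~# apt-mark hold kubelet kubeadm kubectl
root@kub01:~# kubeadm version -o short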
Create the required certificates on one of the Master Nodes; they will be used by the etcd members to authenticate each other.
root@kub01:~# mkdir -p ~/k8s/crt ~/k8s/key ~/k8s/csr
root@kub01:~# cat <<__EOF__>~/k8s/openssl.cnf
[ req ]
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_ca ]
basicConstraints = critical, CA:TRUE
keyUsage = critical, digitalSignature, keyEncipherment, keyCertSign
[ v3_req_etcd ]
basicConstraints = CA:FALSE
keyUsage = critical, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth, clientAuth
subjectAltName = @alt_names_etcd
[ alt_names_etcd ]
DNS.1 = kub01
DNS.2 = kub02
DNS.3 = kub03
IP.1 = 172.21.48.60
IP.2 = 172.21.48.62
IP.3 = 172.21.48.80
__EOF__
Generate the etcd CA, which will be used to sign all the etcd certificates.
root@kub01:~# openssl genrsa -out ~/k8s/key/etcd-ca.key 4096
root@kub01:~# openssl req -x509 -new -sha256 -nodes -key ~/k8s/key/etcd-ca.key -days 3650 -out ~/k8s/crt/etcd-ca.crt -subj "/CN=etcd-ca" -extensions v3_ca -config ~/k8s/openssl.cnf
Generate the etcd server and peer certificates.
root@kub01:~# openssl genrsa -out ~/k8s/key/etcd.key 4096
root@kub01:~# openssl req -new -sha256 -key ~/k8s/key/etcd.key -subj "/CN=etcd" -out ~/k8s/csr/etcd.csr
root@kub01:~# openssl x509 -req -in ~/k8s/csr/etcd.csr -sha256 -CA ~/k8s/crt/etcd-ca.crt -CAkey ~/k8s/key/etcd-ca.key -CAcreateserial -out ~/k8s/crt/etcd.crt -days 365 -extensions v3_req_etcd -extfile ~/k8s/openssl.cnf
root@kub01:~# openssl genrsa -out ~/k8s/key/etcd-peer.key 4096
root@kub01:~# openssl req -new -sha256 -key ~/k8s/key/etcd-peer.key -subj "/CN=etcd-peer" -out ~/k8s/csr/etcd-peer.csr
root@kub01:~# openssl x509 -req -in ~/k8s/csr/etcd-peer.csr -sha256 -CA ~/k8s/crt/etcd-ca.crt -CAkey ~/k8s/key/etcd-ca.key -CAcreateserial -out ~/k8s/crt/etcd-peer.crt -days 365 -extensions v3_req_etcd -extfile ~/k8s/openssl.cnf
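You can verify that the generated certificates carry the expected SANs before distributing them (the same check works for etcd-peer.crt):
root@kub01:~# openssl x509 -in ~/k8s/crt/etcd.crt -noout -text | grep -A1 "Subject Alternative Name"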
Install the etcd binaries on the Master Nodes.
root@kub01:~# wget https://github.com/coreos/etcd/releases/download/v3.3.4/etcd-v3.3.4-linux-amd64.tar.gz
root@kub01:~# tar -zxvf etcd-v3.3.4-linux-amd64.tar.gz
root@kub01:~# cp etcd-v3.3.4-linux-amd64/etcd* /usr/local/bin/
Create the etcd systemd unit file for each of the Master Nodes.
root@kub01:~# cd ~
root@kub01:~# cat <<__EOF__>~/etcd.service
[Unit]
Description=etcd
Documentation=https://github.com/coreos
[Service]
ExecStart=/usr/local/bin/etcd \\
--name kub01 \\
--cert-file=/etc/etcd/pki/etcd.crt \\
--key-file=/etc/etcd/pki/etcd.key \\
--peer-cert-file=/etc/etcd/pki/etcd-peer.crt \\
--peer-key-file=/etc/etcd/pki/etcd-peer.key \\
--trusted-ca-file=/etc/etcd/pki/etcd-ca.crt \\
--peer-trusted-ca-file=/etc/etcd/pki/etcd-ca.crt \\
--peer-client-cert-auth \\
--client-cert-auth \\
--initial-advertise-peer-urls https://172.21.48.60:2380 \\
--listen-peer-urls https://172.21.48.60:2380 \\
--listen-client-urls https://172.21.48.60:2379,http://127.0.0.1:2379 \\
--advertise-client-urls https://172.21.48.60:2379 \\
--initial-cluster-token etcd-cluster-0 \\
--initial-cluster kub01=https://172.21.48.60:2380,kub02=https://172.21.48.62:2380,kub03=https://172.21.48.80:2380 \\
--data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
__EOF__
Use the following command to copy the etcd configuration and certificates to the rest of the Master Nodes.
root@kub01:~# for master in kub01 kub02 kub03; do ssh ${master} "test -d /etc/etcd/pki && rm -rf /etc/etcd/pki" ; ssh ${master} "test -d /var/lib/etcd && rm -rf /var/lib/etcd" ; ssh ${master} "mkdir -p /etc/etcd/pki ; mkdir -p /var/lib/etcd" ; scp ~/k8s/crt/etcd* ~/k8s/key/etcd* ${master}:/etc/etcd/pki/; scp etcd.service ${master}:/etc/systemd/system/etcd.service ; done
Update the following entries in /etc/systemd/system/etcd.service on each Master Node with that Node's own name and IPs (a sed sketch follows this list):
--name
--initial-advertise-peer-urls
--listen-peer-urls
--listen-client-urls
--advertise-client-urls
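For example, on kub02 the substitution can be done with sed instead of editing the unit by hand (a rough sketch; the --initial-cluster line must keep all three original entries, so it is excluded from the IP substitution, and the same pattern applies on kub03 with its own name and IP):
root@kub02:~# sed -i -e 's/--name kub01/--name kub02/' -e '/--initial-cluster /!s/172.21.48.60/172.21.48.62/g' /etc/systemd/system/etcd.service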
Use the following command to start the service on all the Master Nodes at once, or start it individually on each one.
root@kub01:~# for master in kub01 kub02 kub03; do ssh ${master} "systemctl daemon-reload" ; ssh ${master} "systemctl start etcd" ; done
root@kub01:~# systemctl status etcd.service
● etcd.service - etcd
Loaded: loaded (/etc/systemd/system/etcd.service; disabled; vendor preset: enabled)
Active: active (running) since Mon 2019-08-19 16:45:45 CST; 6min ago
Docs: https://github.com/coreos
Main PID: 11460 (etcd)
Tasks: 7 (limit: 4681)
CGroup: /system.slice/etcd.service
└─11460 /usr/local/bin/etcd --name kub01 --cert-file=/etc/etcd/pki/etcd.crt --key-file=/etc/etcd/pki/etcd.key --peer-cert-file=/etc/etcd/pki/etcd-peer.crt --peer-key-file=/etc/etcd/pki/etcd-pee
Aug 19 16:51:56 kub01 etcd[11460]: failed to reach the peerURL(https://172.21.48.80:2380) of member c383a7e247b24ca9 (Get https://172.21.48.80:2380/version: dial tcp 172.21.48.80:2380: getsockopt: connect
Aug 19 16:51:56 kub01 etcd[11460]: cannot get the version of member c383a7e247b24ca9 (Get https://172.21.48.80:2380/version: dial tcp 172.21.48.80:2380: getsockopt: connection refused)
Aug 19 16:51:56 kub01 etcd[11460]: peer c383a7e247b24ca9 became active
Aug 19 16:51:56 kub01 etcd[11460]: established a TCP streaming connection with peer c383a7e247b24ca9 (stream Message reader)
Aug 19 16:51:56 kub01 etcd[11460]: established a TCP streaming connection with peer c383a7e247b24ca9 (stream MsgApp v2 reader)
Aug 19 16:51:57 kub01 etcd[11460]: established a TCP streaming connection with peer c383a7e247b24ca9 (stream MsgApp v2 writer)
Aug 19 16:51:57 kub01 etcd[11460]: established a TCP streaming connection with peer c383a7e247b24ca9 (stream Message writer)
Aug 19 16:52:00 kub01 etcd[11460]: updating the cluster version from 3.0 to 3.3
Aug 19 16:52:00 kub01 etcd[11460]: updated the cluster version from 3.0 to 3.3
Aug 19 16:52:00 kub01 etcd[11460]: enabled capabilities for version 3.3
To check the cluster health use the following command.
root@kub01:~# etcdctl --ca-file /etc/etcd/pki/etcd-ca.crt --cert-file /etc/etcd/pki/etcd.crt --key-file /etc/etcd/pki/etcd.key cluster-health
member 7ad503dbead712c9 is healthy: got healthy result from https://172.21.48.62:2379
member c383a7e247b24ca9 is healthy: got healthy result from https://172.21.48.80:2379
member c453d4efca062858 is healthy: got healthy result from https://172.21.48.60:2379
cluster is healthy
To list all the member nodes in the cluster.
root@kub01:~# etcdctl --ca-file /etc/etcd/pki/etcd-ca.crt --cert-file /etc/etcd/pki/etcd.crt --key-file /etc/etcd/pki/etcd.key member list
7ad503dbead712c9: name=kub02 peerURLs=https://172.21.48.62:2380 clientURLs=https://172.21.48.62:2379 isLeader=false
c383a7e247b24ca9: name=kub03 peerURLs=https://172.21.48.80:2380 clientURLs=https://172.21.48.80:2379 isLeader=false
c453d4efca062858: name=kub01 peerURLs=https://172.21.48.60:2380 clientURLs=https://172.21.48.60:2379 isLeader=true
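The same checks are available through the etcd v3 API as well, using the same certificates (a sketch):
root@kub01:~# ETCDCTL_API=3 etcdctl --cacert /etc/etcd/pki/etcd-ca.crt --cert /etc/etcd/pki/etcd.crt --key /etc/etcd/pki/etcd.key --endpoints https://172.21.48.60:2379,https://172.21.48.62:2379,https://172.21.48.80:2379 endpoint health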
Create the kubeadm init file to define the parameters of the Kubernetes cluster. controlPlaneEndpoint and the certSANs should match the addresses through which the API server will be reached, and podSubnet is the IP range in which the pods will run; it must match the network we will configure in flannel. Create and execute the file on each of the Master Nodes.
root@kub01:~# cat <<__EOF__>~/kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: stable
controlPlaneEndpoint: "172.21.48.60:6443"
apiServer:
  certSANs:
  - "172.21.48.60"
  - "172.21.48.62"
  - "172.21.48.80"
  - "172.21.48.93"
etcd:
  external:
    endpoints:
    - https://172.21.48.60:2379
    - https://172.21.48.62:2379
    - https://172.21.48.80:2379
    caFile: /etc/etcd/pki/etcd-ca.crt
    certFile: /etc/etcd/pki/etcd.crt
    keyFile: /etc/etcd/pki/etcd.key
networking:
  podSubnet: 10.244.0.0/16
certificatesDir: "/etc/kubernetes/pki"
__EOF__
Run the following command to initialize the configuration.
root@kub01:~# kubeadm init --config ~/kubeadm-init.yaml
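kubeadm init prints the kubeadm join command (token plus CA certificate hash) that we will use later for the worker Node; if you lose it, it can be regenerated on any Master Node:
root@kub01:~# kubeadm token create --print-join-command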
Copy the certificates to the other Master Nodes.
root@kub01:~# scp -rp /etc/kubernetes/pki kub02:/etc/kubernetes/
root@kub01:~# scp -rp /etc/kubernetes/pki kub03:/etc/kubernetes/
Run the following commands on each Master Node to configure kubectl access.
root@kub01:~# mkdir -p $HOME/.kube
root@kub01:~# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
root@kub01:~# sudo chown $(id -u):$(id -g) $HOME/.kube/config
Check the health of nodes in the cluster.
root@kub01:~# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kub01 NotReady master 10m v1.15.2
kub02 NotReady master 9m1s v1.15.2
kub03 NotReady master 8m56s v1.15.2
Now configure the pod networking. The network defined in kube-flannel.yml must match the podSubnet defined in the kubeadm-init.yaml file.
root@kub01:~# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
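You can confirm that a flannel pod is running on every Master Node before moving on (app=flannel is the label set in kube-flannel.yml):
root@kub01:~# kubectl -n kube-system get pods -l app=flannel -o wide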
The following command lists everything we have deployed so far to set up the cluster.
root@kub01:~# kubectl get all --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system pod/coredns-5c98db65d4-jgr9m 1/1 Running 0 19m
kube-system pod/coredns-5c98db65d4-xlgwp 1/1 Running 0 19m
kube-system pod/kube-apiserver-kub01 1/1 Running 0 18m
kube-system pod/kube-apiserver-kub02 1/1 Running 0 18m
kube-system pod/kube-apiserver-kub03 1/1 Running 0 18m
kube-system pod/kube-controller-manager-kub01 1/1 Running 0 19m
kube-system pod/kube-controller-manager-kub02 1/1 Running 0 18m
kube-system pod/kube-controller-manager-kub03 1/1 Running 0 18m
kube-system pod/kube-flannel-ds-amd64-7vtfp 1/1 Running 0 2m55s
kube-system pod/kube-flannel-ds-amd64-qzjkb 1/1 Running 0 2m55s
kube-system pod/kube-flannel-ds-amd64-xknwk 1/1 Running 0 2m55s
kube-system pod/kube-proxy-m6rcg 1/1 Running 0 18m
kube-system pod/kube-proxy-m9z4x 1/1 Running 0 19m
kube-system pod/kube-proxy-zgbps 1/1 Running 0 18m
kube-system pod/kube-scheduler-kub01 1/1 Running 0 18m
kube-system pod/kube-scheduler-kub02 1/1 Running 0 18m
kube-system pod/kube-scheduler-kub03 1/1 Running 0 18m
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 19m
kube-system service/kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 19m
NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
kube-system daemonset.apps/kube-flannel-ds-amd64 3 3 3 3 3 beta.kubernetes.io/arch=amd64 2m55s
kube-system daemonset.apps/kube-flannel-ds-arm 0 0 0 0 0 beta.kubernetes.io/arch=arm 2m55s
kube-system daemonset.apps/kube-flannel-ds-arm64 0 0 0 0 0 beta.kubernetes.io/arch=arm64 2m55s
kube-system daemonset.apps/kube-flannel-ds-ppc64le 0 0 0 0 0 beta.kubernetes.io/arch=ppc64le 2m55s
kube-system daemonset.apps/kube-flannel-ds-s390x 0 0 0 0 0 beta.kubernetes.io/arch=s390x 2m55s
kube-system daemonset.apps/kube-proxy 3 3 3 3 3 beta.kubernetes.io/os=linux 19m
NAMESPACE NAME READY UP-TO-DATE AVAILABLE AGE
kube-system deployment.apps/coredns 2/2 2 2 19m
NAMESPACE NAME DESIRED CURRENT READY AGE
kube-system replicaset.apps/coredns-5c98db65d4 2 2 2 19m
To add a worker Node to the cluster, run the following join command on it (it is printed at the end of kubeadm init).
root@kubminion01:/etc/kubernetes# kubeadm join 172.21.48.60:6443 --token cj6qwj.m7ofqlaxrc75mylx \
> --discovery-token-ca-cert-hash sha256:4ed6023a07334595806d98e9b7dc3b888eda5b50a8f9a4d185c14eccb661967c
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[WARNING SystemVerification]: this Docker version is not on the list of validated versions: 19.03.1. Latest validated version: 18.09
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
Now when you list the Nodes on any of the Master Nodes, the new worker appears in the output.
root@kub01:~# kubectl get nodes
NAME STATUS ROLES AGE VERSION
kub01 Ready master 47m v1.15.2
kub02 Ready master 46m v1.15.2
kub03 Ready master 46m v1.15.2
kubminion01 Ready <none> 5m29s v1.15.2
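The worker joins with the role <none>; if you prefer the ROLES column to read worker, you can optionally label it:
root@kub01:~# kubectl label node kubminion01 node-role.kubernetes.io/worker=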
To deploy the Kubernetes dashboard, run the following command.
root@kub01:~# kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml
secret/kubernetes-dashboard-certs created
serviceaccount/kubernetes-dashboard created
role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created
deployment.apps/kubernetes-dashboard created
service/kubernetes-dashboard created
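You can check that the dashboard pod came up (k8s-app=kubernetes-dashboard is the label used by the v1.10.1 manifest):
root@kub01:~# kubectl -n kube-system get pods -l k8s-app=kubernetes-dashboard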
Create the Helm RBAC configuration file.
root@kub01:~# cat <<__EOF__>~/helm-rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: tiller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: tiller
  namespace: kube-system
__EOF__
Apply the helm configuration.
root@kub01:~# kubectl create -f helm-rbac.yaml
serviceaccount/tiller created
clusterrolebinding.rbac.authorization.k8s.io/tiller created
Install Helm on the Master Node.
root@kub01:~# wget https://storage.googleapis.com/kubernetes-helm/helm-v2.9.1-linux-amd64.tar.gz
root@kub01:~# tar -xvzf helm-v2.9.1-linux-amd64.tar.gz
root@kub01:~# cp linux-amd64/helm /usr/local/bin/
Initialize Helm; this deploys Tiller into the cluster using the service account we just created.
root@kub01:~# helm init --service-account tiller --tiller-namespace kube-system
Creating /root/.helm
Creating /root/.helm/repository
Creating /root/.helm/repository/cache
Creating /root/.helm/repository/local
Creating /root/.helm/plugins
Creating /root/.helm/starters
Creating /root/.helm/cache/archive
Creating /root/.helm/repository/repositories.yaml
Adding stable repo with URL: https://kubernetes-charts.storage.googleapis.com
Adding local repo with URL: http://127.0.0.1:8879/charts
$HELM_HOME has been configured at /root/.helm.
Tiller (the Helm server-side component) has been installed into your Kubernetes Cluster.
Please note: by default, Tiller is deployed with an insecure 'allow unauthenticated users' policy.
For more information on securing your installation see: https://docs.helm.sh/using_helm/#securing-your-helm-installation
Happy Helming!
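Once Tiller is up, you can verify that both the Helm client and server respond and that the tiller-deploy pod is running:
root@kub01:~# helm version
root@kub01:~# kubectl -n kube-system get pods -l app=helm,name=tiller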
To access the Kubernetes dashboard we need a client certificate that can be imported into the browser for authentication. Run the following commands to create it.
root@kub01:~# grep 'client-certificate-data' ~/.kube/config | head -n 1 | awk '{print $2}' | base64 -d >> kubecfg.crt
root@kub01:~# grep 'client-key-data' ~/.kube/config | head -n 1 | awk '{print $2}' | base64 -d >> kubecfg.key
root@kub01:~# openssl pkcs12 -export -clcerts -inkey kubecfg.key -in kubecfg.crt -out kubecfg.p12 -name "kubernetes-dashboard-client"
Enter Export Password:
Verifying - Enter Export Password:
To authorize the dashboard login, create the following ServiceAccount and ClusterRoleBinding.
root@kub01:~# cat <<__EOF__>~/dashboard-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
__EOF__
root@kub01:~# cat <<__EOF__>~/dashboard-clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
__EOF__
root@kub01:~# kubectl create -f dashboard-serviceaccount.yaml
serviceaccount/admin-user created
root@kub01:~# kubectl create -f dashboard-clusterrolebinding.yaml
clusterrolebinding.rbac.authorization.k8s.io/admin-user created
Copy the kubecfg.p12 file that was just created to the system from which you want to access the Kubernetes dashboard, and import it into your browser's certificate store; your browser's documentation describes how to add a client certificate.
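For example, the file can be pulled to the workstation used earlier with scp and then imported into the browser (a sketch, assuming SSH access from the workstation):
MacBook-Air:~ ravigadgil-air$ scp root@172.21.48.60:~/kubecfg.p12 .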
After adding the certificate to the browser, access the dashboard using any of the Master Node IPs.
(Screenshot: Kubernetes dashboard)
To get the token used to log in to the dashboard, run the following command.
root@kub01:~# kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Name: admin-user-token-kt2c6
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: admin-user
kubernetes.io/service-account.uid: 41c3ea06-b792-4601-b3d0-934aa326dca6
Type: kubernetes.io/service-account-token
Data
====
namespace: 11 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWt0MmM2Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI0MWMzZWEwNi1iNzkyLTQ2MDEtYjNkMC05MzRhYTMyNmRjYTYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.cV1Wsftjyw4cpbbk_wQBGzu2_E4PGOSiT5Wmt3T6Gahfhqf4ieooi1BKH9WC2d0l_9OkfSGVJv0rXTpfubLB-zgUKIOpXqJt2Bu4ftshjyO5IMJ4eogwv33fUZoz6621AJ-uBeIJgWhJxB4as4R1TrOAWsxVjT5rRY-9QRpijiIUBWpMoIPl_5pyAtkzaJQN2wa9bb-2cbBTfxCa8p33zfkQH_L_ZNHiEkClu-0ztB7ll871Ll_40-TMe9B3_ANfYLEEfDC71Fx3eG9igQGpRD4dKXtRZlNpMc4VYYGeqchbd3i8aYWWRC-xz6bpQgz9oUU0SeMCAfwrhfw-Kqkiww
ca.crt: 1025 bytes
Enter the token and access the dashboard.
(Screenshot: Kubernetes dashboard)
Now we will make our Kubernetes dashboard highly available by putting an Nginx proxy in front of the Master Nodes. Run the following commands on the kublb01 server, which we created for the load balancer.
root@kublb01:~# apt-get install nginx nginx-extras
Take a backup of the original nginx.conf file and replace it with the following configuration, substituting your own IP addresses.
root@kublb01:/etc/nginx# cp nginx.conf nginx.conf.bak
root@kublb01:/etc/nginx# cat <<'__EOF__' >/etc/nginx/nginx.conf
worker_processes 1;
include /etc/nginx/modules-enabled/*.conf;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    #tcp_nopush on;
    keepalive_timeout 65;
    #gzip on;

    include /etc/nginx/conf.d/*.conf;
}

stream {
    upstream kubserver {
        server 172.21.48.60:6443 weight=5 max_fails=3 fail_timeout=30s;
        server 172.21.48.62:6443 weight=5 max_fails=3 fail_timeout=30s;
        server 172.21.48.80:6443 weight=5 max_fails=3 fail_timeout=30s;
    }
    server {
        listen 6443;
        proxy_connect_timeout 1s;
        proxy_timeout 3s;
        proxy_pass kubserver;
    }
}
__EOF__
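Validate the configuration and restart Nginx to apply it (the stream block needs the stream module, which nginx-extras provides through modules-enabled):
root@kublb01:/etc/nginx# nginx -t
root@kublb01:/etc/nginx# systemctl restart nginx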
Now access the Kubernetes dashboard using the proxy Node's IP.
(Screenshots: Kubernetes dashboard accessed through the proxy Node)