Set the hostname on each node (run the matching command on that node):
hostnamectl set-hostname master1
hostnamectl set-hostname master2
hostnamectl set-hostname master3
hostnamectl set-hostname worker1
hostnamectl set-hostname worker2
echo "192.168.9.235 master1" >> /etc/hosts
echo "192.168.9.236 master2" >> /etc/hosts
echo "192.168.9.237 master3" >> /etc/hosts
echo "192.168.9.238 worker1" >> /etc/hosts
echo "192.168.9.239 worker2" >> /etc/hosts
systemctl stop firewalld
systemctl disable firewalld
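Disabling the firewall is fine for a lab. If it must stay on, a sketch of the ports the Kubernetes docs list for control-plane nodes instead (not part of the original guide):
firewall-cmd --permanent --add-port=6443/tcp        # Kubernetes API server
firewall-cmd --permanent --add-port=2379-2380/tcp   # etcd client/peer
firewall-cmd --permanent --add-port=10250-10252/tcp # kubelet, kube-scheduler, kube-controller-manager
firewall-cmd --reload
# on workers: 10250/tcp and 30000-32767/tcp (NodePort range)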
Disable swap memory:
swapoff -a
Note: to disable swap permanently, comment out the swap entry in /etc/fstab:
# /dev/mapper/cl-swap swap swap defaults 0 0
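If you prefer a one-liner for this (a sketch; check /etc/fstab afterwards):
sed -i '/ swap / s/^/#/' /etc/fstab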
setenforce 0
sed -i 's/SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config
modprobe overlay
modprobe br_netfilter
cat << "EOF" | sudo tee -a /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
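Two follow-ups worth running here (standard practice, not in the original): load the new sysctl settings immediately, and make the module loads survive a reboot:
sysctl --system
cat << "EOF" | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF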
VERSION=1.19
curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/CentOS_7/devel:kubic:libcontainers:stable.repo
curl -L -o /etc/yum.repos.d/devel:kubic:libcontainers:stable:cri-o:${VERSION}.repo https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:${VERSION}/CentOS_7/devel:kubic:libcontainers:stable:cri-o:${VERSION}.repo
yum -y install cri-o
systemctl enable crio
systemctl start crio
Remove the default CNI configs installed by CRI-O (they would conflict with the CNI plugin we install later):
sudo rm -f /etc/cni/net.d/100-crio-bridge.conf
sudo rm -f /etc/cni/net.d/200-loopback.conf
Edit /etc/crio/crio.conf as follows.
Add "/opt/cni/bin" to plugin_dirs (a TOML list under [crio.network]):
plugin_dirs = ["/opt/cni/bin"]
If a max PID limit is needed, set pids_limit (under [crio.runtime]) to a value lower than /proc/sys/kernel/pid_max:
pids_limit = 32768
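After editing, restart CRI-O so the changes take effect (not shown in the original):
systemctl restart crio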
Install keepalived to provide HA across the master nodes.
yum install -y keepalived
Configure keepalived: edit /etc/keepalived/keepalived.conf on each master node as follows.
master1:
vrrp_instance VI_1 {
    state MASTER            # only one master is MASTER; the others are BACKUP
    interface ens192        # the network interface to use
    virtual_router_id 51
    priority 100            # the VIP goes to the available master with the highest priority
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1234      # set a password
    }
    virtual_ipaddress {
        192.168.9.10        # the VIP (an unused IP in the same subnet)
    }
}
master2:
vrrp_instance VI_1 {
    state BACKUP
    interface ens192
    virtual_router_id 51
    priority 99
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1234
    }
    virtual_ipaddress {
        192.168.9.10
    }
}
master3:
vrrp_instance VI_1 {
    state BACKUP
    interface ens192
    virtual_router_id 51
    priority 98
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1234
    }
    virtual_ipaddress {
        192.168.9.10
    }
}
Restart keepalived on every master, then verify the VIP responds to ping:
systemctl restart keepalived
systemctl enable keepalived
[root@master2 ~]# ping 192.168.9.10
PING 192.168.9.10 (192.168.9.10) 56(84) bytes of data.
64 bytes from 192.168.9.10: icmp_seq=1 ttl=64 time=0.317 ms
64 bytes from 192.168.9.10: icmp_seq=2 ttl=64 time=0.283 ms
^C
--- 192.168.9.10 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 78ms
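To see which master currently holds the VIP (a quick check, not in the original), list the addresses on the VRRP interface; only the active MASTER will show 192.168.9.10:
ip -4 addr show ens192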
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubeadm-1.19.4-0 kubelet-1.19.4-0 kubectl-1.19.4-0
systemctl enable kubelet
Create kubeadm-config.yaml on master1:
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.9.235   # master1 IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/crio/crio.sock
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.19.4              # same version as the installed kubeadm/kubelet/kubectl
controlPlaneEndpoint: 192.168.9.10:6443 # the keepalived VIP; the port must be 6443
imageRepository: k8s.gcr.io
networking:
  serviceSubnet: 172.1.1.0/24   # CIDR for service IPs
  podSubnet: 10.1.1.0/24        # CIDR for pod IPs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
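Optionally, the whole flow can be exercised first without changing the host (kubeadm supports --dry-run; treat this as a hedged suggestion, not a step from the original):
kubeadm init --config=kubeadm-config.yaml --dry-run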
Run kubeadm init with the --upload-certs option so the other masters can later pull the control-plane certificates:
kubeadm init --config=kubeadm-config.yaml --upload-certs
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join 192.168.9.10:6443 --token n8iv08.7lu86gvnce5edh8p \
--discovery-token-ca-cert-hash sha256:b113a82f167150fe751943010a1d23dca4e777f8643df7fbd54a15c55e5e83f3 \
--control-plane --certificate-key 411ccb129bc974197111e12a88c11430e9c496b25c6185cd2be7cfa369c6b521
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.9.10:6443 --token n8iv08.7lu86gvnce5edh8p \
--discovery-token-ca-cert-hash sha256:b113a82f167150fe751943010a1d23dca4e777f8643df7fbd54a15c55e5e83f3
On master1, set up kubectl access:
mkdir -p $HOME/.kube
sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Without this step, kubectl cannot reach the API server:
[root@master1 ~]# kubectl get pods
The connection to the server localhost:8080 was refused - did you specify the right host or port?
On master2 and master3, join as additional control-plane nodes:
kubeadm join 192.168.9.10:6443 --token n8iv08.7lu86gvnce5edh8p \
--discovery-token-ca-cert-hash sha256:b113a82f167150fe751943010a1d23dca4e777f8643df7fbd54a15c55e5e83f3 \
--control-plane --certificate-key 411ccb129bc974197111e12a88c11430e9c496b25c6185cd2be7cfa369c6b521
Then set up kubectl access on each of them as well:
mkdir -p $HOME/.kube
sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
On worker1 and worker2, join as worker nodes:
kubeadm join 192.168.9.10:6443 --token n8iv08.7lu86gvnce5edh8p \
--discovery-token-ca-cert-hash sha256:b113a82f167150fe751943010a1d23dca4e777f8643df7fbd54a15c55e5e83f3
Join tokens expire (24 hours by default); a new join command can be generated at any time:
[root@master1 ~]# kubeadm token create --print-join-command
W0226 11:24:46.962616 45387 kubelet.go:200] cannot automatically set CgroupDriver when starting the Kubelet: cannot execute 'docker info -f {{.CgroupDriver}}': executable file not found in $PATH
W0226 11:24:46.976848 45387 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
kubeadm join 192.168.9.237:6443 --token ugsf4f.ra2qnn64ffhkujua --discovery-token-ca-cert-hash sha256:7356d8679d97a18d9c71a87129924549af7f7041f9b57237fb4a24b36b82f730
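Existing tokens and their TTLs can be inspected with (not in the original):
kubeadm token list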
Install the Calico CNI:
curl https://docs.projectcalico.org/archive/v3.17/manifests/calico.yaml -O
kubectl apply -f calico.yaml
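One caveat worth checking (an assumption based on Calico's manifest defaults, not covered in the original): calico.yaml ships with CALICO_IPV4POOL_CIDR commented out and a default pool of 192.168.0.0/16. With kubeadm the pod CIDR is usually auto-detected, but if pods come up with addresses outside podSubnet, set it explicitly in the calico-node DaemonSet:
            - name: CALICO_IPV4POOL_CIDR
              value: "10.1.1.0/24"
Nodes stay NotReady until the CNI is running; you can watch the Calico pods come up with kubectl get pods -n kube-system -w.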
[root@master1 ~]# kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
master1 Ready master 5m3s v1.19.4 192.168.9.235 <none> CentOS Linux 8 (Core) 4.18.0-193.el8.x86_64 cri-o://1.19.2
master2 Ready master 34s v1.19.4 192.168.9.236 <none> CentOS Linux 8 (Core) 4.18.0-193.el8.x86_64 cri-o://1.19.2
master3 Ready master 36s v1.19.4 192.168.9.237 <none> CentOS Linux 8 (Core) 4.18.0-193.el8.x86_64 cri-o://1.19.2
worker1 Ready <none> 34s v1.19.4 192.168.9.238 <none> CentOS Linux 8 (Core) 4.18.0-193.el8.x86_64 cri-o://1.19.2
worker2 Ready <none> 36s v1.19.4 192.168.9.239 <none> CentOS Linux 8 (Core) 4.18.0-193.el8.x86_64 cri-o://1.19.2
Kubernetes can maintain HA only while the master-node (etcd) quorum is satisfied.
This is why an odd number of master nodes is kept: etcd needs a majority, floor(n/2)+1, of its members. In a two-master cluster the quorum is 2, so when one master dies only one remains, quorum is lost, and the cluster stops serving.
To test this, I built a cluster with only master1 and master2 and halted master1; the following error occurred:
[root@master2 ~]# kubectl get nodes
The connection to the server 192.168.9.10:6443 was refused - did you specify the right host or port?
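The arithmetic behind this (etcd's quorum rule, quorum = floor(n/2) + 1):
masters (n)   quorum   failures tolerated
2             2        0
3             2        1
5             3        2
So two masters tolerate no failures at all, which is why three is the practical minimum for HA.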
With master1, master2, and master3 forming the quorum, halting master1 leaves the cluster running in HA:
[root@master2 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master1 NotReady master 38m v1.19.4
master2 Ready master 7m18s v1.19.4
master3 Ready master 3m22s v1.19.4