# Set each node's hostname. Run exactly ONE of these per machine — they are
# alternatives for the three hosts, not a sequence (running all three on one
# host would leave it named worker2).
hostnamectl set-hostname master1   # on the master node only
hostnamectl set-hostname worker1   # on worker node 1 only
hostnamectl set-hostname worker2   # on worker node 2 only
# Make all node names resolvable; append the cluster hosts in one shot
# (run on every node).
cat <<'HOSTS' >> /etc/hosts
192.168.9.237 master1
192.168.9.239 worker1
192.168.9.235 worker2
HOSTS
# Kubernetes manages its own iptables rules; stop firewalld now and keep it
# off across reboots (disable --now == stop + disable).
systemctl disable --now firewalld
swap memory 비활성화
# Disable swap for the running system — kubelet refuses to start with swap
# enabled. (Persisting this requires commenting the swap line in /etc/fstab,
# as noted below.)
swapoff -a
참고: swap 메모리를 영구적으로 비활성화하고 싶은 경우 /etc/fstab
에서 swap과 관련된 부분을 주석처리한다.
# /dev/mapper/cl-swap swap swap defaults 0 0
# Switch SELinux to permissive for the current boot (setenforce) and persist
# the change across reboots by rewriting /etc/selinux/config.
setenforce 0
sed -i 's/SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config
# Load the kernel modules CRI-O and kubernetes networking require, persist
# them for future boots, write the required sysctls, and apply them now.
modprobe overlay
modprobe br_netfilter

# Fix: persist the modules so they are re-loaded after a reboot (the original
# only loaded them into the running kernel).
cat << "EOF" | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

# Fix: overwrite instead of append (tee -a) so re-running this setup does not
# duplicate the entries in the sysctl drop-in.
cat << "EOF" | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

# Fix: apply the sysctls immediately — previously they only took effect after
# a reboot.
sudo sysctl --system
# Add the openSUSE Kubic repositories for CRI-O. The CRI-O minor version is
# pinned to match the kubernetes minor version installed below.
VERSION=1.19
REPO_ID="devel:kubic:libcontainers:stable"
REPO_BASE="https://download.opensuse.org/repositories"
curl -L -o "/etc/yum.repos.d/${REPO_ID}.repo" \
  "${REPO_BASE}/${REPO_ID}/CentOS_7/${REPO_ID}.repo"
curl -L -o "/etc/yum.repos.d/${REPO_ID}:cri-o:${VERSION}.repo" \
  "${REPO_BASE}/${REPO_ID}:cri-o:${VERSION}/CentOS_7/${REPO_ID}:cri-o:${VERSION}.repo"
# Install the CRI-O runtime and start it now plus on every boot.
yum -y install cri-o
systemctl enable --now crio
# Remove CRI-O's bundled default CNI configs so they do not shadow the CNI
# plugin (calico) installed later.
sudo rm -rf /etc/cni/net.d/100-crio-bridge.conf /etc/cni/net.d/200-loopback.conf
/etc/crio/crio.conf 수정
plugin_dirs = ["/opt/cni/bin"] 추가 (crio.conf는 TOML 형식이므로 배열 문법을 사용한다)
max pid limit 설정이 필요한 경우 pids_limit를 /proc/sys/kernel/pid_max보다 낮은 값으로 설정
pids_limit = 32768
# Write the upstream Kubernetes yum repository definition. The heredoc
# delimiter is quoted so the file content is taken literally (there are no
# expansions today; this just keeps future edits safe).
cat <<'EOF' > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
# Install kubeadm/kubelet/kubectl pinned to the exact version referenced by
# kubernetesVersion in kubeadm-config.yaml (v1.19.4) so all components match.
yum install -y kubeadm-1.19.4-0 kubelet-1.19.4-0 kubectl-1.19.4-0
# Enable kubelet at boot; kubeadm will start/configure it during init/join.
systemctl enable kubelet
# kubeadm-config.yaml — three documents: init config, cluster config, and
# kubelet config. Indentation restored: advertiseAddress/bindPort must be
# nested under localAPIEndpoint, criSocket under nodeRegistration, and the
# subnets under networking, otherwise the YAML does not parse as intended.
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.9.237   # master node IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/crio/crio.sock   # use CRI-O's socket, not Docker's
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.19.4                 # same version as the installed kubeadm/kubelet/kubectl
controlPlaneEndpoint: 192.168.9.237:6443   # master IP; port must be 6443
imageRepository: k8s.gcr.io
networking:
  serviceSubnet: 172.1.1.0/24   # service IP CIDR
  podSubnet: 10.1.1.0/24        # pod IP CIDR
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd   # match the container runtime's cgroup driver
# Bootstrap the control plane on the master using the config file above
# (saved as kubeadm-config.yaml).
kubeadm init --config=kubeadm-config.yaml
정상적으로 bootstrap된 결과
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join 192.168.9.237:6443 --token 3l2l3y.x063t2xjebsd19vr \
--discovery-token-ca-cert-hash sha256:7356d8679d97a18d9c71a87129924549af7f7041f9b57237fb4a24b36b82f730 \
--control-plane
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.9.237:6443 --token 3l2l3y.x063t2xjebsd19vr \
--discovery-token-ca-cert-hash sha256:7356d8679d97a18d9c71a87129924549af7f7041f9b57237fb4a24b36b82f730
# Copy the admin kubeconfig so kubectl targets the new cluster; without it
# kubectl falls back to localhost:8080 and fails (see the error below).
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@master1 ~]# kubectl get pods
The connection to the server localhost:8080 was refused - did you specify the right host or port?
--cri-socket=/var/run/crio/crio.sock
옵션 추가) kubeadm join 192.168.9.237:6443 --token 3l2l3y.x063t2xjebsd19vr \
--discovery-token-ca-cert-hash sha256:7356d8679d97a18d9c71a87129924549af7f7041f9b57237fb4a24b36b82f730 --cri-socket=/var/run/crio/crio.sock
--cri-socket=/var/run/crio/crio.sock
옵션의 경우, 호스트에서 crio runtime만 있다면 쓰지 않아도 되지만 다른 runtime이 있다면 사용해야 한다.
If empty kubeadm will try to auto-detect this value;
use this option only if you have more than one CRI installed or if you have non-standard CRI socket.
# Generate a fresh bootstrap token and print the matching `kubeadm join`
# command (useful when the original token from `kubeadm init` has expired).
kubeadm token create --print-join-command
[root@master1 ~]# kubeadm token create --print-join-command
W0226 11:24:46.962616 45387 kubelet.go:200] cannot automatically set CgroupDriver when starting the Kubelet: cannot execute 'docker info -f {{.CgroupDriver}}': executable file not found in $PATH
W0226 11:24:46.976848 45387 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
kubeadm join 192.168.9.237:6443 --token ugsf4f.ra2qnn64ffhkujua --discovery-token-ca-cert-hash sha256:7356d8679d97a18d9c71a87129924549af7f7041f9b57237fb4a24b36b82f730
[root@master1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master1 NotReady master 2m30s v1.19.4
worker1 NotReady <none> 102s v1.19.4
worker2 NotReady <none> 104s v1.19.4
Node의 상태가 NotReady인 이유는 kubelet에서 찾을 수 있다.
Container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: No CNI configuration file in /etc/cni/net.d/. Has your network provider started?
calico cni 설치하기
# Install the Calico CNI plugin; the archived v3.17 manifest is chosen to
# match Kubernetes 1.19 (presumably for version compatibility — confirm
# against Calico's support matrix). Nodes turn Ready once the CNI is up.
curl https://docs.projectcalico.org/archive/v3.17/manifests/calico.yaml -O
kubectl apply -f calico.yaml
노드 상태 확인하기
[root@master1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master1 Ready master 10m v1.19.4
worker1 Ready <none> 9m31s v1.19.4
worker2 Ready <none> 9m33s v1.19.4