도커가 설치되어 있지 않다면, 아래 시리즈의 첫 번째 글을 참고해 먼저 설치한 후 진행한다.
https://kubernetes.io/ko/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
# The Kubernetes node name defaults to the machine's hostname -> rename it to the desired node name.
sudo hostnamectl set-hostname master0
hostname
# Add the host to /etc/hosts so it is reachable by hostname.
# Add the worker nodes' IPs here later as well.
sudo vi /etc/hosts
10.1.0.237 master0
# Stop the firewall (Ubuntu uses ufw rather than firewalld).
sudo ufw disable
sudo ufw status
# SELinux settings -> needed on CentOS but not applicable on Ubuntu, so skipped.
# setenforce
# sestatus
# setenforce 0
# sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
# Turn swap off.
sudo swapoff -a
# Keep swap disabled across reboots as well:
# sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
# sudo sed -i '/ swap / s/^/#/' /etc/fstab
# 'free' showed swap already at 0 (off) on this machine.
free
https://kubernetes.io/ko/docs/setup/production-environment/container-runtimes/
# Persist the kernel modules required by the container runtime (loaded on boot).
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
# Load them immediately without rebooting.
sudo modprobe overlay
sudo modprobe br_netfilter
# Setting the required sysctl parameters in /etc/sysctl.d keeps them across reboots.
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
# Apply the sysctl parameters now, without a reboot.
sudo sysctl --system
https://kubernetes.io/ko/docs/setup/production-environment/tools/kubeadm/install-kubeadm/
https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/kubelet-integration/#the-kubelet-drop-in-file-for-systemd
# Update the apt package index and install the packages needed to use the Kubernetes apt repository.
sudo apt-get update
sudo apt-get install -y apt-transport-https ca-certificates curl
# Download the Google Cloud public signing key.
sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg
# Add the Kubernetes apt repository.
# NOTE(review): the apt.kubernetes.io legacy repository has since been deprecated
# in favor of pkgs.k8s.io — confirm against the current kubeadm install docs.
echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
# Check which versions are available to install.
sudo apt-get update
apt-cache madison kubelet
kubelet | 1.26.0-00 | https://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
kubelet | 1.25.5-00 | https://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
kubelet | 1.25.4-00 | https://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
apt-cache madison kubeadm
kubeadm | 1.26.0-00 | https://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
kubeadm | 1.25.5-00 | https://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
kubeadm | 1.25.4-00 | https://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
apt-cache madison kubectl
kubectl | 1.26.0-00 | https://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
kubectl | 1.25.5-00 | https://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
kubectl | 1.25.4-00 | https://apt.kubernetes.io kubernetes-xenial/main amd64 Packages
sudo apt-get update
# Install a pinned version of each component.
# FIX: the third package was 'kubelet' listed twice — kubectl was never
# installed, even though the version check below assumes it is.
sudo apt-get install kubelet=1.26.0-00 \
kubeadm=1.26.0-00 \
kubectl=1.26.0-00
# Or install the latest versions automatically:
# sudo apt-get install -y kubelet kubeadm kubectl
# Check versions — with 1.26, installing kubectl also pulls in kustomize.
kubectl version --short
kubeadm version
kubelet --version
# Hold (pin) the installed versions of kubelet, kubeadm and kubectl.
sudo apt-mark hold kubelet kubeadm kubectl
# Set the kubelet root dir, following the second link above (kubelet drop-in file).
# debian -> /etc/default/kubelet
# rpm -> /etc/sysconfig/kubelet
# FIX: 'cat <<EOF > /etc/default/kubelet' performs the redirection as the
# unprivileged user and fails with permission denied; pipe through 'sudo tee'
# instead (same pattern used for the sysctl/modules files above).
cat <<EOF | sudo tee /etc/default/kubelet
KUBELET_EXTRA_ARGS=--root-dir="/container/k8s"
EOF
# Even with root-dir changed, the directory is not created until a cluster exists.
# Start kubelet and have it start on boot.
sudo systemctl daemon-reload
sudo systemctl enable kubelet && sudo systemctl start kubelet
https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/
# Create the cluster with default settings.
# FIX: kubeadm init must run as root — added sudo, matching the other
# kubeadm init invocations further below.
sudo kubeadm init
# If the message below appears, the init succeeded.
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a Pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
/docs/concepts/cluster-administration/addons/
You can now join any number of machines by running the following on each node
as root:
kubeadm join <control-plane-host>:<control-plane-port> --token <token> --discovery-token-ca-cert-hash sha256:<hash>
# Apply the three credential-copy lines from the init output above;
# adjust appropriately if a different user will operate the cluster.
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
sudo systemctl status kubelet
# Check the cluster state.
kubectl get node
kubectl describe node master0
# List everything created for the control plane.
# (Captured only after the network plugin was installed, so posting that output.)
# coredns starts out Pending at 0/1 and becomes ready once networking is up.
kubectl get po -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-787d4945fb-5df88 1/1 Running 2 (28m ago) 22h
kube-system coredns-787d4945fb-8wgwn 1/1 Running 2 (28m ago) 22h
kube-system etcd-master0 1/1 Running 2 (28m ago) 22h
kube-system kube-apiserver-master0 1/1 Running 2 (28m ago) 22h
kube-system kube-controller-manager-master0 1/1 Running 2 (28m ago) 22h
kube-system kube-proxy-j6xkn 1/1 Running 2 (28m ago) 22h
kube-system kube-scheduler-master0 1/1 Running 2 (28m ago) 22h
# Print the default kubeadm init configuration (output shown below).
kubeadm config print init-defaults
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.2.3.4
bindPort: 6443
nodeRegistration:
criSocket: unix:///var/run/containerd/containerd.sock
imagePullPolicy: IfNotPresent
name: node
taints: null
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: registry.k8s.io
kind: ClusterConfiguration
kubernetesVersion: 1.26.0
networking:
dnsDomain: cluster.local
serviceSubnet: 10.96.0.0/12
scheduler: {}
# Create the cluster from a configuration file (substitute the actual path).
sudo kubeadm init --config ${파일경로!!} --upload-certs --v=5 --ignore-preflight-errors=all
# Create the cluster specifying only the pod CIDR and the API server advertise address.
sudo kubeadm init --pod-network-cidr=10.32.0.0/12 --apiserver-advertise-address=192.168.199.3
# Tear the cluster down and remove its state.
sudo kubeadm reset
sudo rm -rf /etc/kubernetes
rm -rf ~/.kube
# To wipe everything cleanly, delete these as well.
sudo rm -rf /var/lib/etcd
# FIX: the directory name was misspelled 'calicao', so calico leftovers were never removed.
sudo rm -rf /var/lib/calico
sudo rm -rf /var/lib/cni/
# If the generated iptables rules look suspect, flush them too.
# FIX: added sudo — modifying iptables requires root.
sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X
https://docs.cilium.io/en/stable/gettingstarted/k8s-install-default/
# Fetch the latest cilium CLI version string.
CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/master/stable.txt)
# For ARM use aarch64 -> changed from the install docs to fit this machine.
CLI_ARCH=amd64
# Download the CLI tarball plus its checksum, verify, extract, clean up.
curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
# The docs say Linux kernel >= 4.9.17 is required, so check.
# Current kernel version:
uname -r
cilium install
cilium status --wait
# Remove the control-plane taint so workloads can schedule on this single node.
kubectl taint node master0 node-role.kubernetes.io/control-plane:NoSchedule-
# FIX: a single '&' backgrounded 'apt update', racing the install (and
# contending for the dpkg lock); '&&' runs the install only after the
# update succeeds.
sudo apt update && sudo apt install resolvconf
# Add Google's public DNS servers.
sudo vi /etc/resolv.conf
nameserver 8.8.4.4
nameserver 8.8.8.8
sudo service resolvconf restart
# Run the cilium end-to-end connectivity test (report shown below).
cilium connectivity test
================ 너무길어서 생략 ===================
📋 Test Report
❌ 5/31 tests failed (12/151 actions), 0 tests skipped, 1 scenarios skipped:
Test [no-policies]:
❌ no-policies/pod-to-world/http-to-one-one-one-one-0: cilium-test/client-6f6788d7cc-8chrj (10.0.0.227) -> one-one-one-one-http (one.one.one.one:80)
❌ no-policies/pod-to-world/https-to-one-one-one-one-0: cilium-test/client-6f6788d7cc-8chrj (10.0.0.227) -> one-one-one-one-https (one.one.one.one:443)
❌ no-policies/pod-to-world/https-to-one-one-one-one-index-0: cilium-test/client-6f6788d7cc-8chrj (10.0.0.227) -> one-one-one-one-https-index (one.one.one.one:443)
❌ no-policies/pod-to-world/http-to-one-one-one-one-1: cilium-test/client2-bc59f56d5-vc69z (10.0.0.237) -> one-one-one-one-http (one.one.one.one:80)
❌ no-policies/pod-to-world/https-to-one-one-one-one-1: cilium-test/client2-bc59f56d5-vc69z (10.0.0.237) -> one-one-one-one-https (one.one.one.one:443)
❌ no-policies/pod-to-world/https-to-one-one-one-one-index-1: cilium-test/client2-bc59f56d5-vc69z (10.0.0.237) -> one-one-one-one-https-index (one.one.one.one:443)
Test [to-entities-world]:
❌ to-entities-world/pod-to-world/http-to-one-one-one-one-0: cilium-test/client-6f6788d7cc-8chrj (10.0.0.227) -> one-one-one-one-http (one.one.one.one:80)
❌ to-entities-world/pod-to-world/http-to-one-one-one-one-1: cilium-test/client2-bc59f56d5-vc69z (10.0.0.237) -> one-one-one-one-http (one.one.one.one:80)
Test [client-egress-l7]:
❌ client-egress-l7/pod-to-world/http-to-one-one-one-one-1: cilium-test/client2-bc59f56d5-vc69z (10.0.0.237) -> one-one-one-one-http (one.one.one.one:80)
Test [client-egress-l7-named-port]:
❌ client-egress-l7-named-port/pod-to-world/http-to-one-one-one-one-0: cilium-test/client2-bc59f56d5-vc69z (10.0.0.237) -> one-one-one-one-http (one.one.one.one:80)
Test [to-fqdns]:
❌ to-fqdns/pod-to-world/http-to-one-one-one-one-0: cilium-test/client-6f6788d7cc-8chrj (10.0.0.227) -> one-one-one-one-http (one.one.one.one:80)
❌ to-fqdns/pod-to-world/http-to-one-one-one-one-1: cilium-test/client2-bc59f56d5-vc69z (10.0.0.237) -> one-one-one-one-http (one.one.one.one:80)
connectivity test failed: 5 tests failed
# Deploy the test nginx Deployment + NodePort Service.
kubectl apply -f nginx-deploy.yaml
# Check the pod IPs.
kubectl get po -o wide
kubectl get svc
# Verify both pods are registered as endpoints of the service.
kubectl describe svc nginx-service
# Hitting the NodePort returns the default nginx page.
curl localhost:30007
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
.... 생략 ....
# nginx-deploy.yaml
# FIX: re-indented — the pasted manifest had lost all YAML indentation and
# would not parse with kubectl apply. Content is otherwise unchanged.
# NodePort Service exposing the nginx pods on node port 30007.
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  selector:
    app: nginx
  type: NodePort
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: 80
      nodePort: 30007
---
# Deployment running two replicas of nginx 1.14.2.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.14.2
          ports:
            - containerPort: 80