user1@manager:~$ su
Password:
root@manager:/home/user1#
root@manager:/home/user1# cat /etc/fstab
# /etc/fstab: static file system information.
#
# Use 'blkid' to print the universally unique identifier for a
# device; this may be used with UUID= as a more robust way to name devices
# that works even if disks are added and removed. See fstab(5).
#
# <file system> <mount point> <type> <options> <dump> <pass>
# / was on /dev/sda5 during installation
UUID=5535a33d-56e1-4b72-a7d6-dcb513bd1a4b / ext4 errors=remount-ro 0 1
# /boot/efi was on /dev/sda1 during installation
UUID=0889-C55E /boot/efi vfat umask=0077 0 1
#/swapfile none swap sw 0 0
-> The /swapfile entry must be commented out (Kubernetes requires swap to be disabled).
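A minimal sketch of disabling swap (the fstab above already shows the entry commented out):
swapoff -a                                   # turn swap off for the running system
sed -i '/^\/swapfile/ s/^/#/' /etc/fstab     # comment out the swapfile entry so it stays off after a reboot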
root@manager:/home/user1# kubeadm init --apiserver-advertise-address 211.183.3.100
I0905 09:37:30.443478 3328 version.go:254] remote version is much newer: v1.25.0; falling back to: stable-1.21
[init] Using Kubernetes version: v1.21.14
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
...
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 211.183.3.100:6443 --token txl691.qficfxf8ramv1wd2 \
--discovery-token-ca-cert-hash sha256:55985f2922219aa58dcc448638c2bf33231eead697c49faaf12951b8a741817e
root@manager:/home/user1#
The current user can now manage the file that holds the configuration for the Kubernetes admin account -> in effect, the current user is registered as the Kubernetes administrator.
If the machine is rebooted (say, tomorrow morning), running cluster commands with kubectl may fail, because
export KUBECONFIG=/etc/kubernetes/admin.conf
would have to be entered again.
Alternatively, register KUBECONFIG=/etc/kubernetes/admin.conf in .bashrc so it is set automatically whenever a shell starts.
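A minimal sketch of that approach (assuming root's ~/.bashrc is used, as in this session):
echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >> ~/.bashrc
source ~/.bashrc   # apply it to the current shell as well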
root@manager:/home/user1# mkdir -p $HOME/.kube
root@manager:/home/user1# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
root@manager:/home/user1# sudo chown $(id -u):$(id -g) $HOME/.kube/config
root@manager:/home/user1# export KUBECONFIG=/etc/kubernetes/admin.conf
root@worker1:/home/user1# kubeadm join 211.183.3.100:6443 --token txl691.qficfxf8ramv1wd2 \
> --discovery-token-ca-cert-hash sha256:55985f2922219aa58dcc448638c2bf33231eead697c49faaf12951b8a741817e
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
root@manager:/home/user1# kubectl get node
NAME STATUS ROLES AGE VERSION
manager NotReady control-plane,master 10m v1.21.0
worker1 NotReady <none> 85s v1.21.0
worker2 NotReady <none> 80s v1.21.0
worker3 NotReady <none> 75s v1.21.0
root@manager:/home/user1# docker login
Login with your Docker ID to push and pull images from Docker Hub. If you don't have a Docker ID, head over to https://hub.docker.com to create one.
Username: ptah0414
Password:
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
Login Succeeded
root@manager:/home/user1# kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
poddisruptionbudget.policy/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
serviceaccount/calico-node created
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipreservations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
root@manager:/home/user1# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-867d8d6bd8-zjtvl 0/1 Pending 0 48s
calico-node-px2vp 0/1 Init:0/3 0 47s
calico-node-qmb6g 0/1 Init:0/3 0 48s
calico-node-r9p9m 0/1 Init:ImagePullBackOff 0 48s
calico-node-vd987 0/1 Init:0/3 0 47s
coredns-558bd4d5db-jqbrm 0/1 Pending 0 12m
coredns-558bd4d5db-v55cn 0/1 Pending 0 12m
etcd-manager 1/1 Running 0 13m
kube-apiserver-manager 1/1 Running 0 13m
kube-controller-manager-manager 1/1 Running 0 13m
kube-proxy-2ttkz 1/1 Running 0 3m46s
kube-proxy-f25xm 1/1 Running 0 3m41s
kube-proxy-t47dm 1/1 Running 0 12m
kube-proxy-x2z4k 1/1 Running 0 3m51s
kube-scheduler-manager 1/1 Running 0 13m
-> Installation still in progress (images being pulled, pods initializing).
root@manager:/home/user1# kubectl get pod -n kube-system | grep proxy
kube-proxy-2ttkz 1/1 Running 0 21m
kube-proxy-f25xm 1/1 Running 0 21m
kube-proxy-t47dm 1/1 Running 0 31m
kube-proxy-x2z4k 1/1 Running 0 21m
-> There are four kube-proxy pods, one per node (master, worker1, worker2, worker3).
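To confirm which node each kube-proxy pod is running on, the wide output can be used (a quick check, not captured in this session):
kubectl get pod -n kube-system -o wide | grep proxy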
root@manager:/home/user1# kubectl get pod -n kube-system | grep scheduler
kube-scheduler-manager 1/1 Running 0 32m
-> There is a single scheduler pod, belonging to the manager (control-plane) node.
root@manager:/home/user1# kubectl get pod -n kube-system | grep coredns
coredns-558bd4d5db-jqbrm 1/1 Running 0 48m
coredns-558bd4d5db-v55cn 1/1 Running 0 48m
Each pod created on a node is assigned its own independent IP address and a corresponding domain name. We can make pods communicate through CoreDNS, where this information is stored.
There are two CoreDNS pods, located on the worker2 node.
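As a quick illustration (a hypothetical check, not run in this session), cluster DNS served by CoreDNS can be tested from a throwaway pod:
kubectl run dnstest --rm -it --restart=Never --image=busybox:1.28 -- nslookup kubernetes.default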
root@manager:/home/user1# kubectl get pod -n kube-system | grep calico
calico-kube-controllers-867d8d6bd8-zjtvl 1/1 Running 0 20m
calico-node-px2vp 1/1 Running 0 20m
calico-node-qmb6g 1/1 Running 0 20m
calico-node-r9p9m 1/1 Running 0 20m
calico-node-vd987 1/1 Running 0 20m
CNI (Container Network Interface) is an interface developed for container networking; because it is standardized, any container can be connected regardless of its implementation.
CNI plugins are broadly divided into L2-based and L3-based types.
Whichever of the two is used, pod-to-pod communication works without problems.
Image source: https://medium.com/@jain.sm/flannel-vs-calico-a-battle-of-l2-vs-l3-based-networking-5a30cd0a3ebd
The API server, scheduler, controllers, etc. on the manager are all created as pods and provide their services that way; CoreDNS and Calico are also delivered as pods. Each of them therefore goes through the image-download step before the pod can be created.
root@manager:/home/user1# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-867d8d6bd8-zjtvl 1/1 Running 0 38m
calico-node-px2vp 1/1 Running 0 38m
calico-node-qmb6g 1/1 Running 0 38m
calico-node-r9p9m 1/1 Running 0 38m
calico-node-vd987 1/1 Running 0 38m
coredns-558bd4d5db-jqbrm 1/1 Running 0 50m
coredns-558bd4d5db-v55cn 1/1 Running 0 50m
etcd-manager 1/1 Running 0 50m
kube-apiserver-manager 1/1 Running 0 50m
kube-controller-manager-manager 1/1 Running 0 50m
kube-proxy-2ttkz 1/1 Running 0 41m
kube-proxy-f25xm 1/1 Running 0 41m
kube-proxy-t47dm 1/1 Running 0 50m
kube-proxy-x2z4k 1/1 Running 0 41m
kube-scheduler-manager 1/1 Running 0 50m
root@manager:/home/user1# wget https://docs.projectcalico.org/manifests/calico.yaml
--2022-09-05 10:36:42-- https://docs.projectcalico.org/manifests/calico.yaml
Resolving docs.projectcalico.org (docs.projectcalico.org)... 34.143.223.220, 52.220.193.16, 2406:da18:880:3802:371c:4bf1:923b:fc30, ...
Connecting to docs.projectcalico.org (docs.projectcalico.org)|34.143.223.220|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 234906 (229K) [text/yaml]
Saving to: ‘calico.yaml’
calico.yaml 100%[=====================================>] 229.40K 614KB/s in 0.4s
2022-09-05 10:36:43 (614 KB/s) - ‘calico.yaml’ saved [234906/234906]
root@manager:/home/user1# cat calico.yaml | grep image:
image: docker.io/calico/cni:v3.24.1
image: docker.io/calico/cni:v3.24.1
image: docker.io/calico/node:v3.24.1
image: docker.io/calico/node:v3.24.1
image: docker.io/calico/kube-controllers:v3.24.1
-> Calico consists of three images, which can be pre-pulled on each node:
docker pull docker.io/calico/cni:v3.24.1
docker pull docker.io/calico/node:v3.24.1
docker pull docker.io/calico/kube-controllers:v3.24.1
In a public cloud environment, a load balancer can easily be used to connect pods with external users. On-premises, however, no such LB service is available. MetalLB solves this and makes LB-type services usable.
root@manager:/home/user1# kubectl create ns metallb-system
namespace/metallb-system created
root@manager:/home/user1# kubectl get ns
NAME STATUS AGE
default Active 61m
kube-node-lease Active 61m
kube-public Active 61m
kube-system Active 61m
metallb-system Active 37s
root@manager:/home/user1# kubectl apply -f \
> https://raw.githubusercontent.com/metallb/metallb/v0.12.1/manifests/metallb.yaml
Warning: policy/v1beta1 PodSecurityPolicy is deprecated in v1.21+, unavailable in v1.25+
podsecuritypolicy.policy/controller created
podsecuritypolicy.policy/speaker created
serviceaccount/controller created
serviceaccount/speaker created
clusterrole.rbac.authorization.k8s.io/metallb-system:controller created
clusterrole.rbac.authorization.k8s.io/metallb-system:speaker created
role.rbac.authorization.k8s.io/config-watcher created
role.rbac.authorization.k8s.io/pod-lister created
role.rbac.authorization.k8s.io/controller created
clusterrolebinding.rbac.authorization.k8s.io/metallb-system:controller created
clusterrolebinding.rbac.authorization.k8s.io/metallb-system:speaker created
rolebinding.rbac.authorization.k8s.io/config-watcher created
rolebinding.rbac.authorization.k8s.io/pod-lister created
rolebinding.rbac.authorization.k8s.io/controller created
daemonset.apps/speaker created
deployment.apps/controller created
root@manager:/home/user1# mkdir k8slab ; cd k8slab
root@manager:/home/user1/k8slab# vi metallb.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 211.183.3.201-211.183.3.211 # LB addresses are assigned from this range
      - 211.183.3.231-211.183.3.239 # LB addresses are assigned from this range
Range 1: 211.183.3.201 ~ .211
Range 2: 211.183.3.231 ~ .239
root@manager:/home/user1/k8slab# kubectl apply -f metallb.yaml
configmap/config created
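To double-check what was applied, the ConfigMap can be read back (a verification step, assuming the names used above):
kubectl get configmap config -n metallb-system -o yaml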
root@manager:/home/user1/k8slab# vi nginx-deploy-svc.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec: # ReplicaSet settings below
  replicas: 3
  selector: # pods are managed by matching the labels below
    matchLabels:
      app: webserver
  template: # pod template below
    metadata:
      name: my-webserver # name of the pod
      labels:
        app: webserver
    spec: # container settings below
      containers:
      - name: my-webserver # name of the container
        image: nginx
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-lb
spec:
  ports:
  - name: web-port
    port: 80 # port of the LB
  selector:
    app: webserver
  type: LoadBalancer
root@manager:/home/user1/k8slab# kubectl apply -f nginx-deploy-svc.yaml
deployment.apps/nginx-deployment created
service/nginx-lb created
root@manager:/home/user1/k8slab# kubectl get pod,svc
NAME READY STATUS RESTARTS AGE
pod/nginx-deployment-5fdcfffc56-5szpw 1/1 Running 0 67s
pod/nginx-deployment-5fdcfffc56-ctbhm 1/1 Running 0 67s
pod/nginx-deployment-5fdcfffc56-gqk7l 0/1 ContainerCreating 0 67s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 119m
service/nginx-lb LoadBalancer 10.110.108.162 211.183.3.201 80:30105/TCP 67s
LoadBalancer IP: 211.183.3.201
root@manager:/home/user1/k8slab# kubectl get pod -w
NAME READY STATUS RESTARTS AGE
nginx-deployment-5fdcfffc56-5szpw 1/1 Running 0 2m40s
nginx-deployment-5fdcfffc56-ctbhm 1/1 Running 0 2m40s
nginx-deployment-5fdcfffc56-gqk7l 1/1 Running 0 2m40s
-> All three pods are now Running; creation is complete.
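The external IP can be tested with a quick request from any host on the 211.183.3.0/24 network (output not captured here):
curl http://211.183.3.201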
root@manager:/home/user1/k8slab# cp nginx-deploy-svc.yaml nginx2-deploy-svc.yaml
root@manager:/home/user1/k8slab# vi nginx2-deploy-svc.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx2-deployment
spec: # ReplicaSet settings below
  replicas: 3
  selector: # pods are managed by matching the labels below
    matchLabels:
      app: webserver2
  template: # pod template below
    metadata:
      name: my-webserver2 # name of the pod
      labels:
        app: webserver2
    spec: # container settings below
      containers:
      - name: my-webserver2 # name of the container
        image: nginx
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx2-lb
spec:
  ports:
  - name: web2-port
    port: 80
  selector:
    app: webserver2
  type: LoadBalancer
root@manager:/home/user1/k8slab# k apply -f nginx2-deploy-svc.yaml
deployment.apps/nginx2-deployment created
service/nginx2-lb created
root@manager:/home/user1/k8slab# k get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 127m
nginx-lb LoadBalancer 10.110.108.162 211.183.3.201 80:30105/TCP 9m27s
nginx2-lb LoadBalancer 10.109.18.134 211.183.3.202 80:31146/TCP 15s
IP of the newly created LoadBalancer: 211.183.3.202
root@manager:/home/user1/k8slab# k delete -f nginx2-deploy-svc.yaml
deployment.apps "nginx2-deployment" deleted
service "nginx2-lb" deleted
If our company were a provider that manages pods for many customers, each customer would be given its own namespace, and services built from pods, svc, and so on would be provided inside that namespace.
kube-system is the namespace where the pods needed for Kubernetes itself to operate are managed.
root@manager:/home/user1# k get pod -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-867d8d6bd8-zjtvl 1/1 Running 0 3h47m
calico-node-px2vp 1/1 Running 0 3h47m
calico-node-qmb6g 1/1 Running 0 3h47m
calico-node-r9p9m 1/1 Running 0 3h47m
calico-node-vd987 1/1 Running 0 3h47m
coredns-558bd4d5db-jqbrm 1/1 Running 0 3h59m
coredns-558bd4d5db-v55cn 1/1 Running 0 3h59m
etcd-manager 1/1 Running 0 3h59m
kube-apiserver-manager 1/1 Running 0 3h59m
kube-controller-manager-manager 1/1 Running 0 3h59m
kube-proxy-2ttkz 1/1 Running 0 3h50m
kube-proxy-f25xm 1/1 Running 0 3h50m
kube-proxy-t47dm 1/1 Running 0 3h59m
kube-proxy-x2z4k 1/1 Running 0 3h50m
kube-scheduler-manager 1/1 Running 0 3h59m
kubectl api-resources
If several people need to use the Kubernetes cluster at the same time, a separate namespace can be created for each user (this is how an MSP manages its customers).
Resources in a namespace are only logically separated, not physically isolated, so pods created in different namespaces can end up on the same node.
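This can be seen directly by listing pods across all namespaces together with the node each one landed on (a quick check, not captured in this session):
kubectl get pod -A -o wide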
Example of creating a namespace:
root@manager:/home/user1# kubectl create ns testns
When deploying manifests, the namespace must additionally be specified (in the metadata section).
root@manager:/home/user1/k8slab# k get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deployment 3/3 3 3 135m
root@manager:/home/user1/k8slab# k delete deploy nginx-deployment
deployment.apps "nginx-deployment" deleted
root@manager:/home/user1/k8slab# vi nginx-deploy-svc.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: rapa # namespace name
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  namespace: rapa # namespace name
spec: # ReplicaSet settings below
  replicas: 3
  selector: # pods are managed by matching the labels below
    matchLabels:
      app: webserver
  template: # pod template below
    metadata:
      name: my-webserver # name of the pod
      labels:
        app: webserver
    spec: # container settings below
      containers:
      - name: my-webserver # name of the container
        image: nginx
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-lb
  namespace: rapa # namespace name
spec:
  ports:
  - name: web-port
    port: 80
  selector:
    app: webserver
  type: LoadBalancer
A Namespace resource (apiVersion: v1, kind: Namespace, metadata.name: rapa) is declared at the top of the file, and namespace: rapa must also be added to the metadata of each Deployment and Service.
root@manager:/home/user1/k8slab# k apply -f nginx-deploy-svc.yaml
namespace/rapa created
deployment.apps/nginx-deployment created
service/nginx-lb created
root@manager:/home/user1/k8slab# k get deploy,pod,svc -n rapa
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nginx-deployment 3/3 3 3 28s
NAME READY STATUS RESTARTS AGE
pod/nginx-deployment-5fdcfffc56-8fk7x 1/1 Running 0 28s
pod/nginx-deployment-5fdcfffc56-mfmxn 1/1 Running 0 28s
pod/nginx-deployment-5fdcfffc56-t2g46 1/1 Running 0 28s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/nginx-lb LoadBalancer 10.102.169.185 211.183.3.202 80:32710/TCP 28s
root@manager:/home/user1/k8slab# k create configmap testmap \
> --from-literal k8s=kubernetes \
> --from-literal container=docker
configmap/testmap created
root@manager:/home/user1/k8slab# k get cm
NAME DATA AGE
kube-root-ca.crt 1 4h32m
testmap 2 23s
root@manager:/home/user1/k8slab# k describe cm testmap
Name: testmap
Namespace: default
Labels: <none>
Annotations: <none>
Data
====
container:
----
docker
k8s:
----
kubernetes
Events: <none>
root@manager:/home/user1/k8slab# k create cm cmtest1 \
> --from-literal name=gildong \
> --from-literal age=24
configmap/cmtest1 created
root@manager:/home/user1/k8slab#
root@manager:/home/user1/k8slab# k create cm cmtest2 \
> --from-literal name=chulsoo \
> --from-literal age=25
configmap/cmtest2 created
root@manager:/home/user1/k8slab# k get cm
NAME DATA AGE
cmtest1 2 56s
cmtest2 2 27s
kube-root-ca.crt 1 5h3m
testmap 2 31m
root@manager:/home/user1/k8slab# k describe cm cmtest1
Name: cmtest1
Namespace: default
Labels: <none>
Annotations: <none>
Data
====
age:
----
24
name:
----
gildong
Events: <none>
root@manager:/home/user1/k8slab# vi cmtestpod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: cmtestpod
spec: # container settings
  containers:
  - name: cmtestpod-ctn
    image: busybox
    args: ['tail', '-f', '/dev/null'] # keeps tailing /dev/null so the container never exits
    envFrom:
    - configMapRef:
        name: cmtest1
root@manager:/home/user1/k8slab# k apply -f cmtestpod.yaml
pod/cmtestpod created
root@manager:/home/user1/k8slab# k get pod
NAME READY STATUS RESTARTS AGE
cmtestpod 0/1 ContainerCreating 0 19s
root@manager:/home/user1/k8slab#
root@manager:/home/user1/k8slab# k exec cmtestpod -- env
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
HOSTNAME=cmtestpod
age=24
name=gildong
KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443
KUBERNETES_PORT_443_TCP_PROTO=tcp
NGINX_LB_PORT_80_TCP_PORT=80
NGINX_LB_PORT_80_TCP_ADDR=10.110.108.162
KUBERNETES_SERVICE_PORT=443
KUBERNETES_SERVICE_PORT_HTTPS=443
NGINX_LB_PORT_80_TCP_PROTO=tcp
KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1
NGINX_LB_SERVICE_PORT_WEB_PORT=80
KUBERNETES_PORT=tcp://10.96.0.1:443
NGINX_LB_SERVICE_HOST=10.110.108.162
NGINX_LB_SERVICE_PORT=80
NGINX_LB_PORT=tcp://10.110.108.162:80
NGINX_LB_PORT_80_TCP=tcp://10.110.108.162:80
KUBERNETES_SERVICE_HOST=10.96.0.1
KUBERNETES_PORT_443_TCP_PORT=443
HOME=/root
-> The key-value pairs from the cmtest1 ConfigMap (name=gildong, age=24) are registered as environment variables in the container.
root@manager:/home/user1/k8slab# vi cmtestpodvol.yaml
apiVersion: v1
kind: Pod
metadata:
  name: cmtestpodvol
spec:
  containers:
  - name: cmtestpodvolctn
    image: busybox
    args: ['tail', '-f', '/dev/null']
    volumeMounts:
    - name: cmtestpod-volume # attaches the volume defined below
      mountPath: /etc/testcm # files named age and name will be created here
  volumes:
  - name: cmtestpod-volume
    configMap:
      name: cmtest2
root@manager:/home/user1/k8slab# k apply -f cmtestpodvol.yaml
pod/cmtestpodvol created
root@manager:/home/user1/k8slab# k get pod
NAME READY STATUS RESTARTS AGE
cmtestpod 1/1 Running 0 11m
cmtestpodvol 1/1 Running 0 31s
root@manager:/home/user1/k8slab# k exec cmtestpodvol -- ls /etc/testcm
age
name
root@manager:/home/user1/k8slab# k exec cmtestpodvol -- cat /etc/testcm/age
25
25root@manager:/home/user1/k8slab# k exec cmtestpodvol -- cat /etc/testcm/name
chulsooroot
Image source: https://www.kloia.com/blog/kubernetes-horizontal-pod-autoscaler
Image source: https://www.devopsschool.com/blog/what-is-metrics-server-and-how-to-install-metrics-server/
When installing the metrics server straight from the internet, authentication with a trusted certificate would normally have to succeed first. Since we have not issued such certificates, we add --kubelet-insecure-tls so that skipping this verification step causes no problems.
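Concretely, the flag is appended to the args of the metrics-server container inside the Deployment in components.yaml (a sketch; the flags already shipped in the file are kept as-is and only the last line is added):
      containers:
      - name: metrics-server
        args:
        - ...                      # existing flags from components.yaml stay unchanged
        - --kubelet-insecure-tls   # added: skip verification of the kubelets' serving certificates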
Pod autoscaling checks the configured resource-usage threshold and, when it is exceeded, scales out horizontally. The pods therefore need their resource allocation specified in advance.
For reference:
1000m = 1 CPU
500m = 0.5 CPU
200m = 0.2 CPU
resources:
  limits:
    cpu: 500m
  requests:
    cpu: 200m
-> The guaranteed minimum is 0.2 CPU (requests).
-> If other pods are not using the CPU and there is spare physical capacity, the pod may burst up to 0.5 CPU (limits).
Quiz.
root@manager:/home/user1/k8slab# wget https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
--2022-09-05 15:51:47-- https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
Resolving github.com (github.com)... 20.200.245.247
Connecting to github.com (github.com)|20.200.245.247|:443... connected.
HTTP request sent, awaiting response... 302 Found
Location: https://github.com/kubernetes-sigs/metrics-server/releases/download/metrics-server-helm-chart-3.8.2/components.yaml [following]
--2022-09-05 15:51:48-- https://github.com/kubernetes-sigs/metrics-server/releases/download/metrics-server-helm-chart-3.8.2/components.yaml
Reusing existing connection to github.com:443.
HTTP request sent, awaiting response... 302 Found
Location: https://objects.githubusercontent.com/github-production-release-asset-2e65be/92132038/d85e100a-2404-4c5e-b6a9-f3814ad4e6e5?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20220905%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20220905T065148Z&X-Amz-Expires=300&X-Amz-Signature=f2be64e6059ec6ed0d4f8a265b92c8677c57a90bb482c6e6d415b3399b9e8bd8&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=92132038&response-content-disposition=attachment%3B%20filename%3Dcomponents.yaml&response-content-type=application%2Foctet-stream [following]
--2022-09-05 15:51:48-- https://objects.githubusercontent.com/github-production-release-asset-2e65be/92132038/d85e100a-2404-4c5e-b6a9-f3814ad4e6e5?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20220905%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20220905T065148Z&X-Amz-Expires=300&X-Amz-Signature=f2be64e6059ec6ed0d4f8a265b92c8677c57a90bb482c6e6d415b3399b9e8bd8&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=92132038&response-content-disposition=attachment%3B%20filename%3Dcomponents.yaml&response-content-type=application%2Foctet-stream
Resolving objects.githubusercontent.com (objects.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...
Connecting to objects.githubusercontent.com (objects.githubusercontent.com)|185.199.108.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 4181 (4.1K) [application/octet-stream]
Saving to: ‘components.yaml’
components.yaml 100%[==================================================================================================================>] 4.08K --.-KB/s in 0s
2022-09-05 15:51:49 (37.2 MB/s) - ‘components.yaml’ saved [4181/4181]
root@manager:/home/user1/k8slab# vi components.yaml
root@manager:/home/user1/k8slab# k apply -f components.yaml
serviceaccount/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
service/metrics-server created
deployment.apps/metrics-server created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
root@manager:/home/user1/k8slab# vi autoscaletest.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: autoscaletest
spec:
  selector:
    matchLabels:
      color: black
  replicas: 3
  template: # pod template below
    metadata:
      labels:
        color: black
    spec: # container settings below
      containers:
      - name: autoscaletest-nginx
        image: nginx
        ports:
        - containerPort: 80
        resources:
          limits:
            cpu: 500m
          requests:
            cpu: 200m
---
apiVersion: v1
kind: Service
metadata:
  name: nginx3-lb
spec:
  ports:
  - name: web3-port
    port: 80
  selector:
    color: black # label of the pods
  type: LoadBalancer
The autoscaling-related settings in the Deployment above are the resources section:
resources:
  limits:
    cpu: 500m
  requests:
    cpu: 200m
root@manager:/home/user1/k8slab# k apply -f autoscaletest.yaml
deployment.apps/autoscaletest created
service/nginx3-lb created
root@manager:/home/user1/k8slab# k get deploy,svc
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/autoscaletest 3/3 3 3 4m12s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 6h25m
service/nginx-lb LoadBalancer 10.110.108.162 211.183.3.201 80:30105/TCP 4h27m
service/nginx3-lb LoadBalancer 10.103.102.245 211.183.3.203 80:32370/TCP 4m12s
root@manager:/home/user1/k8slab# k autoscale deploy autoscaletest \
> --cpu-percent=10 \
> --min=1 \
> --max=20
horizontalpodautoscaler.autoscaling/autoscaletest autoscaled
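The same HPA could also be written declaratively; a minimal sketch equivalent to the imperative command above, using the autoscaling/v1 API:
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: autoscaletest
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: autoscaletest
  minReplicas: 1
  maxReplicas: 20
  targetCPUUtilizationPercentage: 10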
root@manager:/home/user1/k8slab# k get deploy,svc,pod
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/autoscaletest 3/3 3 3 6m8s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 6h27m
service/nginx-lb LoadBalancer 10.110.108.162 211.183.3.201 80:30105/TCP 4h29m
service/nginx3-lb LoadBalancer 10.103.102.245 211.183.3.203 80:32370/TCP 6m8s
NAME READY STATUS RESTARTS AGE
pod/autoscaletest-7c7986bdcb-dm2m8 1/1 Running 0 6m8s
pod/autoscaletest-7c7986bdcb-fwsrl 1/1 Running 0 6m8s
pod/autoscaletest-7c7986bdcb-l5sz4 1/1 Running 0 6m8s
pod/cmtestpod 1/1 Running 0 77m
pod/cmtestpodvol 1/1 Running 0 66m
root@manager:/home/user1/k8slab# k get pod
NAME READY STATUS RESTARTS AGE
autoscaletest-7c7986bdcb-dm2m8 1/1 Running 0 7m36s
autoscaletest-7c7986bdcb-fwsrl 1/1 Running 0 7m36s
autoscaletest-7c7986bdcb-l5sz4 1/1 Running 0 7m36s
cmtestpod 1/1 Running 0 79m
cmtestpodvol 1/1 Running 0 68m
root@manager:/home/user1/k8slab# kubectl top no --use-protocol-buffers ; kubectl get hpa
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
manager 334m 8% 2096Mi 55%
worker1 189m 4% 968Mi 52%
worker2 204m 5% 1051Mi 57%
worker3 188m 4% 955Mi 51%
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
autoscaletest Deployment/autoscaletest 0%/10% 1 20 3 2m11s
-> Three replicas are currently running.
[worker1]
root@worker1:/home/user1# ab -c 1000 -n 200 -t 60 http://211.183.3.203:80/
This is ApacheBench, Version 2.3 <$Revision: 1843412 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/
Benchmarking 211.183.3.203 (be patient)
Completed 5000 requests
Completed 10000 requests
Completed 15000 requests
Completed 20000 requests
Completed 25000 requests
Completed 30000 requests
Completed 35000 requests
Completed 40000 requests
Completed 45000 requests
apr_pollset_poll: The timeout specified has expired (70007)
Total of 49981 requests completed
root@manager:/home/user1/k8slab# kubectl top no --use-protocol-buffers ; kubectl get hpa -w
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
manager 376m 9% 2195Mi 57%
worker1 318m 7% 1030Mi 55%
worker2 258m 6% 1033Mi 56%
worker3 297m 7% 950Mi 51%
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
autoscaletest Deployment/autoscaletest 1%/10% 1 20 4 15m
autoscaletest Deployment/autoscaletest 0%/10% 1 20 8 15m
autoscaletest Deployment/autoscaletest 0%/10% 1 20 16 15m
autoscaletest Deployment/autoscaletest 0%/10% 1 20 17 15m
autoscaletest Deployment/autoscaletest 0%/10% 1 20 17 16m
autoscaletest Deployment/autoscaletest 0%/10% 1 20 17 21m
autoscaletest Deployment/autoscaletest 0%/10% 1 20 1 21m
When load was generated, scale-out kicked in and the number of pods grew to 17.
After some time without load, scale-in brought the number of pods back down to the configured minimum of 1.
manager: apt install -y nfs-server
worker: apt install -y nfs-common -> the workers are the NFS clients
no_root_squash: a file written from a remote node is treated on the local NFS server as if root had written it (the owner shows up as root).
/etc/exports entry format: /root/pvpvc/shared [allowed addresses](rw,no_root_squash,sync)
root@manager:~# apt install -y nfs-server
root@manager:~# systemctl status nfs-server | grep Active
Active: active (exited) since Mon 2022-09-05 17:30:08 KST; 1min 45s ago
root@manager:~# mkdir -p ~/pvpvc/shared
root@manager:~# chmod 777 pvpvc/shared/
(However, access should be restricted so that only the pods can connect.)
root@manager:~# vi /etc/exports
/root/pvpvc/shared 211.183.3.*(rw,no_root_squash,sync)
Allowed IPs: 211.183.3.*
root@manager:~# ufw disable
Firewall stopped and disabled on system startup
root@manager:~# systemctl restart nfs-server
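The export can then be verified from the server side (a check not captured in this session):
exportfs -v                    # list what is currently exported and with which options
showmount -e 211.183.3.100     # the export list as a client would see it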
root@manager:~/k8slab# vi nfs-pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nfs-pod
spec:
  containers:
  - name: nfs-mount-container
    image: busybox
    args: [ "tail", "-f", "/dev/null" ]
    volumeMounts:
    - name: nfs-volume
      mountPath: /mnt # mounted at /mnt inside the pod's container
  volumes:
  - name: nfs-volume
    nfs: # mounts the NFS server's export into the pod's container
      path: /root/pvpvc/shared
      server: 211.183.3.100
root@manager:~/k8slab# k apply -f nfs-pod.yaml
pod/nfs-pod created
Note: the share must be accessible from every pod, so the NFS client package is installed on all worker nodes.
[worker 1, 2, 3]
root@worker1:/home/user1# apt install -y nfs-common
root@manager:~/k8slab# touch /root/pvpvc/shared/test.txt
root@manager:~/k8slab# k exec nfs-pod -- ls /mnt
test.txt
YAML syntax notes
A single file can contain multiple documents separated by ---, for example a document starting with apiVersion: v1 followed by --- and another starting with apiVersion: apps/v1.
A quoted scalar written across several lines:
'
abc
def
'
-> output: abcdef
The | block scalar keeps line breaks exactly as written, as in the MetalLB config:
config: |
  address-pools:
  - name: default
    protocol: layer2
    addresses:
    - 211.183.3.201-211.183.3.239
Additionally, writing |- instead of | strips the trailing newline even when the last line is left blank.
With |, the input lines are preserved as-is:
abc
def
ghi
⬇
abc
def
ghi
root@manager:/home/user1/k8slab# alias k='kubectl'
root@manager:/home/user1/k8slab# k get pod
NAME READY STATUS RESTARTS AGE
nginx-deployment-5fdcfffc56-5szpw 1/1 Running 0 3m45s
nginx-deployment-5fdcfffc56-ctbhm 1/1 Running 0 3m45s
nginx-deployment-5fdcfffc56-gqk7l 1/1 Running 0 3m45s
The class is moving too fast ㅠㅠㅠ