Ubuntu 패키지 저장소 변경
# Switch the Ubuntu package mirrors to Kakao (faster from KR), then refresh indexes.
# /etc/apt/sources.list is root-owned, so sed needs sudo (L4 already used it — made consistent).
sudo sed -i 's/security.ubuntu.com/mirror.kakao.com/g' /etc/apt/sources.list
sudo sed -i 's/archive.ubuntu.com/mirror.kakao.com/g' /etc/apt/sources.list
sudo apt update
Upgrade order: Control Plane (kube-apiserver -> controller-manager, cloud-controller-manager, scheduler -> kubelet, kube-proxy) --> Worker Nodes (kubelet, kube-proxy)
Control Plane
# --- Control plane upgrade to v1.22.9 (kubeadm cluster) ---
# Order: upgrade kubeadm -> kubeadm upgrade apply -> drain -> upgrade kubelet/kubectl -> restart -> uncordon.
sudo apt-mark unhold kubeadm
sudo apt update
# Pin the exact version with 'apt install pkg=version' (the form used by the official upgrade docs).
sudo apt install kubeadm=1.22.9-00 -y
kubeadm version
sudo apt-mark hold kubeadm
sudo kubeadm upgrade plan
sudo kubeadm upgrade apply v1.22.9
sudo apt-mark unhold kubelet kubectl
sudo apt install kubectl=1.22.9-00 kubelet=1.22.9-00 -y
sudo apt-mark hold kubelet kubectl
kubelet --version
kubectl version
# Drain this node before restarting the kubelet:
#   kubectl drain <node-name> --ignore-daemonsets
sudo systemctl daemon-reload
sudo systemctl restart kubelet
# Bring the node back into scheduling afterwards:
#   kubectl uncordon <node-name>
systemctl status kubelet
Worker Node
# --- Worker node upgrade to v1.22.9 ---
sudo apt-mark unhold kubeadm
sudo apt update
# Pin the exact version with 'apt install pkg=version' (official upgrade-docs form).
sudo apt install kubeadm=1.22.9-00 -y
kubeadm version
sudo apt-mark hold kubeadm
# Upgrades the local kubelet configuration on the worker.
sudo kubeadm upgrade node
# Drain this node first (run from a control-plane host / wherever kubectl is configured):
#   kubectl drain <node-name> --ignore-daemonsets
sudo apt-mark unhold kubelet kubectl
sudo apt install kubectl=1.22.9-00 kubelet=1.22.9-00 -y
sudo apt-mark hold kubelet kubectl
kubelet --version
kubectl version
sudo systemctl daemon-reload
sudo systemctl restart kubelet
# Re-enable scheduling on the node (again from a control-plane host):
#   kubectl uncordon <node-name>
https://kubernetes.io/ko/docs/setup/production-environment/tools/kubespray/
https://kubespray.io/#/
https://github.com/kubernetes-sigs/kubespray
Control Plane 1
Nodes: 3 (1 Control Plane + 2 Worker Nodes)
CPU: 2, Memory 3GB
~/vagrant/k8s
# Vagrantfile: three identical Ubuntu 20.04 VMs for a kubespray test cluster
# (k8s-node1..3, 192.168.100.100-102, 2 vCPU / 3000 MB each).
Vagrant.configure("2") do |config|
  # The three node definitions differed only by index and IP — generate them
  # in a loop instead of copy-pasting the same stanza three times.
  (1..3).each do |i|
    config.vm.define "k8s-node#{i}" do |ubuntu|
      ubuntu.vm.box = "ubuntu/focal64"
      ubuntu.vm.hostname = "k8s-node#{i}"
      # 99 + i => .100, .101, .102
      ubuntu.vm.network "private_network", ip: "192.168.100.#{99 + i}"
      ubuntu.vm.provider "virtualbox" do |vb|
        vb.name = "k8s-node#{i}"
        vb.cpus = 2
        vb.memory = 3000
      end
    end
  end
  # Shared provisioning: allow password SSH (needed for ssh-copy-id later),
  # switch apt mirrors to Kakao, and restart sshd to apply the config change.
  config.vm.provision "shell", inline: <<-SHELL
    sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
    sed -i 's/archive.ubuntu.com/mirror.kakao.com/g' /etc/apt/sources.list
    sed -i 's/security.ubuntu.com/mirror.kakao.com/g' /etc/apt/sources.list
    systemctl restart ssh
  SHELL
end
# Generate an SSH key pair on the kubespray/ansible control host (accept defaults).
ssh-keygen
# Copy the public key to every node so ansible can log in without a password.
ssh-copy-id vagrant@192.168.100.100
ssh-copy-id vagrant@192.168.100.101
ssh-copy-id vagrant@192.168.100.102
cd ~
# Fetch kubespray pinned at release v2.18.1.
git clone -b v2.18.1 https://github.com/kubernetes-sigs/kubespray.git
cd kubespray
sudo apt update
sudo apt install python3-pip -y
# Install ansible and the other python dependencies kubespray requires.
sudo pip3 install -r requirements.txt
# Start the cluster inventory from the bundled sample (-p keeps modes/timestamps).
cp -rpf inventory/sample/ inventory/mycluster
inventory/mycluster/inventory.ini
# Ansible inventory for kubespray (inventory/mycluster/inventory.ini).
# Addresses match the Vagrant private_network IPs defined above.
[all]
node1 ansible_host=192.168.100.100 ip=192.168.100.100
node2 ansible_host=192.168.100.101 ip=192.168.100.101
node3 ansible_host=192.168.100.102 ip=192.168.100.102
# node1 doubles as control plane, etcd member, and worker.
[kube_control_plane]
node1
[etcd]
node1
[kube_node]
node1
node2
node3
# Calico route reflectors: intentionally empty for this small cluster.
[calico_rr]
[k8s_cluster:children]
kube_control_plane
kube_node
calico_rr
(kubespray 변수 정의 및 정리 - https://kubespray.io/#/docs/vars)
inventory/mycluster/group_vars
# Verify ansible can reach every node over SSH before installing.
ansible all -m ping -i inventory/mycluster/inventory.ini
# Run the full kubespray cluster install (-b = become root on the targets).
ansible-playbook -i inventory/mycluster/inventory.ini cluster.yml -b
# Copy the admin kubeconfig for the vagrant user.
mkdir -p ~/.kube   # -p: succeed even if ~/.kube already exists (plain mkdir would fail)
sudo cp /etc/kubernetes/admin.conf ~/.kube/config
sudo chown vagrant:vagrant ~/.kube/config
kubectl get nodes
kubectl get pods -A
https://kubernetes.io/ko/docs/concepts/overview/working-with-objects/kubernetes-objects/
# List every resource type the API server knows (name, shortnames, API group, kind).
kubectl api-resources   # fixed typo: was 'api-resouces'
Label/LabelSelector
Workload
- Pod
- Controller
- ReplicationController
- ReplicaSets
- DaemonSets
- Jobs
- CronJobs
- Deployments
- StatefulSets
- HorizontalPodAutoscaler
Network
- Service
- Endpoints
- Ingress
Storage
- PersistentVolume
- PersistentVolumeClaim
- ConfigMap
- Secret
Authentication
- ServiceAccount
- RBAC
- Role
- ClusterRole
- RoleBinding
- ClusterRoleBinding
Resource Isolation
- Namespaces
Resource Limits
- Limits
- Requests
- ResourceQuota
- LimitRange
Scheduling
- NodeName
- NodeSelector
- Affinity
- Node Affinity
- Pod Affinity
- Pod Anti Affinity
- Taints/Tolerations
- Drain/Cordon
https://kubernetes.io/ko/docs/reference/using-api/#api-%EA%B7%B8%EB%A3%B9
kubectl api-versions
apps/v1
그룹이 없는 api는 core 그룹
Alpha -> Beta -> Stable
apiVersion:
kind:
metadata:
spec:
# Inspect object schemas straight from the API server, drilling down field by field.
kubectl explain pods
kubectl explain pods.metadata
kubectl explain pods.spec
kubectl explain pods.spec.containers
kubectl explain pods.spec.containers.image   # field is 'image' (singular); '.images' errors
kubectl explain pods.spec --recursive
https://kubernetes.io/ko/docs/concepts/overview/working-with-objects/object-management/
# Imperative commands (object described on the command line):
kubectl create
kubectl run
kubectl expose
# Imperative object configuration (explicit verb against a specific file):
kubectl create -f a.yaml
kubectl replace -f a.yaml
kubectl patch -f a.yaml
kubectl delete -f a.yaml
# Declarative object configuration: apply a whole directory of manifests.
kubectl apply -f resources/