kubekey is one of KubeSphere's open-source projects; it makes it easy to install a Kubernetes cluster from the CLI. If the cluster configuration file also contains KubeSphere-related settings, it installs KubeSphere along with the cluster.
In this post, we look at how kubekey works and use the VS Code debugger to step through its key code paths.
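For context, a cluster configuration file like the one used below is normally generated by kk itself and then edited. A minimal sketch (the --with-kubesphere flag and the default output name config-sample.yaml follow the kubekey docs; adjust to your version):

./kk create config --with-kubesphere v3.3.2   # writes config-sample.yaml, including a ClusterConfiguration section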
First, create the VS Code debug configuration file, .vscode/launch.json:
{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "kk create cluster",
      "type": "go",
      "request": "launch",
      "mode": "auto",
      "program": "${workspaceRoot}/cmd/kk",
      "args": ["create", "cluster", "-f", "${workspaceRoot}/bin/utcl-cluster.yaml", "-y"]
    }
  ]
}
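To use this configuration, open the kubekey repository root as the VS Code workspace (with the Go extension installed) and place the cluster file at bin/utcl-cluster.yaml. A minimal setup sketch, assuming the upstream repository and the paths referenced above (the source path of the YAML file is hypothetical):

git clone https://github.com/kubesphere/kubekey.git
cd kubekey
mkdir -p bin
cp /path/to/utcl-cluster.yaml bin/utcl-cluster.yaml   # hypothetical source path
code .   # open the repo, then run the "kk create cluster" configuration from Run and Debug (F5)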
The contents of utcl-cluster.yaml are as follows.
apiVersion: kubekey.kubesphere.io/v1alpha2
kind: Cluster
metadata:
  name: ut-cluster
spec:
  hosts:
  - {name: utcl, address: 172.16.6.128, user: cloud, password: xxxxx, arch: arm64}
  roleGroups:
    etcd:
    - utcl
    control-plane:
    - utcl
    worker:
    registry:
  controlPlaneEndpoint:
    domain: lb.kubesphere.local
    address: ""
    port: 6443
  kubernetes:
    #version: v1.21.5
    version: v1.24.9
    imageRepo: kubesphere
    containerManager: containerd # Container Runtime, support: containerd, cri-o, isula. [Default: docker]
    clusterName: cluster.local
    autoRenewCerts: true # Whether to install a script which can automatically renew the Kubernetes control plane certificates. [Default: false]
    masqueradeAll: false # masqueradeAll tells kube-proxy to SNAT everything if using the pure iptables proxy mode. [Default: false]
    maxPods: 110 # maxPods is the number of Pods that can run on this Kubelet. [Default: 110]
    podPidsLimit: 10000 # podPidsLimit is the maximum number of PIDs in any pod. [Default: 10000]
    nodeCidrMaskSize: 24 # The internal network node size allocation. This is the size allocated to each node on your network. [Default: 24]
    proxyMode: ipvs # Specify which proxy mode to use. [Default: ipvs]
    featureGates: # enable featureGates, [Default: {"ExpandCSIVolumes":true,"RotateKubeletServerCertificate": true,"CSIStorageCapacity":true, "TTLAfterFinished":true}]
      CSIStorageCapacity: true
      ExpandCSIVolumes: true
      RotateKubeletServerCertificate: true
      TTLAfterFinished: true
  etcd:
    type: kubekey
  network:
    plugin: calico
    kubePodsCIDR: 10.233.64.0/18
    kubeServiceCIDR: 10.233.0.0/18
    ## multus support. https://github.com/k8snetworkplumbingwg/multus-cni
    multusCNI:
      enabled: false
  registry:
    type: harbor
    plainHTTP: false
    privateRegistry: ""
    namespaceOverride: ""
    registryMirrors: []
    insecureRegistries: []
    auths: {}
  addons: []
---
apiVersion: installer.kubesphere.io/v1alpha1
kind: ClusterConfiguration
metadata:
  name: ks-installer
  namespace: kubesphere-system
  labels:
    version: v3.3.2
spec:
  persistence:
    storageClass: ""
  authentication:
    jwtSecret: ""
  zone: ""
  local_registry: ""
  namespace_override: ""
  etcd:
    monitoring: true
    endpointIps: localhost
    port: 2379
    tlsEnable: true
  common:
    core:
      console:
        enableMultiLogin: true
        port: 30000
        type: NodePort
    redis:
      enabled: true
      volumeSize: 2Gi
    openldap:
      enabled: true
      volumeSize: 2Gi
    minio:
      volumeSize: 20Gi
    monitoring:
      # type: external
      endpoint: http://prometheus-operated.kubesphere-monitoring-system.svc:9090
      GPUMonitoring:
        enabled: false
    gpu:
      kinds:
      - resourceName: "nvidia.com/gpu"
        resourceType: "GPU"
        default: true
    es:
      logMaxAge: 7
      elkPrefix: logstash
      basicAuth:
        enabled: false
        username: ""
        password: ""
      externalElasticsearchHost: ""
      externalElasticsearchPort: ""
  alerting:
    enabled: true
  auditing:
    enabled: true
  devops:
    enabled: true
    jenkinsMemoryLim: 2Gi
    jenkinsMemoryReq: 1500Mi
    jenkinsVolumeSize: 8Gi
    jenkinsJavaOpts_Xms: 512m
    jenkinsJavaOpts_Xmx: 512m
    jenkinsJavaOpts_MaxRAM: 2g
  events:
    enabled: true
  logging:
    enabled: true
    containerruntime: docker
    logsidecar:
      enabled: true
      replicas: 2
      # resources: {}
  metrics_server:
    enabled: true
  monitoring:
    storageClass: ""
    gpu:
      nvidia_dcgm_exporter:
        enabled: false
        # resources: {}
  multicluster:
    clusterRole: none
  network:
    networkpolicy:
      enabled: false
    ippool:
      type: none
    topology:
      type: none
  openpitrix:
    store:
      enabled: true
  servicemesh:
    enabled: true
  kubeedge:
    enabled: false
    cloudCore:
      nodeSelector: {"node-role.kubernetes.io/worker": ""}
      tolerations: []
      cloudhubPort: "10000"
      cloudhubQuicPort: "10001"
      cloudhubHttpsPort: "10002"
      cloudstreamPort: "10003"
      tunnelPort: "10004"
      cloudHub:
        advertiseAddress:
        - ""
        nodeLimit: "100"
      service:
        cloudhubNodePort: "30000"
        cloudhubQuicNodePort: "30001"
        cloudhubHttpsNodePort: "30002"
        cloudstreamNodePort: "30003"
        tunnelNodePort: "30004"
    edgeWatcher:
      nodeSelector: {"node-role.kubernetes.io/worker": ""}
      tolerations: []
      edgeWatcherAgent:
        nodeSelector: {"node-role.kubernetes.io/worker": ""}
        tolerations: []
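Outside the debugger, the same two-document file can be passed to a kk binary built from the repository. A sketch assuming the repo's standard Go layout (cmd/kk is the main package referenced by launch.json):

go build -o bin/kk ./cmd/kk
./bin/kk create cluster -f bin/utcl-cluster.yaml -y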
Version-related source files worth checking while debugging:
cmd/kk/pkg/version/kubesphere/ks_installer.go
cmd/kk/pkg/version/kubesphere/version_enum.go
cmd/kk/pkg/version/kubernetes/version_enum.go
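To quickly see which versions a given checkout supports, these files can be inspected directly from the repo root; the grep patterns below are only examples:

grep -n "v1.24" cmd/kk/pkg/version/kubernetes/version_enum.go
grep -n "v3.3" cmd/kk/pkg/version/kubesphere/version_enum.go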
When deleting the cluster, the following cleanup is performed on the nodes:
Containerd:
systemctl disable containerd && systemctl stop containerd
rm -f /usr/local/sbin/runc
rm -f /usr/bin/crictl
rm -f /usr/bin/containerd*
rm -f /usr/bin/ctr
rm -f /etc/systemd/system/containerd.service
rm -f /etc/containerd/config.toml
rm -f /etc/crictl.yaml

Network reset commands (from the kubekey source):
networkResetCmds = []string{
  "iptables -F",
  "iptables -X",
  "iptables -F -t nat",
  "iptables -X -t nat",
  "ipvsadm -C",
  "ip link del kube-ipvs0",
  "ip link del nodelocaldns",
  "ip link del cni0",
  "ip link del flannel.1",
  "ip link del flannel-v6.1",
  "ip link del flannel-wg",
  "ip link del flannel-wg-v6",
  "ip link del cilium_host",
  "ip link del cilium_vxlan",
  "ip link del vxlan.calico",
  "ip link del vxlan-v6.calico",
  "ip -br link show | grep 'cali[a-f0-9]*' | awk -F '@' '{print $1}' | xargs -r -t -n 1 ip link del",
  "ip netns show 2>/dev/null | grep cni- | xargs -r -t -n 1 ip netns del",
}

etcd:
systemctl stop etcd && exit 0

Files removed (rm -f) for etcd:
etcdFiles = []string{
  "/usr/local/bin/etcd",
  "/etc/ssl/etcd",
  "/var/lib/etcd/*",
  "/etc/etcd.env",
}

Kubernetes-related files and directories removed (rm -f):
[]string{
  "/etc/kubernetes",
  "/etc/systemd/system/etcd.service",
  "/etc/systemd/system/backup-etcd.service",
  "/etc/systemd/system/backup-etcd.timer",
  "/var/log/calico",
  "/etc/cni",
  "/var/log/pods/",
  "/var/lib/cni",
  "/var/lib/calico",
  "/var/lib/kubelet/*",
  "/run/calico",
  "/run/flannel",
  "/etc/flannel",
  "/var/openebs",
  "/etc/systemd/system/kubelet.service",
  "/etc/systemd/system/kubelet.service.d",
  "/usr/local/bin/kubelet",
  "/usr/local/bin/kubeadm",
  "/usr/bin/kubelet",
  "/var/lib/rook",
  "/tmp/kubekey",
  "/etc/kubekey",
}

Certificate auto-renew cleanup:
systemctl disable k8s-certs-renew.timer 1>/dev/null 2>/dev/null
systemctl stop k8s-certs-renew.timer 1>/dev/null 2>/dev/null
rm -f /usr/local/bin/kube-scripts/k8s-certs-renew.sh
rm -f /etc/systemd/system/k8s-certs-renew.service

Virtual IP removal (a Go format string; kubekey fills in the address and interface):
ip addr del %s dev %s
$ kubekey delete cluster -f config.yml -A -y
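After the delete finishes, a quick spot check on each node (not exhaustive) confirms the components above are really gone:

systemctl status kubelet containerd etcd 2>/dev/null | grep -E 'Loaded|Active'
ls /etc/kubernetes /var/lib/etcd /etc/cni /etc/kubekey 2>/dev/null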
The script below is based on https://github.com/kubesphere/ks-installer/blob/master/scripts/kubesphere-delete.sh, modified so that the istio-system namespace is also reset.
#!/usr/bin/env bash
function delete_sure(){
  cat << eof
$(echo -e "\033[1;36mNote:\033[0m")
Delete the KubeSphere cluster, including the module kubesphere-system kubesphere-devops-system kubesphere-devops-worker kubesphere-monitoring-system kubesphere-logging-system openpitrix-system.
eof
  read -p "Please reconfirm that you want to delete the KubeSphere cluster. (yes/no) " ans
  while [[ "x"$ans != "xyes" && "x"$ans != "xno" ]]; do
    read -p "Please reconfirm that you want to delete the KubeSphere cluster. (yes/no) " ans
  done
  if [[ "x"$ans == "xno" ]]; then
    exit
  fi
}
delete_sure
# delete ks-installer
kubectl delete deploy ks-installer -n kubesphere-system 2>/dev/null
# delete helm
# root@node-63:~# helm ls -A
# NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION
# devops kubesphere-devops-system 4 2024-02-24 16:04:57.575955004 +0900 KST failed ks-devops-0.2.2 v3.4.2
# devops argocd 3 2024-02-24 15:57:10.29786064 +0900 KST deployed argo-cd-4.4.0 v2.3.3
# jaeger-operator istio-system 1 2024-02-24 15:57:56.630443016 +0900 KST deployed jaeger-operator-2.29.0 1.31.0
# kiali-operator istio-system 1 2024-02-24 15:58:20.176617205 +0900 KST deployed kiali-operator-1.50.1 v1.50.1
# ks-core kubesphere-system 2 2024-02-25 10:13:29.98473402 +0900 KST deployed ks-core-0.1.0 v3.3.2
# ks-events kubesphere-logging-system 1 2024-02-24 15:55:35.051857963 +0900 KST deployed kube-events-0.4.0 0.6.0
# ks-minio kubesphere-system 1 2024-02-24 15:42:37.56101028 +0900 KST deployed minio-2.5.16 RELEASE.2019-08-07T01-59-21Z
# ks-openldap kubesphere-system 1 2024-02-24 15:42:00.069202132 +0900 KST deployed openldap-ha-0.1.0 1.0
# kube-auditing kubesphere-logging-system 1 2024-02-24 15:54:47.092276975 +0900 KST deployed kube-auditing-0.2.0 0.2.0
# kubesphere-router-kubesphere-system kubesphere-controls-system 1 2024-02-25 12:57:00.033292959 +0900 KST deployed gateway-0.1.0 1.16.0
# kubesphere-router-kubesphere-system-ingress kubesphere-controls-system 1 2024-02-25
# logsidecar-injector kubesphere-logging-system 1 2024-02-24 15:56:16.613405076 +0900 KST deployed logsidecar-injector-0.1.0 1.2.0
# notification-manager kubesphere-monitoring-system 2 2024-02-24 16:04:34.220085252 +0900 KST deployed notification-manager-2.3.0 2.3.0
# opensearch-data kubesphere-logging-system 1 2024-02-24 15:43:30.644208587 +0900 KST deployed opensearch-2.11.0 2.6.0
# opensearch-logging-curator kubesphere-logging-system 1 2024-02-24 15:43:38.77181046 +0900 KST deployed opensearch-curator-0.0.5 0.0.5
# opensearch-master kubesphere-logging-system 1 2024-02-24 15:43:28.930681794 +0900 KST deployed opensearch-2.11.0 2.6.0
# snapshot-controller kube-system 2 2024-02-25 10:10:33.901254079 +0900 KST deployed snapshot-controller-0.2.0 4.0.0
target_namespaces=(
kubesphere-system
kubesphere-controls-system
kubesphere-devops-system
kubesphere-monitoring-system
kubesphere-logging-system
openpitrix-system
kubesphere-monitoring-federated
istio-system
)
for namespaces in "${target_namespaces[@]}"
do
  helm list -n $namespaces | grep -v NAME | awk '{print $1}' | sort -u | xargs -r -L1 helm uninstall -n $namespaces 2>/dev/null
done
# delete kubefed
kubectl get cc -n kubesphere-system ks-installer -o jsonpath="{.status.multicluster}" | grep enable
if [[ $? -eq 0 ]]; then
  # delete kubefed types resources
  for kubefed in `kubectl api-resources --namespaced=true --api-group=types.kubefed.io -o name`
  do
    kubectl delete -n kube-federation-system $kubefed --all 2>/dev/null
  done
  for kubefed in `kubectl api-resources --namespaced=false --api-group=types.kubefed.io -o name`
  do
    kubectl delete $kubefed --all 2>/dev/null
  done
  # delete kubefed core resources
  for kubefed in `kubectl api-resources --namespaced=true --api-group=core.kubefed.io -o name`
  do
    kubectl delete -n kube-federation-system $kubefed --all 2>/dev/null
  done
  for kubefed in `kubectl api-resources --namespaced=false --api-group=core.kubefed.io -o name`
  do
    kubectl delete $kubefed --all 2>/dev/null
  done
  # uninstall kubefed chart
  helm uninstall -n kube-federation-system kubefed 2>/dev/null
fi
helm uninstall -n kube-system snapshot-controller 2>/dev/null
# delete kubesphere deployment & statefulset
kubectl delete deployment -n kubesphere-system `kubectl get deployment -n kubesphere-system -o jsonpath="{.items[*].metadata.name}"` 2>/dev/null
kubectl delete statefulset -n kubesphere-system `kubectl get statefulset -n kubesphere-system -o jsonpath="{.items[*].metadata.name}"` 2>/dev/null
# delete monitor resources
kubectl delete prometheus -n kubesphere-monitoring-system k8s 2>/dev/null
kubectl delete Alertmanager -n kubesphere-monitoring-system main 2>/dev/null
kubectl delete DaemonSet -n kubesphere-monitoring-system node-exporter 2>/dev/null
kubectl delete statefulset -n kubesphere-monitoring-system `kubectl get statefulset -n kubesphere-monitoring-system -o jsonpath="{.items[*].metadata.name}"` 2>/dev/null
# delete grafana
kubectl delete deployment -n kubesphere-monitoring-system grafana 2>/dev/null
kubectl --no-headers=true get pvc -n kubesphere-monitoring-system -o custom-columns=:metadata.namespace,:metadata.name | grep -E kubesphere-monitoring-system | xargs -n2 kubectl delete pvc -n 2>/dev/null
# delete pvc
pvcs="kubesphere-system|openpitrix-system|kubesphere-devops-system|kubesphere-logging-system"
kubectl --no-headers=true get pvc --all-namespaces -o custom-columns=:metadata.namespace,:metadata.name | grep -E $pvcs | xargs -n2 kubectl delete pvc -n 2>/dev/null
# delete rolebindings
delete_role_bindings() {
  for rolebinding in `kubectl -n $1 get rolebindings -l iam.kubesphere.io/user-ref -o jsonpath="{.items[*].metadata.name}"`
  do
    kubectl -n $1 delete rolebinding $rolebinding 2>/dev/null
  done
}
# delete roles
delete_roles() {
  kubectl -n $1 delete role admin 2>/dev/null
  kubectl -n $1 delete role operator 2>/dev/null
  kubectl -n $1 delete role viewer 2>/dev/null
  for role in `kubectl -n $1 get roles -l iam.kubesphere.io/role-template -o jsonpath="{.items[*].metadata.name}"`
  do
    kubectl -n $1 delete role $role 2>/dev/null
  done
}
# remove useless labels and finalizers
for ns in `kubectl get ns -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl label ns $ns kubesphere.io/workspace-
  kubectl label ns $ns kubesphere.io/namespace-
  kubectl patch ns $ns -p '{"metadata":{"finalizers":null,"ownerReferences":null}}'
  delete_role_bindings $ns
  delete_roles $ns
done
# delete clusterroles
delete_cluster_roles() {
  for role in `kubectl get clusterrole -l iam.kubesphere.io/role-template -o jsonpath="{.items[*].metadata.name}"`
  do
    kubectl delete clusterrole $role 2>/dev/null
  done
  for role in `kubectl get clusterroles | grep "kubesphere" | awk '{print $1}'| paste -sd " "`
  do
    kubectl delete clusterrole $role 2>/dev/null
  done
}
delete_cluster_roles
# delete clusterrolebindings
delete_cluster_role_bindings() {
  for rolebinding in `kubectl get clusterrolebindings -l iam.kubesphere.io/role-template -o jsonpath="{.items[*].metadata.name}"`
  do
    kubectl delete clusterrolebindings $rolebinding 2>/dev/null
  done
  for rolebinding in `kubectl get clusterrolebindings | grep "kubesphere" | awk '{print $1}'| paste -sd " "`
  do
    kubectl delete clusterrolebindings $rolebinding 2>/dev/null
  done
}
delete_cluster_role_bindings
# delete clusters
for cluster in `kubectl get clusters -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl patch cluster $cluster -p '{"metadata":{"finalizers":null}}' --type=merge
done
kubectl delete clusters --all 2>/dev/null
# delete workspaces
for ws in `kubectl get workspaces -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl patch workspace $ws -p '{"metadata":{"finalizers":null}}' --type=merge
done
kubectl delete workspaces --all 2>/dev/null
# make DevOps CRs deletable
for devops_crd in $(kubectl get crd -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "devops.kubesphere.io"); do
  for ns in $(kubectl get ns -ojsonpath='{.items..metadata.name}'); do
    for devops_res in $(kubectl get $devops_crd -n $ns -oname); do
      kubectl patch $devops_res -n $ns -p '{"metadata":{"finalizers":[]}}' --type=merge
    done
  done
done
# delete validatingwebhookconfigurations
validatingwebhookconfigurations=(
cluster.kubesphere.io
istio-validator-1-14-6-istio-system
istiod-default-validator
ks-events-admission-validate
network.kubesphere.io
notification-manager-validating-webhook
resourcesquotas.quota.kubesphere.io
rulegroups.alerting.kubesphere.io
storageclass-accessor.storage.kubesphere.io
users.iam.kubesphere.io
validating-webhook-configuration
)
for webhook in "${validatingwebhookconfigurations[@]}"
do
kubectl delete validatingwebhookconfigurations.admissionregistration.k8s.io $webhook 2>/dev/null
done
# delete mutatingwebhookconfigurations
mutatingwebhookconfigurations=(
istio-revision-tag-default
istio-sidecar-injector-1-14-6
ks-events-admission-mutate
mutating-webhook-configuration
rulegroups.alerting.kubesphere.io
)
for webhook in "${mutatingwebhookconfigurations[@]}"
do
kubectl delete mutatingwebhookconfigurations.admissionregistration.k8s.io $webhook 2>/dev/null
done
# delete users
for user in `kubectl get users -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl patch user $user -p '{"metadata":{"finalizers":null}}' --type=merge
done
kubectl delete users --all 2>/dev/null
kubectl -n kubesphere-logging-system patch fluentbit fluent-bit -p '{"metadata":{"finalizers":null}}' --type=merge
kubectl -n kubesphere-logging-system delete fluentbit fluent-bit
kubectl -n kubesphere-controls-system patch gateway kubesphere-router-kubesphere-system -p '{"metadata":{"finalizers":null}}' --type=merge
kubectl -n kubesphere-controls-system delete gateway kubesphere-router-kubesphere-system
kubectl -n kubesphere-controls-system patch nginx kubesphere-router-kubesphere-system-ingress -p '{"metadata":{"finalizers":null}}' --type=merge
kubectl -n kubesphere-controls-system delete nginx kubesphere-router-kubesphere-system-ingress
kubectl -n istio-system patch kiali kiali -p '{"metadata":{"finalizers":null}}' --type=merge
kubectl -n istio-system delete kiali kiali
# delete helm resources
for resource_type in `echo helmcategories helmapplications helmapplicationversions helmrepos helmreleases`; do
  for resource_name in `kubectl get ${resource_type}.application.kubesphere.io -o jsonpath="{.items[*].metadata.name}"`; do
    kubectl patch ${resource_type}.application.kubesphere.io ${resource_name} -p '{"metadata":{"finalizers":null}}' --type=merge
  done
  kubectl delete ${resource_type}.application.kubesphere.io --all 2>/dev/null
done
# delete workspacetemplates
for workspacetemplate in `kubectl get workspacetemplates.tenant.kubesphere.io -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl patch workspacetemplates.tenant.kubesphere.io $workspacetemplate -p '{"metadata":{"finalizers":null}}' --type=merge
done
kubectl delete workspacetemplates.tenant.kubesphere.io --all 2>/dev/null
# delete federatednamespaces in namespace kubesphere-monitoring-federated
for resource in $(kubectl get federatednamespaces.types.kubefed.io -n kubesphere-monitoring-federated -oname); do
  kubectl patch "${resource}" -p '{"metadata":{"finalizers":null}}' --type=merge -n kubesphere-monitoring-federated
done
# delete crds
for crd in `kubectl get crds -o jsonpath="{.items[*].metadata.name}"`
do
  if [[ $crd == *kubesphere.io ]] || [[ $crd == *kubefed.io ]] ; then kubectl delete crd $crd 2>/dev/null; fi
done
# delete relevance ns
# NAME STATUS AGE
# istio-system Active 21h
# kubekey-system Active 21h
# kubesphere-controls-system Active 21h
# kubesphere-devops-system Active 21h
# kubesphere-devops-worker Active 21h
# kubesphere-logging-system Active 21h
# kubesphere-monitoring-federated Active 21h
# kubesphere-monitoring-system Active 21h
# kubesphere-system Active 21h
delete_namespaces=(
kube-federation-system
kubesphere-alerting-system
kubesphere-controls-system
kubesphere-devops-system
kubesphere-devops-worker
kubesphere-logging-system
kubesphere-monitoring-system
kubesphere-monitoring-federated
openpitrix-system
kubesphere-system
kubekey-system
istio-system
)
for ns in "${delete_namespaces[@]}"
do
kubectl delete ns $ns 2>/dev/null
done
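To run the modified script, save it (the file name below is arbitrary), make it executable, and execute it with a kubeconfig that can reach the cluster:

chmod +x kubesphere-delete.sh
./kubesphere-delete.sh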