How to delete KubeSphere completely

cloud2000 · March 3, 2024
#!/usr/bin/env bash

function delete_sure(){
  cat << eof
$(echo -e "\033[1;36mNote:\033[0m")

This will delete the KubeSphere cluster, including the namespaces kubesphere-system, kubesphere-devops-system, kubesphere-devops-worker, kubesphere-monitoring-system, kubesphere-logging-system and openpitrix-system.
eof

read -p "Please reconfirm that you want to delete the KubeSphere cluster.  (yes/no) " ans
while [[ "x"$ans != "xyes" && "x"$ans != "xno" ]]; do
    read -p "Please reconfirm that you want to delete the KubeSphere cluster.  (yes/no) " ans
done

if [[ "x"$ans == "xno" ]]; then
    exit
fi
}


delete_sure
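
# Basic sanity checks (an added sketch, assuming kubectl and helm are on PATH and
# the current kubectl context points at the cluster you actually intend to wipe).
command -v kubectl >/dev/null 2>&1 || { echo "kubectl is required"; exit 1; }
command -v helm >/dev/null 2>&1 || { echo "helm is required"; exit 1; }
echo "Current context: $(kubectl config current-context)"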

# delete ks-installer
kubectl delete deploy ks-installer -n kubesphere-system 2>/dev/null
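# Optional check (an added sketch, not part of the original flow): removing ks-installer
# first keeps it from re-creating components mid-cleanup, so warn if it is still there.
kubectl get deploy ks-installer -n kubesphere-system 2>/dev/null && echo "warning: ks-installer deployment still present"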

# delete helm
# helm ls -A
# NAME                      	NAMESPACE                   	REVISION	UPDATED                                	STATUS  	CHART                     	APP VERSION
# devops                    	kubesphere-devops-system    	4       	2024-02-24 16:04:57.575955004 +0900 KST	failed  	ks-devops-0.2.2           	v3.4.2
# devops                    	argocd                      	3       	2024-02-24 15:57:10.29786064 +0900 KST 	deployed	argo-cd-4.4.0             	v2.3.3
# jaeger-operator           	istio-system                	1       	2024-02-24 15:57:56.630443016 +0900 KST	deployed	jaeger-operator-2.29.0    	1.31.0
# kiali-operator            	istio-system                	1       	2024-02-24 15:58:20.176617205 +0900 KST	deployed	kiali-operator-1.50.1     	v1.50.1
# ks-core                   	kubesphere-system           	2       	2024-02-25 10:13:29.98473402 +0900 KST 	deployed	ks-core-0.1.0             	v3.3.2
# ks-events                 	kubesphere-logging-system   	1       	2024-02-24 15:55:35.051857963 +0900 KST	deployed	kube-events-0.4.0         	0.6.0
# ks-minio                  	kubesphere-system           	1       	2024-02-24 15:42:37.56101028 +0900 KST 	deployed	minio-2.5.16              	RELEASE.2019-08-07T01-59-21Z
# ks-openldap               	kubesphere-system           	1       	2024-02-24 15:42:00.069202132 +0900 KST	deployed	openldap-ha-0.1.0         	1.0
# kube-auditing             	kubesphere-logging-system   	1       	2024-02-24 15:54:47.092276975 +0900 KST	deployed	kube-auditing-0.2.0       	0.2.0
# kubesphere-router-kubesphere-system        	kubesphere-controls-system  	1       	2024-02-25 12:57:00.033292959 +0900 KST	deployed	gateway-0.1.0             	1.16.0
# kubesphere-router-kubesphere-system-ingress	kubesphere-controls-system  	1       	2024-02-25
# logsidecar-injector       	kubesphere-logging-system   	1       	2024-02-24 15:56:16.613405076 +0900 KST	deployed	logsidecar-injector-0.1.0 	1.2.0
# notification-manager      	kubesphere-monitoring-system	2       	2024-02-24 16:04:34.220085252 +0900 KST	deployed	notification-manager-2.3.0	2.3.0
# opensearch-data           	kubesphere-logging-system   	1       	2024-02-24 15:43:30.644208587 +0900 KST	deployed	opensearch-2.11.0         	2.6.0
# opensearch-logging-curator	kubesphere-logging-system   	1       	2024-02-24 15:43:38.77181046 +0900 KST 	deployed	opensearch-curator-0.0.5  	0.0.5
# opensearch-master         	kubesphere-logging-system   	1       	2024-02-24 15:43:28.930681794 +0900 KST	deployed	opensearch-2.11.0         	2.6.0
# snapshot-controller       	kube-system                 	2       	2024-02-25 10:10:33.901254079 +0900 KST	deployed	snapshot-controller-0.2.0 	4.0.0

target_namespaces=(
  kubesphere-system
  kubesphere-controls-system
  kubesphere-devops-system
  kubesphere-monitoring-system
  kubesphere-logging-system
  openpitrix-system
  kubesphere-monitoring-federated
  istio-system
)

for namespace in "${target_namespaces[@]}"
do
  helm list -n "$namespace" | grep -v NAME | awk '{print $1}' | sort -u | xargs -r -L1 helm uninstall -n "$namespace" 2>/dev/null
done
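
# Optional check (an added sketch): confirm no Helm releases are left in the target
# namespaces before moving on; a release stuck in "uninstalling" may need a second pass.
for namespace in "${target_namespaces[@]}"
do
  helm list -n "$namespace" --all 2>/dev/null | grep -v NAME && echo "warning: releases still present in $namespace"
done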

# delete kubefed
kubectl get cc -n kubesphere-system ks-installer -o jsonpath="{.status.multicluster}" | grep enable
if [[ $? -eq 0 ]]; then
  # delete kubefed types resources
  for kubefed in `kubectl api-resources --namespaced=true --api-group=types.kubefed.io -o name`
  do
    kubectl delete -n kube-federation-system $kubefed --all 2>/dev/null
  done
  for kubefed in `kubectl api-resources --namespaced=false --api-group=types.kubefed.io -o name`
  do
    kubectl delete $kubefed --all 2>/dev/null
  done
  # delete kubefed core resources
  for kubefed in `kubectl api-resources --namespaced=true --api-group=core.kubefed.io -o name`
  do
    kubectl delete -n kube-federation-system $kubefed --all 2>/dev/null
  done
  for kubefed in `kubectl api-resources --namespaced=false --api-group=core.kubefed.io -o name`
  do
    kubectl delete $kubefed --all 2>/dev/null
  done
  # uninstall kubefed chart
  helm uninstall -n kube-federation-system kubefed 2>/dev/null
fi


helm uninstall -n kube-system snapshot-controller 2>/dev/null

# delete kubesphere deployment & statefulset
kubectl delete deployment -n kubesphere-system `kubectl get deployment -n kubesphere-system -o jsonpath="{.items[*].metadata.name}"` 2>/dev/null
kubectl delete statefulset -n kubesphere-system `kubectl get statefulset -n kubesphere-system -o jsonpath="{.items[*].metadata.name}"` 2>/dev/null

# delete monitor resources
kubectl delete prometheus -n kubesphere-monitoring-system k8s 2>/dev/null
kubectl delete Alertmanager -n kubesphere-monitoring-system main 2>/dev/null
kubectl delete DaemonSet -n kubesphere-monitoring-system node-exporter 2>/dev/null
kubectl delete statefulset -n kubesphere-monitoring-system `kubectl get statefulset -n kubesphere-monitoring-system -o jsonpath="{.items[*].metadata.name}"` 2>/dev/null

# delete grafana
kubectl delete deployment -n kubesphere-monitoring-system grafana 2>/dev/null
kubectl --no-headers=true get pvc -n kubesphere-monitoring-system -o custom-columns=:metadata.namespace,:metadata.name | grep -E kubesphere-monitoring-system | xargs -n2 kubectl delete pvc -n 2>/dev/null

# delete pvc
pvcs="kubesphere-system|openpitrix-system|kubesphere-devops-system|kubesphere-logging-system"
kubectl --no-headers=true get pvc --all-namespaces -o custom-columns=:metadata.namespace,:metadata.name | grep -E $pvcs | xargs -n2 kubectl delete pvc -n 2>/dev/null
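
# PVs backing the deleted PVCs are only removed automatically when their reclaim
# policy is Delete; with Retain they linger in the Released state. A minimal check
# (an added sketch) to surface anything that needs manual cleanup:
kubectl get pv 2>/dev/null | grep Released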


# delete rolebindings
delete_role_bindings() {
  for rolebinding in `kubectl -n $1 get rolebindings -l iam.kubesphere.io/user-ref -o jsonpath="{.items[*].metadata.name}"`
  do
    kubectl -n $1 delete rolebinding $rolebinding 2>/dev/null
  done
}

# delete roles
delete_roles() {
  kubectl -n $1 delete role admin 2>/dev/null
  kubectl -n $1 delete role operator 2>/dev/null
  kubectl -n $1 delete role viewer 2>/dev/null
  for role in `kubectl -n $1 get roles -l iam.kubesphere.io/role-template -o jsonpath="{.items[*].metadata.name}"`
  do
    kubectl -n $1 delete role $role 2>/dev/null
  done
}

# remove useless labels and finalizers
for ns in `kubectl get ns -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl label ns $ns kubesphere.io/workspace-
  kubectl label ns $ns kubesphere.io/namespace-
  kubectl patch ns $ns -p '{"metadata":{"finalizers":null,"ownerReferences":null}}'
  delete_role_bindings $ns
  delete_roles $ns
done

# delete clusterroles
delete_cluster_roles() {
  for role in `kubectl get clusterrole -l iam.kubesphere.io/role-template -o jsonpath="{.items[*].metadata.name}"`
  do
    kubectl delete clusterrole $role 2>/dev/null
  done

  for role in `kubectl get clusterroles | grep "kubesphere" | awk '{print $1}'| paste -sd " "`
  do
    kubectl delete clusterrole $role 2>/dev/null
  done
}
delete_cluster_roles

# delete clusterrolebindings
delete_cluster_role_bindings() {
  for rolebinding in `kubectl get clusterrolebindings -l iam.kubesphere.io/role-template -o jsonpath="{.items[*].metadata.name}"`
  do
    kubectl delete clusterrolebindings $rolebinding 2>/dev/null
  done

  for rolebinding in `kubectl get clusterrolebindings | grep "kubesphere" | awk '{print $1}'| paste -sd " "`
  do
    kubectl delete clusterrolebindings $rolebinding 2>/dev/null
  done
}
delete_cluster_role_bindings

# delete clusters
for cluster in `kubectl get clusters -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl patch cluster $cluster -p '{"metadata":{"finalizers":null}}' --type=merge
done
kubectl delete clusters --all 2>/dev/null

# delete workspaces
for ws in `kubectl get workspaces -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl patch workspace $ws -p '{"metadata":{"finalizers":null}}' --type=merge
done
kubectl delete workspaces --all 2>/dev/null

# make DevOps CRs deletable
for devops_crd in $(kubectl get crd -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep "devops.kubesphere.io"); do
    for ns in $(kubectl get ns -ojsonpath='{.items..metadata.name}'); do
        for devops_res in $(kubectl get $devops_crd -n $ns -oname); do
            kubectl patch $devops_res -n $ns -p '{"metadata":{"finalizers":[]}}' --type=merge
        done
    done
done

# delete validatingwebhookconfigurations
validatingwebhookconfigurations=(
  cluster.kubesphere.io
  istio-validator-1-14-6-istio-system
  istiod-default-validator
  ks-events-admission-validate
  network.kubesphere.io
  notification-manager-validating-webhook
  resourcesquotas.quota.kubesphere.io
  rulegroups.alerting.kubesphere.io
  storageclass-accessor.storage.kubesphere.io
  users.iam.kubesphere.io
  validating-webhook-configuration
)

for webhook in "${validatingwebhookconfigurations[@]}"
do
  kubectl delete validatingwebhookconfigurations.admissionregistration.k8s.io $webhook 2>/dev/null
done

# delete mutatingwebhookconfigurations
mutatingwebhookconfigurations=(
  istio-revision-tag-default
  istio-sidecar-injector-1-14-6
  ks-events-admission-mutate
  mutating-webhook-configuration
  rulegroups.alerting.kubesphere.io
)
for webhook in "${mutatingwebhookconfigurations[@]}"
do
  kubectl delete mutatingwebhookconfigurations.admissionregistration.k8s.io $webhook 2>/dev/null
done

# delete users
for user in `kubectl get users -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl patch user $user -p '{"metadata":{"finalizers":null}}' --type=merge
done
kubectl delete users --all 2>/dev/null

kubectl -n kubesphere-logging-system patch fluentbit fluent-bit -p '{"metadata":{"finalizers":null}}' --type=merge
kubectl -n kubesphere-logging-system delete fluentbit fluent-bit 

kubectl -n kubesphere-controls-system patch gateway kubesphere-router-kubesphere-system -p '{"metadata":{"finalizers":null}}' --type=merge
kubectl -n kubesphere-controls-system delete gateway kubesphere-router-kubesphere-system

kubectl -n kubesphere-controls-system patch nginx kubesphere-router-kubesphere-system-ingress -p '{"metadata":{"finalizers":null}}' --type=merge
kubectl -n kubesphere-controls-system delete nginx kubesphere-router-kubesphere-system-ingress

kubectl -n istio-system patch kiali kiali -p '{"metadata":{"finalizers":null}}' --type=merge
kubectl -n istio-system delete kiali kiali

# delete helm resources
for resource_type in helmcategories helmapplications helmapplicationversions helmrepos helmreleases; do
  for resource_name in `kubectl get ${resource_type}.application.kubesphere.io -o jsonpath="{.items[*].metadata.name}"`; do
    kubectl patch ${resource_type}.application.kubesphere.io ${resource_name} -p '{"metadata":{"finalizers":null}}' --type=merge
  done
  kubectl delete ${resource_type}.application.kubesphere.io --all 2>/dev/null
done

# delete workspacetemplates
for workspacetemplate in `kubectl get workspacetemplates.tenant.kubesphere.io -o jsonpath="{.items[*].metadata.name}"`
do
  kubectl patch workspacetemplates.tenant.kubesphere.io $workspacetemplate -p '{"metadata":{"finalizers":null}}' --type=merge
done
kubectl delete workspacetemplates.tenant.kubesphere.io --all 2>/dev/null

# delete federatednamespaces in namespace kubesphere-monitoring-federated
for resource in $(kubectl get federatednamespaces.types.kubefed.io -n kubesphere-monitoring-federated -oname); do
  kubectl patch "${resource}" -p '{"metadata":{"finalizers":null}}' --type=merge -n kubesphere-monitoring-federated
done

# delete crds
for crd in `kubectl get crds -o jsonpath="{.items[*].metadata.name}"`
do
  if [[ $crd == *kubesphere.io ]] || [[ $crd == *kubefed.io ]] ; then kubectl delete crd $crd 2>/dev/null; fi
done
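
# If a CRD hangs here it usually still has instances holding finalizers. This check
# (an added sketch) lists any KubeSphere/kubefed CRDs that survived the loop above
# so they can be inspected manually.
kubectl get crds 2>/dev/null | grep -E "kubesphere.io|kubefed.io" && echo "warning: some CRDs were not removed"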

# delete the relevant namespaces
# NAME                              STATUS   AGE
# istio-system                      Active   21h
# kubekey-system                    Active   21h
# kubesphere-controls-system        Active   21h
# kubesphere-devops-system          Active   21h
# kubesphere-devops-worker          Active   21h
# kubesphere-logging-system         Active   21h
# kubesphere-monitoring-federated   Active   21h
# kubesphere-monitoring-system      Active   21h
# kubesphere-system                 Active   21h

delete_namespaces=(
  kube-federation-system 
  kubesphere-alerting-system 
  kubesphere-controls-system 
  kubesphere-devops-system 
  kubesphere-devops-worker 
  kubesphere-logging-system 
  kubesphere-monitoring-system 
  kubesphere-monitoring-federated 
  openpitrix-system 
  kubesphere-system
  kubekey-system
  istio-system
)

for ns in "${delete_namespaces[@]}"
do
  kubectl delete ns $ns 2>/dev/null
done
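
# Namespaces can hang in Terminating when a resource with a finalizer survives inside
# them. This only reports such namespaces (an added sketch); forcing deletion, e.g. by
# clearing spec.finalizers through the namespace /finalize subresource, is left as a
# manual last resort.
for ns in "${delete_namespaces[@]}"
do
  kubectl get ns $ns 2>/dev/null | grep Terminating && echo "namespace $ns is stuck in Terminating"
done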