~/.kube/config
apiVersion: v1
kind: Config
preferences: {}
clusters:
  # In-cluster endpoint with its CA pinned via certificate-authority-data.
  - name: cluster.local
    cluster:
      certificate-authority-data: LS0tLS1...
      server: https://127.0.0.1:6443
  # External endpoint; no CA data here, so TLS verification relies on
  # system trust (or --insecure-skip-tls-verify) — confirm intent.
  - name: mycluster
    cluster:
      server: https://1.2.3.4:6443
users:
  # Credential entry without client certs/keys — placeholder only.
  - name: myadmin
  # Client-certificate authentication (x509) for the admin user.
  - name: kubernetes-admin
    user:
      client-certificate-data: LS0tLS1...
      client-key-data: LS0tLS1...
contexts:
  # Each context binds one cluster entry to one user entry.
  - context:
      cluster: mycluster
      user: myadmin
    name: myadmin@mycluster
  - context:
      cluster: cluster.local
      user: kubernetes-admin
    name: kubernetes-admin@cluster.local
# Context kubectl uses when no --context flag is given.
current-context: kubernetes-admin@cluster.local
# Show the merged kubeconfig currently in effect
kubectl config view
# List the cluster entries defined in the kubeconfig
kubectl config get-clusters
# List contexts (cluster/user pairs); the active one is marked
kubectl config get-contexts
# List the user (credential) entries defined in the kubeconfig
kubectl config get-users
# Switch the active context to myadmin@mycluster
kubectl config use-context myadmin@mycluster
쿠버네티스의 사용자
인증 방법:
Authorization: Bearer 31ada4fd-adec-460c-809a-9e56ceb75269
https://kubernetes.io/docs/reference/access-authn-authz/rbac/
요청 동사
ClusterRole
kubectl create sa <NAME>
Private Key
openssl genrsa -out myuser.key 2048
x509 인증서 요청 생성
openssl req -new -key myuser.key -out myuser.csr -subj "/CN=myuser"
cat myuser.csr | base64 | tr -d "\n"
csr.yaml
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: myuser-csr
spec:
  usages:
    # Issued certificate will be valid for client authentication only.
    - client auth
  # Built-in signer that issues certs usable against the kube-apiserver.
  signerName: kubernetes.io/kube-apiserver-client
  # Base64-encoded CSR, e.g. from: cat myuser.csr | base64 | tr -d "\n"
  request: LS0tLS1CRUdJTiB
kubectl create -f csr.yaml
kubectl get csr
상태: Pending
kubectl certificate approve myuser-csr
kubectl get csr
상태: Approved, Issued
kubectl get csr myuser-csr -o yaml
status.certificate
kubectl get csr myuser-csr -o jsonpath='{.status.certificate}' | base64 -d > myuser.crt
Kubeconfig 사용자 생성
kubectl config set-credentials myuser --client-certificate=myuser.crt --client-key=myuser.key --embed-certs=true
Kubeconfig 컨텍스트 생성
kubectl config set-context myuser@cluster.local --cluster=cluster.local --user=myuser --namespace=default
kubectl config get-users
kubectl config get-clusters
kubectl config get-contexts
kubectl config use-context myuser@cluster.local
클러스터 롤 바인딩 생성
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: myuser-view-crb
# Grants the built-in read-only "view" ClusterRole cluster-wide.
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: view
subjects:
  # The x509 user created via the CSR flow above (CN=myuser).
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: myuser
용어
helm v3는 tiller를 사용하지 않음
curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | sudo tee /usr/share/keyrings/helm.gpg > /dev/null
sudo apt-get install apt-transport-https --yes
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
sudo apt-get update
sudo apt-get install helm
Helm Chart 검색
https://artifacthub.io/
<Chart Name>/
Chart.yaml
values.yaml
templates/
artifacthub 검색
helm search hub <PATTERN>
저장소 추가
helm repo add bitnami https://charts.bitnami.com/bitnami
저장소 검색
helm search repo wordpress
차트 설치
helm install mywordpress bitnami/wordpress
릴리즈 확인
helm list
릴리즈 삭제
helm uninstall mywordpress
차트 정보 확인
helm show readme bitnami/wordpress
helm show chart bitnami/wordpress
helm show values bitnami/wordpress
차트 사용자화
helm install mywp bitnami/wordpress --set replicaCount=2
helm install mywp bitnami/wordpress --set replicaCount=2 --set service.type=NodePort
릴리즈 업그레이드
helm show values bitnami/wordpress > wp-value.yaml
파일 수정
helm upgrade mywp bitnami/wordpress -f wp-value.yaml
릴리즈 업그레이드 히스토리
helm history mywp
릴리즈 롤백
helm rollback mywp 1
wp-value2.yaml
# Scale WordPress back to one replica.
replicaCount: 1
service:
  # Expose the release through a cloud/MetalLB load balancer.
  type: LoadBalancer
helm upgrade mywp bitnami/wordpress -f wp-value2.yaml
CPU, Memory, Network IO, Disk IO
https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
prom-values.yaml
grafana:
  service:
    # Expose the Grafana UI through a load balancer instead of ClusterIP.
    type: LoadBalancer
kubectl create ns monitor
helm install prom prometheus-community/kube-prometheus-stack -f prom-values.yaml -n monitor
웹브라우저
http://192.168.100.24X
ID: admin
PWD: prom-operator
ELK Stack: Elasticsearch + Logstash + Kibana
EFK Stack: Elasticsearch + Fluentd + Kibana
Elasticsearch + Fluent Bit + Kibana
Elastic Stack: Elasticsearch + Beat + Kibana
helm repo add elastic https://helm.elastic.co
helm repo update
helm show values elastic/elasticsearch > es-value.yaml
es-value.yaml
18 replicas: 1
19 minimumMasterNodes: 1
80 resources:
81   requests:
82     cpu: "500m"
83     memory: "1Gi"
84   limits:
85     cpu: "500m"
86     memory: "1Gi"
kubectl create ns logging
helm install elastic elastic/elasticsearch -f es-value.yaml -n logging
git clone https://github.com/fluent/fluent-bit-kubernetes-logging.git
cd fluent-bit-kubernetes-logging
kubectl create -f fluent-bit-service-account.yaml
kubectl create -f fluent-bit-role-1.22.yaml
kubectl create -f fluent-bit-role-binding-1.22.yaml
kubectl create -f output/elasticsearch/fluent-bit-configmap.yaml
output/elasticsearch/fluent-bit-ds.yaml
32 - name: FLUENT_ELASTICSEARCH_HOST
33   value: "elasticsearch-master"
kubectl create -f output/elasticsearch/fluent-bit-ds.yaml
helm show values elastic/kibana > kibana-value.yaml
kibana-value.yaml
49 resources:
50   requests:
51     cpu: "500m"
52     memory: "1Gi"
53   limits:
54     cpu: "500m"
55     memory: "1Gi"
119 service:
120   type: LoadBalancer
helm install kibana elastic/kibana -f kibana-value.yaml -n logging
git clone --depth=1 https://github.com/romkatv/powerlevel10k.git ${ZSH_CUSTOM:-$HOME/.oh-my-zsh/custom}/themes/powerlevel10k
~/.zshrc
ZSH_THEME="powerlevel10k/powerlevel10k"
exec zsh
p10k configure
wget https://github.com/ahmetb/kubectx/releases/download/v0.9.4/kubectx
wget https://github.com/ahmetb/kubectx/releases/download/v0.9.4/kubens
sudo install kubectx /usr/local/bin
sudo install kubens /usr/local/bin