This post is based on the AWS EKS Workshop Study led by Gasida (Seo Jong-ho) of the CloudNet@ team.
vpc.tf
provider "aws" {
region = var.TargetRegion
}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~>5.7"
name = "${var.ClusterBaseName}-VPC"
cidr = var.VpcBlock
azs = var.availability_zones
enable_dns_support = true
enable_dns_hostnames = true
public_subnets = var.public_subnet_blocks
private_subnets = var.private_subnet_blocks
enable_nat_gateway = true
single_nat_gateway = true
one_nat_gateway_per_az = false
map_public_ip_on_launch = true
igw_tags = {
"Name" = "${var.ClusterBaseName}-IGW"
}
nat_gateway_tags = {
"Name" = "${var.ClusterBaseName}-NAT"
}
public_subnet_tags = {
"Name" = "${var.ClusterBaseName}-PublicSubnet"
"kubernetes.io/role/elb" = "1"
}
private_subnet_tags = {
"Name" = "${var.ClusterBaseName}-PrivateSubnet"
"kubernetes.io/role/internal-elb" = "1"
}
tags = {
"Environment" = "${var.ClusterBaseName}-lab"
}
}
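If other configurations need to reference this network, the module's standard outputs (vpc_id, public_subnets, private_subnets in terraform-aws-modules/vpc) can be re-exported. A minimal outputs.tf sketch — the output names here are my own, not part of the workshop repo:

output "vpc_id" {
  description = "ID of the lab VPC"
  value       = module.vpc.vpc_id
}

output "public_subnet_ids" {
  description = "Public subnet IDs (tagged for internet-facing ELBs)"
  value       = module.vpc.public_subnets
}

output "private_subnet_ids" {
  description = "Private subnet IDs (tagged for internal ELBs)"
  value       = module.vpc.private_subnets
}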
eks.tf
data "aws_caller_identity" "current" {}
resource "aws_iam_policy" "external_dns_policy" {
name = "${var.ClusterBaseName}ExternalDNSPolicy"
description = "Policy for allowing ExternalDNS to modify Route 53 records"
policy = jsonencode({
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets"
],
"Resource": [
"arn:aws:route53:::hostedzone/*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones",
"route53:ListResourceRecordSets"
],
"Resource": [
"*"
]
}
]
})
}
resource "aws_iam_role_policy_attachment" "external_dns_policy_attach" {
role = "${var.ClusterBaseName}-node-group-eks-node-group"
policy_arn = aws_iam_policy.external_dns_policy.arn
depends_on = [module.eks]
}
resource "aws_security_group" "node_group_sg" {
name = "${var.ClusterBaseName}-node-group-sg"
description = "Security group for EKS Node Group"
vpc_id = module.vpc.vpc_id
tags = {
Name = "${var.ClusterBaseName}-node-group-sg"
}
}
module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "~>20.0"
cluster_name = var.ClusterBaseName
cluster_version = var.KubernetesVersion
cluster_endpoint_private_access = false
cluster_endpoint_public_access = true
cluster_addons = {
coredns = {
most_recent = true
}
kube-proxy = {
most_recent = true
}
vpc-cni = {
most_recent = true
}
}
vpc_id = module.vpc.vpc_id
enable_irsa = true
subnet_ids = module.vpc.public_subnets
eks_managed_node_groups = {
default = {
name = "${var.ClusterBaseName}-node-group"
use_name_prefix = false
instance_type = var.WorkerNodeInstanceType
desired_size = var.WorkerNodeCount
max_size = var.WorkerNodeCount + 2
min_size = var.WorkerNodeCount - 1
disk_size = var.WorkerNodeVolumesize
subnets = module.vpc.public_subnets
key_name = var.KeyName
vpc_security_group_ids = [aws_security_group.node_group_sg.id]
iam_role_name = "${var.ClusterBaseName}-node-group-eks-node-group"
iam_role_use_name_prefix = false
iam_role_additional_policies = {
"${var.ClusterBaseName}ExternalDNSPolicy" = aws_iam_policy.external_dns_policy.arn
}
}
}
access_entries = {
admin = {
kubernetes_groups = []
principal_arn = "${data.aws_caller_identity.current.arn}"
policy_associations = {
myeks = {
policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSClusterAdminPolicy"
access_scope = {
namespaces = []
type = "cluster"
}
}
}
}
}
tags = {
Environment = "${var.ClusterBaseName}-lab"
Terraform = "true"
}
}
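The access_entries block replaces the old aws-auth ConfigMap flow: here the caller gets AmazonEKSClusterAdminPolicy at cluster scope. Granting another principal is just another entry in the same map; a sketch with a hypothetical role ARN (AmazonEKSViewPolicy is one of the managed EKS access policies):

    viewer = {
      kubernetes_groups = []
      principal_arn     = "arn:aws:iam::111122223333:role/TeamViewer" # hypothetical
      policy_associations = {
        view = {
          policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy"
          access_scope = {
            namespaces = []
            type       = "cluster"
          }
        }
      }
    }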
irsa.tf
locals {
  cluster_oidc_issuer_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:oidc-provider/${replace(module.eks.cluster_oidc_issuer_url, "https://", "")}"
}

# ExternalDNS IRSA module, kept disabled here (enabled = false); the
# ExternalDNS permissions are handled via the node role policy in eks.tf.
module "eks-external-dns" {
  source  = "DNXLabs/eks-external-dns/aws"
  version = "0.2.0"

  cluster_name                     = var.ClusterBaseName
  cluster_identity_oidc_issuer     = module.eks.cluster_oidc_issuer_url
  cluster_identity_oidc_issuer_arn = local.cluster_oidc_issuer_arn
  enabled                          = false
}

# IRSA role for the AWS Load Balancer Controller.
module "aws_load_balancer_controller_irsa_role" {
  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
  version = "5.3.1"

  role_name                              = "${var.ClusterBaseName}-aws-load-balancer-controller"
  attach_load_balancer_controller_policy = true

  oidc_providers = {
    ex = {
      provider_arn               = module.eks.oidc_provider_arn
      namespace_service_accounts = ["kube-system:aws-load-balancer-controller"]
    }
  }
}

provider "kubernetes" {
  host                   = module.eks.cluster_endpoint
  token                  = data.aws_eks_cluster_auth.cluster.token
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
}

data "aws_eks_cluster" "cluster" {
  name       = module.eks.cluster_name
  depends_on = [module.eks]
}

data "aws_eks_cluster_auth" "cluster" {
  name       = module.eks.cluster_name
  depends_on = [module.eks]
}

# Pre-create the controller's service account, annotated with the IRSA role.
resource "kubernetes_service_account" "aws_lb_controller" {
  metadata {
    name      = "aws-load-balancer-controller"
    namespace = "kube-system"
    annotations = {
      "eks.amazonaws.com/role-arn" = module.aws_load_balancer_controller_irsa_role.iam_role_arn
    }
  }
}
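Note that irsa.tf only creates the IAM role and the annotated service account; the controller itself still has to be installed separately. A hedged sketch of the matching Helm install, using the eks/aws-load-balancer-controller chart with serviceAccount.create=false so Helm reuses the account created by Terraform:

helm repo add eks https://aws.github.io/eks-charts
helm install aws-load-balancer-controller eks/aws-load-balancer-controller \
  -n kube-system \
  --set clusterName=myeks \
  --set serviceAccount.create=false \
  --set serviceAccount.name=aws-load-balancer-controller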
var.tf
variable "KeyName" {
description = "Name of an existing EC2 KeyPair to enable SSH access to the instances."
type = string
}
variable "ClusterBaseName" {
description = "Base name of the cluster."
type = string
default = "myeks"
}
variable "KubernetesVersion" {
description = "Kubernetes version for the EKS cluster."
type = string
default = "1.29"
}
variable "WorkerNodeInstanceType" {
description = "EC2 instance type for the worker nodes."
type = string
default = "t3.medium"
}
variable "WorkerNodeCount" {
description = "Number of worker nodes."
type = number
default = 3
}
variable "WorkerNodeVolumesize" {
description = "Volume size for worker nodes (in GiB)."
type = number
default = 30
}
variable "TargetRegion" {
description = "AWS region where the resources will be created."
type = string
default = "ap-northeast-2"
}
variable "availability_zones" {
description = "List of availability zones."
type = list(string)
default = ["ap-northeast-2a", "ap-northeast-2b", "ap-northeast-2c"]
}
variable "VpcBlock" {
description = "CIDR block for the VPC."
type = string
default = "192.168.0.0/16"
}
variable "public_subnet_blocks" {
description = "List of CIDR blocks for the public subnets."
type = list(string)
default = ["192.168.1.0/24", "192.168.2.0/24", "192.168.3.0/24"]
}
variable "private_subnet_blocks" {
description = "List of CIDR blocks for the private subnets."
type = list(string)
default = ["192.168.11.0/24", "192.168.12.0/24", "192.168.13.0/24"]
}
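Every variable except KeyName has a default, so only the key pair name must be supplied. Instead of the TF_VAR_KeyName environment variable used in the next step, a terraform.tfvars file works just as well; a minimal sketch (the key pair name is a placeholder):

KeyName           = "my-keypair" # replace with your EC2 key pair name
ClusterBaseName   = "myeks"
KubernetesVersion = "1.29"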
# Fetch the code
git clone https://github.com/gasida/aews-cicd.git
cd aews-cicd/4
# Set the Terraform environment variable for the key pair
export TF_VAR_KeyName=[your ssh keypair name]
echo $TF_VAR_KeyName
#
terraform init
terraform plan
# Deployment completes after about 10 minutes
terraform apply -auto-approve
#
terraform state list
data.aws_caller_identity.current
data.aws_eks_cluster.cluster
data.aws_eks_cluster_auth.cluster
aws_iam_policy.external_dns_policy
aws_iam_role_policy_attachment.external_dns_policy_attach
aws_security_group.node_group_sg
kubernetes_service_account.aws_lb_controller
module.aws_load_balancer_controller_irsa_role.data.aws_caller_identity.current
module.aws_load_balancer_controller_irsa_role.data.aws_iam_policy_document.load_balancer_controller[0]
module.aws_load_balancer_controller_irsa_role.data.aws_iam_policy_document.this[0]
module.aws_load_balancer_controller_irsa_role.data.aws_partition.current
module.aws_load_balancer_controller_irsa_role.aws_iam_policy.load_balancer_controller[0]
module.aws_load_balancer_controller_irsa_role.aws_iam_role.this[0]
module.aws_load_balancer_controller_irsa_role.aws_iam_role_policy_attachment.load_balancer_controller[0]
module.eks.data.aws_caller_identity.current
module.eks.data.aws_eks_addon_version.this["coredns"]
module.eks.data.aws_eks_addon_version.this["kube-proxy"]
module.eks.data.aws_eks_addon_version.this["vpc-cni"]
module.eks.data.aws_iam_policy_document.assume_role_policy[0]
module.eks.data.aws_iam_session_context.current
module.eks.data.aws_partition.current
module.eks.data.tls_certificate.this[0]
module.eks.aws_cloudwatch_log_group.this[0]
module.eks.aws_ec2_tag.cluster_primary_security_group["Environment"]
module.eks.aws_ec2_tag.cluster_primary_security_group["Terraform"]
module.eks.aws_eks_access_entry.this["admin"]
module.eks.aws_eks_access_policy_association.this["admin_myeks"]
module.eks.aws_eks_addon.this["coredns"]
module.eks.aws_eks_addon.this["kube-proxy"]
module.eks.aws_eks_addon.this["vpc-cni"]
module.eks.aws_eks_cluster.this[0]
module.eks.aws_iam_openid_connect_provider.oidc_provider[0]
module.eks.aws_iam_policy.cluster_encryption[0]
module.eks.aws_iam_role.this[0]
module.eks.aws_iam_role_policy_attachment.cluster_encryption[0]
module.eks.aws_iam_role_policy_attachment.this["AmazonEKSClusterPolicy"]
module.eks.aws_iam_role_policy_attachment.this["AmazonEKSVPCResourceController"]
module.eks.aws_security_group.cluster[0]
module.eks.aws_security_group.node[0]
module.eks.aws_security_group_rule.cluster["ingress_nodes_443"]
module.eks.aws_security_group_rule.node["egress_all"]
module.eks.aws_security_group_rule.node["ingress_cluster_443"]
module.eks.aws_security_group_rule.node["ingress_cluster_4443_webhook"]
module.eks.aws_security_group_rule.node["ingress_cluster_6443_webhook"]
module.eks.aws_security_group_rule.node["ingress_cluster_8443_webhook"]
module.eks.aws_security_group_rule.node["ingress_cluster_9443_webhook"]
module.eks.aws_security_group_rule.node["ingress_cluster_kubelet"]
module.eks.aws_security_group_rule.node["ingress_nodes_ephemeral"]
module.eks.aws_security_group_rule.node["ingress_self_coredns_tcp"]
module.eks.aws_security_group_rule.node["ingress_self_coredns_udp"]
module.eks.time_sleep.this[0]
module.eks-external-dns.data.aws_region.current
module.vpc.aws_default_network_acl.this[0]
module.vpc.aws_default_route_table.default[0]
module.vpc.aws_default_security_group.this[0]
module.vpc.aws_eip.nat[0]
module.vpc.aws_internet_gateway.this[0]
module.vpc.aws_nat_gateway.this[0]
module.vpc.aws_route.private_nat_gateway[0]
module.vpc.aws_route.public_internet_gateway[0]
module.vpc.aws_route_table.private[0]
module.vpc.aws_route_table.public[0]
module.vpc.aws_route_table_association.private[0]
module.vpc.aws_route_table_association.private[1]
module.vpc.aws_route_table_association.private[2]
module.vpc.aws_route_table_association.public[0]
module.vpc.aws_route_table_association.public[1]
module.vpc.aws_route_table_association.public[2]
module.vpc.aws_subnet.private[0]
module.vpc.aws_subnet.private[1]
module.vpc.aws_subnet.private[2]
module.vpc.aws_subnet.public[0]
module.vpc.aws_subnet.public[1]
module.vpc.aws_subnet.public[2]
module.vpc.aws_vpc.this[0]
module.eks.module.eks_managed_node_group["default"].data.aws_caller_identity.current
module.eks.module.eks_managed_node_group["default"].data.aws_iam_policy_document.assume_role_policy[0]
module.eks.module.eks_managed_node_group["default"].data.aws_partition.current
module.eks.module.eks_managed_node_group["default"].aws_eks_node_group.this[0]
module.eks.module.eks_managed_node_group["default"].aws_iam_role.this[0]
module.eks.module.eks_managed_node_group["default"].aws_iam_role_policy_attachment.additional["myeksExternalDNSPolicy"]
module.eks.module.eks_managed_node_group["default"].aws_iam_role_policy_attachment.this["AmazonEC2ContainerRegistryReadOnly"]
module.eks.module.eks_managed_node_group["default"].aws_iam_role_policy_attachment.this["AmazonEKSWorkerNodePolicy"]
module.eks.module.eks_managed_node_group["default"].aws_iam_role_policy_attachment.this["AmazonEKS_CNI_Policy"]
module.eks.module.eks_managed_node_group["default"].aws_launch_template.this[0]
module.eks.module.kms.data.aws_caller_identity.current[0]
module.eks.module.kms.data.aws_iam_policy_document.this[0]
module.eks.module.kms.data.aws_partition.current[0]
module.eks.module.kms.aws_kms_alias.this["cluster"]
module.eks.module.kms.aws_kms_key.this[0]
module.eks.module.eks_managed_node_group["default"].module.user_data.null_resource.validate_cluster_service_cidr
#
terraform console
-----------------
data.aws_caller_identity.current
data.aws_caller_identity.current.arn
data.aws_eks_cluster.cluster
data.aws_eks_cluster.cluster.name
data.aws_eks_cluster.cluster.version
data.aws_eks_cluster.cluster.access_config
data.aws_eks_cluster_auth.cluster
kubernetes_service_account.aws_lb_controller
-----------------
terraform state show ...
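For example, to inspect the cluster resource recorded in the state (resource address taken from the terraform state list output above):

terraform state show 'module.eks.aws_eks_cluster.this[0]'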
#
cat terraform.tfstate | jq
more terraform.tfstate
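For a quick summary of what the state file contains, a jq one-liner over the resource types works; a sketch:

jq -r '.resources[].type' terraform.tfstate | sort | uniq -c | sort -rn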
#
kubectl get node -v=6
# Update the kubeconfig with the EKS cluster credentials
CLUSTER_NAME=myeks
aws eks update-kubeconfig --region ap-northeast-2 --name $CLUSTER_NAME
kubectl config rename-context "arn:aws:eks:ap-northeast-2:$(aws sts get-caller-identity --query 'Account' --output text):cluster/$CLUSTER_NAME" "Aews-Labs"
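To confirm the rename took effect:

kubectl config get-contexts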
#
kubectl cluster-info
Kubernetes control plane is running at https://95B7CF54FC62775CF381ED4B27A7A9A2.gr7.ap-northeast-2.eks.amazonaws.com
CoreDNS is running at https://95B7CF54FC62775CF381ED4B27A7A9A2.gr7.ap-northeast-2.eks.amazonaws.com/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
kubectl get node --label-columns=node.kubernetes.io/instance-type,eks.amazonaws.com/capacityType,topology.kubernetes.io/zone
NAME                                               STATUS   ROLES    AGE   VERSION               INSTANCE-TYPE   CAPACITYTYPE   ZONE
ip-192-168-1-109.ap-northeast-2.compute.internal   Ready    <none>   19m   v1.29.0-eks-5e0fdde   t3.medium       ON_DEMAND      ap-northeast-2a
ip-192-168-2-80.ap-northeast-2.compute.internal    Ready    <none>   19m   v1.29.0-eks-5e0fdde   t3.medium       ON_DEMAND      ap-northeast-2b
ip-192-168-3-161.ap-northeast-2.compute.internal   Ready    <none>   19m   v1.29.0-eks-5e0fdde   t3.medium       ON_DEMAND      ap-northeast-2c
kubectl get pod -A
NAMESPACE     NAME                      READY   STATUS    RESTARTS   AGE
kube-system   aws-node-5kt6m            2/2     Running   0          18m
kube-system   aws-node-bwqgv            2/2     Running   0          19m
kube-system   aws-node-hm8mq            2/2     Running   0          19m
kube-system   coredns-6bddb7676-7zm2q   1/1     Running   0          19m
kube-system   coredns-6bddb7676-8qrvh   1/1     Running   0          19m
kube-system   kube-proxy-d2p88          1/1     Running   0          19m
kube-system   kube-proxy-gj7s9          1/1     Running   0          19m
kube-system   kube-proxy-k5lk6          1/1     Running   0          19m
#
cd ..
mkdir 5
cd 5
cp ../4/*.tf .
ls
#
terraform init
# Directory 5 keeps its own state, so this apply creates a second, independent cluster
terraform apply -auto-approve -var=ClusterBaseName=myeks2 -var=KubernetesVersion="1.28"
# Fetch credentials for the second EKS cluster
CLUSTER_NAME2=myeks2
aws eks update-kubeconfig --region ap-northeast-2 --name $CLUSTER_NAME2 --kubeconfig ./myeks2config
# Check the second cluster
kubectl --kubeconfig ./myeks2config get node
NAME                                               STATUS   ROLES    AGE   VERSION
ip-192-168-1-130.ap-northeast-2.compute.internal   Ready    <none>   16m   v1.28.5-eks-5e0fdde
ip-192-168-2-230.ap-northeast-2.compute.internal   Ready    <none>   16m   v1.28.5-eks-5e0fdde
ip-192-168-3-122.ap-northeast-2.compute.internal   Ready    <none>   16m   v1.28.5-eks-5e0fdde
kubectl --kubeconfig ./myeks2config get pod -A
NAMESPACE     NAME                       READY   STATUS    RESTARTS   AGE
kube-system   aws-node-75h7m             2/2     Running   0          18m
kube-system   aws-node-p9nhj             2/2     Running   0          18m
kube-system   aws-node-vm6c6             2/2     Running   0          18m
kube-system   coredns-55474bf7b9-7fspj   1/1     Running   0          18m
kube-system   coredns-55474bf7b9-hbklv   1/1     Running   0          18m
kube-system   kube-proxy-b7cdm           1/1     Running   0          18m
kube-system   kube-proxy-jx85n           1/1     Running   0          18m
kube-system   kube-proxy-ktknh           1/1     Running   0          18m
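When the lab is finished, each cluster is destroyed from its own directory; a sketch (for myeks2, the same -var overrides used at apply time must be passed again):

# In aews-cicd/5: tear down the second cluster
terraform destroy -auto-approve -var=ClusterBaseName=myeks2 -var=KubernetesVersion="1.28"
# In aews-cicd/4: tear down the first cluster
cd ../4
terraform destroy -auto-approve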