Prerequisites
Create an IAM user
- {alias}
- Attach the AdministratorAccess policy
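If you prefer to manage this user with Terraform instead of the console, a minimal sketch could look like the following; the resource names admin_user and admin_access are illustrative, and {alias} must be replaced with the actual user name.
resource "aws_iam_user" "admin_user" {
  name = "{alias}" # replace with the actual IAM user name
}

resource "aws_iam_user_policy_attachment" "admin_access" {
  user       = aws_iam_user.admin_user.name
  policy_arn = "arn:aws:iam::aws:policy/AdministratorAccess"
}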
Set up the AWS VPC
- 2 public subnets (one per AZ for redundancy)
- 2 private subnets (one per AZ for redundancy)
- Internet gateway (IGW)
- NAT gateway (route tables are sketched after the Terraform code below)
resource "aws_vpc" "default" {
cidr_block = "10.1.0.0/16"
tags = {
Name = "my-eks-vpc"
}
}
resource "aws_subnet" "public-subnet-1" {
vpc_id = aws_vpc.default.id
cidr_block = "10.1.1.0/26"
availability_zone = "ap-northeast-2a"
tags = {
Name = "my-eks-public-subnet-a"
}
}
resource "aws_subnet" "public-subnet-2" {
vpc_id = aws_vpc.default.id
cidr_block = "10.1.1.64/26"
availability_zone = "ap-northeast-2c"
tags = {
Name = "my-eks-public-subnet-c"
}
}
resource "aws_subnet" "private_subnet-1" {
vpc_id = aws_vpc.default.id
cidr_block = "10.1.1.128/27"
availability_zone = "ap-northeast-2a"
tags = {
Name = "my-eks-private-subnet-a"
}
}
resource "aws_subnet" "private_subnet-2" {
vpc_id = aws_vpc.default.id
cidr_block = "10.1.1.160/27"
availability_zone = "ap-northeast-2c"
tags = {
Name = "my-eks-private-subnet-c"
}
}
resource "aws_nat_gateway" "private-nat" {
connectivity_type = "private"
subnet_id = aws_subnet.private_subnet-1.id
}
resource "aws_internet_gateway" "igw" {
vpc_id = aws_vpc.default.id
}
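The snippet above creates the IGW and the NAT gateway but does not wire them into route tables. Below is a minimal routing sketch under the assumption that the public subnets should route to the IGW and the private subnets to the NAT gateway; the resource names are illustrative, and a 0.0.0.0/0 route through a NAT gateway only reaches the internet if the gateway is public.
resource "aws_route_table" "public" {
  vpc_id = aws_vpc.default.id

  # Default route for public subnets goes to the internet gateway
  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.igw.id
  }
}

resource "aws_route_table_association" "public_a" {
  subnet_id      = aws_subnet.public-subnet-1.id
  route_table_id = aws_route_table.public.id
}

resource "aws_route_table_association" "public_c" {
  subnet_id      = aws_subnet.public-subnet-2.id
  route_table_id = aws_route_table.public.id
}

resource "aws_route_table" "private" {
  vpc_id = aws_vpc.default.id

  # Default route for private subnets goes to the NAT gateway
  # (internet-bound traffic requires a public NAT gateway)
  route {
    cidr_block     = "0.0.0.0/0"
    nat_gateway_id = aws_nat_gateway.private-nat.id
  }
}

resource "aws_route_table_association" "private_a" {
  subnet_id      = aws_subnet.private_subnet-1.id
  route_table_id = aws_route_table.private.id
}

resource "aws_route_table_association" "private_c" {
  subnet_id      = aws_subnet.private_subnet-2.id
  route_table_id = aws_route_table.private.id
}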
AWS CLI (v2)
Configure a profile
# Configure the admin profile
$ aws configure --profile admin
$ aws configure list --profile admin   # verify the configuration
# Check the current default credentials
$ cat ~/.aws/config
$ aws sts get-caller-identity --profile admin
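After running aws configure --profile admin, ~/.aws/config should contain an entry along these lines (the region and output values here are just examples):
[profile admin]
region = ap-northeast-2
output = json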
Set up the tfstate backend (S3 bucket)
- Create the bucket
- Seoul region (ap-northeast-2): {alias}-tfstate
- Note: enable bucket versioning
resource "aws_s3_bucket" "eks_backend" {
  bucket = "my-eks-tfstate-bucket"

  versioning {
    enabled = true
  }

  tags = {
    Name = "eks-terraform-backend"
  }
}
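With the AWS provider pinned to ~> 4.46 in provider.tf below, the inline versioning block is deprecated in favor of a separate resource. A v4-style sketch of the same setting (the resource name eks_backend_versioning is illustrative):
resource "aws_s3_bucket_versioning" "eks_backend_versioning" {
  bucket = aws_s3_bucket.eks_backend.id

  versioning_configuration {
    status = "Enabled"
  }
}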
Git repository
Folder structure
├── backend.tf
├── data.tf
├── main.tf
├── modules
│   └── eks-cluster
│       ├── data.tf
│       ├── main.tf
│       ├── output.tf
│       ├── provider.tf
│       └── variables.tf
├── output.tf
└── provider.tf
backend.tf
# Backend configuration for storing the tfstate
terraform {
  backend "s3" {
    bucket  = "my-eks-tfstate-bucket"
    key     = "terraform.state"
    region  = "ap-northeast-2"
    profile = "admin"
  }
}
data.tf
data "aws_caller_identity" "current" {}
main.tf
module "eks" {
# eks 모듈에서 사용할 변수 정의
source = "./modules/eks-cluster"
cluster_name = "my-eks-cluster"
cluster_version = "1.24"
vpc_id = "<VPC ID>"
private_subnets = ["<Subnet ID>", "<Subnet ID>"]
public_subnets = ["<Subnet ID>", "<Subnet ID>"]
}
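Instead of pasting IDs by hand, the VPC and subnets created earlier can be looked up by their Name tags, assuming they exist in the same account and region; the data source names eks_vpc, eks_private, and eks_public are illustrative.
data "aws_vpc" "eks_vpc" {
  filter {
    name   = "tag:Name"
    values = ["my-eks-vpc"]
  }
}

data "aws_subnets" "eks_private" {
  filter {
    name   = "tag:Name"
    values = ["my-eks-private-subnet-*"]
  }
}

data "aws_subnets" "eks_public" {
  filter {
    name   = "tag:Name"
    values = ["my-eks-public-subnet-*"]
  }
}

# These could then replace the hard-coded IDs in the module block:
#   vpc_id          = data.aws_vpc.eks_vpc.id
#   private_subnets = data.aws_subnets.eks_private.ids
#   public_subnets  = data.aws_subnets.eks_public.ids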
output.tf
output "cluster_id" {
value = module.eks.cluster_id
}
output "cluster_primary_security_group_id" {
value = module.eks.cluster_primary_security_group_id
}
provider.tf
# AWS provider configuration
terraform {
  required_version = "~> 1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.46"
    }
  }
}

provider "aws" {
  profile = "admin"
  region  = "ap-northeast-2"
}
./modules/eks-cluster/data.tf
data "aws_eks_cluster" "cluster" {
name = module.eks.cluster_id
}
data "aws_eks_cluster_auth" "cluster" {
name = module.eks.cluster_id
}
data "aws_caller_identity" "current" {}
data "aws_partition" "current" {}
data "aws_availability_zones" "available" {}
data "aws_ecrpublic_authorization_token" "token" {
provider = aws.virginia
}
./modules/eks-cluster/main.tf
locals {
  cluster_name    = var.cluster_name
  cluster_version = var.cluster_version
  region          = "ap-northeast-2"

  vpc_id          = var.vpc_id
  public_subnets  = var.public_subnets
  private_subnets = var.private_subnets

  tag = {
    Environment = "test"
    Terraform   = "true"
  }
}

module "eks" {
  # https://registry.terraform.io/modules/terraform-aws-modules/eks/aws/latest
  source  = "terraform-aws-modules/eks/aws"
  version = "18.31.0"

  # Cluster name and version
  cluster_name    = local.cluster_name
  cluster_version = local.cluster_version

  # Cluster endpoint access
  cluster_endpoint_private_access = true
  cluster_endpoint_public_access  = true

  # Networking
  vpc_id     = local.vpc_id
  subnet_ids = local.private_subnets

  # Enable IRSA (creates the IAM OIDC provider)
  enable_irsa = true

  node_security_group_additional_rules = {
    ingress_nodes_karpenter_port = {
      description                   = "Cluster API to Node group for Karpenter webhook"
      protocol                      = "tcp"
      from_port                     = 8443
      to_port                       = 8443
      type                          = "ingress"
      source_cluster_security_group = true
    }
  }

  # Tag the node security group for Karpenter discovery
  node_security_group_tags = {
    "karpenter.sh/discovery" = local.cluster_name
  }

  eks_managed_node_groups = {
    initial = {
      instance_types         = ["t3.large"]
      create_security_group  = false
      create_launch_template = false # do not remove
      launch_template_name   = ""    # do not remove

      min_size     = 2
      max_size     = 3
      desired_size = 2

      iam_role_additional_policies = [
        # Required by Karpenter
        "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
      ]
    }
  }

  # Console identity mapping (AWS user)
  # Register console users or roles in the aws-auth ConfigMap
  manage_aws_auth_configmap = true
  aws_auth_users = [
    {
      userarn  = "arn:aws:iam::<AWS account ID>:user/admin"
      username = "admin"
      groups   = ["system:masters"]
    },
  ]
  aws_auth_accounts = [
    "<AWS account ID>"
  ]
}
// Private subnet tags
resource "aws_ec2_tag" "private_subnet_tag" {
  for_each    = toset(local.private_subnets)
  resource_id = each.value
  key         = "kubernetes.io/role/internal-elb"
  value       = "1"
}

resource "aws_ec2_tag" "private_subnet_cluster_tag" {
  for_each    = toset(local.private_subnets)
  resource_id = each.value
  key         = "kubernetes.io/cluster/${local.cluster_name}"
  value       = "owned"
}

resource "aws_ec2_tag" "private_subnet_karpenter_tag" {
  for_each    = toset(local.private_subnets)
  resource_id = each.value
  key         = "karpenter.sh/discovery/${local.cluster_name}"
  value       = local.cluster_name
}

// Public subnet tags
resource "aws_ec2_tag" "public_subnet_tag" {
  for_each    = toset(local.public_subnets)
  resource_id = each.value
  key         = "kubernetes.io/role/elb"
  value       = "1"
}
./modules/eks-cluster/output.tf
output "cluster_id" {
  value = module.eks.cluster_id
}

output "cluster_primary_security_group_id" {
  value = module.eks.cluster_primary_security_group_id
}
./modules/eks-cluster/provider.tf
terraform {
  required_version = "~> 1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.46"
    }
    helm = {
      source  = "hashicorp/helm"
      version = "~> 2.5"
    }
    kubectl = {
      source  = "gavinbunney/kubectl"
      version = "~> 1.14"
    }
  }
}

provider "aws" {
  profile = "admin"
  region  = "us-east-1"
  alias   = "virginia"
}

provider "kubernetes" {
  host                   = data.aws_eks_cluster.cluster.endpoint
  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
  token                  = data.aws_eks_cluster_auth.cluster.token
}

provider "helm" {
  kubernetes {
    host                   = data.aws_eks_cluster.cluster.endpoint
    cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
    token                  = data.aws_eks_cluster_auth.cluster.token
  }
}

provider "kubectl" {
  apply_retry_count      = 5
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
  load_config_file       = false

  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    command     = "aws"
    args        = ["eks", "get-token", "--cluster-name", module.eks.cluster_id]
  }
}
./modules/eks-cluster/variables.tf
variable "cluster_name" {
}
variable "cluster_version" {
}
variable "vpc_id" {
}
variable "public_subnets" {
}
variable "private_subnets" {
}
$ terraform init
$ terraform plan
$ terraform apply
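Once the apply completes, a quick verification from the CLI could look like this, assuming the cluster name and profile used above:
# Generate a kubeconfig entry for the new cluster using the admin profile
$ aws eks update-kubeconfig --name my-eks-cluster --region ap-northeast-2 --profile admin
# The managed node group instances should report Ready
$ kubectl get nodes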