Terraform으로 EKS를 구축하는 방법

EKS(Amazon Elastic Kubernetes Service)는 AWS에서 완전 관리되는 Kubernetes 서비스입니다. 이제 Terraform으로 EKS를 구축하려고 합니다. 모든 코드는 GitHub에 업로드되어 있습니다.

준비



먼저 VPC를 구축한 다음 EKS를 구축해야 합니다. VPC 구축 방법은 이전 글을 참조할 수 있습니다.

코드 구조




infrastructure
├── module
   ├── networking
   ├── eks
      ├── main.tf
      ├── outputs.tf
      └── variables.tf
   └── jumpserver
       ├── main.tf
       ├── outputs.tf
       └── variables.tf
├── region
   └── virginia
       ├── main.tf
       └── providers.tf
└── setup
    ├── main.tf
    ├── outputs.tf
    ├── providers.tf
    └── variables.tf


EKS



이제 EKS 빌드를 시작합니다. eks 폴더의 소스 코드는 다음과 같습니다.
  • main.tf

  • locals {
      # Module-level aliases for the input variables (see variables.tf).
      name               = var.name
      vpc_id             = var.vpc_id
      public_subnets_id  = var.public_subnets_id
      # The caller passes nested lists of subnet ids; flatten them into a
      # single flat list and drop duplicates for the node group below.
      private_subnets_id = distinct(flatten(var.private_subnets_id))
      # Node-group scaling bounds.
      desired_size       = var.desired_size
      max_size           = var.max_size
      min_size           = var.min_size
      # Source security groups allowed to reach the cluster (used as the
      # ingress source of the cluster security group).
      security_group_ids = var.security_group_ids
      tags               = var.eks_tags
    }
    
    # IAM role assumed by the EKS control plane.
    resource "aws_iam_role" "eks_role" {
      name = "eks-role"

      # Trust policy: only the EKS service may assume this role.
      assume_role_policy = jsonencode({
        Version = "2012-10-17"
        Statement = [
          {
            Effect = "Allow"
            Action = ["sts:AssumeRole"]
            Principal = {
              Service = ["eks.amazonaws.com"]
            }
          }
        ]
      })

      tags = merge({ Name : "EKS Role" }, local.tags)
    }

    # Let the control plane publish logs and metrics to CloudWatch.
    resource "aws_iam_role_policy_attachment" "eks_CloudWatchFullAccess" {
      role       = aws_iam_role.eks_role.name
      policy_arn = "arn:aws:iam::aws:policy/CloudWatchFullAccess"
    }

    # Core permissions required by every EKS cluster.
    resource "aws_iam_role_policy_attachment" "eks_AmazonEKSClusterPolicy" {
      role       = aws_iam_role.eks_role.name
      policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
    }

    # Optionally, enable Security Groups for Pods.
    resource "aws_iam_role_policy_attachment" "eks_AmazonEKSVPCResourceController" {
      role       = aws_iam_role.eks_role.name
      policy_arn = "arn:aws:iam::aws:policy/AmazonEKSVPCResourceController"
    }
    
    # Default Security Group of EKS.
    # Allows all traffic from the supplied source security groups (e.g. the
    # jump server) and all outbound traffic.
    resource "aws_security_group" "security_group" {
      name        = "${local.name} Security Group"
      description = "Default SG to allow traffic from the EKS"
      vpc_id      = local.vpc_id

      # Protocol "-1" with ports 0-0 means "all protocols, all ports".
      # The previous rule used protocol "TCP" with ports 0-0, which only
      # matches TCP port 0 — effectively blocking the intended traffic.
      ingress {
        from_port       = 0
        to_port         = 0
        protocol        = "-1"
        security_groups = local.security_group_ids
      }

      # When Terraform manages a security group without an egress block it
      # removes AWS's default allow-all egress rule, so restore it explicitly
      # (the cluster needs outbound access, e.g. for image pulls).
      egress {
        from_port   = 0
        to_port     = 0
        protocol    = "-1"
        cidr_blocks = ["0.0.0.0/0"]
      }

      tags = merge({
        Name = "${local.name} Security Group"
      }, local.tags)
    }
    
    # EKS Cluster.
    # NOTE(review): Kubernetes 1.22 is past end of standard support on EKS;
    # confirm against the current EKS release calendar and plan an upgrade
    # (changing `version` here triggers an in-place cluster upgrade).
    resource "aws_eks_cluster" "eks" {
      name    = local.name
      version = "1.22"
    
      # Ship every control-plane log type to CloudWatch Logs.
      enabled_cluster_log_types = [
        "api",
        "audit",
        "authenticator",
        "controllerManager",
        "scheduler"
      ]
      # Role defined above; grants the control plane its AWS permissions.
      role_arn = aws_iam_role.eks_role.arn
    
      timeouts {}
    
      vpc_config {
        endpoint_private_access = true
        endpoint_public_access  = true
        # NOTE(review): the public API endpoint is reachable from anywhere;
        # consider restricting this to known CIDRs.
        public_access_cidrs     = [
          "0.0.0.0/0",
        ]
        security_group_ids = [
          aws_security_group.security_group.id
        ]
        # Register both public and private subnets with the cluster
        # (the inputs are nested lists, hence the flatten()).
        subnet_ids = flatten([var.public_subnets_id, var.private_subnets_id])
      }
    
      tags = merge({
        Name = local.name
      }, local.tags)
    }
    
    # IAM role assumed by the worker-node EC2 instances of the node group.
    resource "aws_iam_role" "node_group_role" {
      name                  = format("%s-node-group-role", lower(aws_eks_cluster.eks.name))
      path                  = "/"
      force_detach_policies = false
      max_session_duration  = 3600

      # Trust policy: only EC2 may assume this role.
      assume_role_policy = jsonencode({
        "Version" : "2012-10-17",
        "Statement" : [
          {
            "Effect" : "Allow",
            "Action" : "sts:AssumeRole",
            "Principal" : {
              "Service" : "ec2.amazonaws.com"
            }
          }
        ]
      })
    }
    
    # Pull container images from Amazon ECR.
    resource "aws_iam_role_policy_attachment" "node_group_AmazonEC2ContainerRegistryReadOnly" {
      role       = aws_iam_role.node_group_role.id
      policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
    }

    # Base permissions every EKS worker node needs.
    resource "aws_iam_role_policy_attachment" "node_group_AmazonEKSWorkerNodePolicy" {
      role       = aws_iam_role.node_group_role.id
      policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
    }

    # Allow the nodes to be managed via AWS Systems Manager.
    resource "aws_iam_role_policy_attachment" "node_group_AmazonEC2RoleforSSM" {
      role       = aws_iam_role.node_group_role.id
      policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEC2RoleforSSM"
    }

    # Required by the VPC CNI plugin for pod networking.
    resource "aws_iam_role_policy_attachment" "node_group_AmazonEKS_CNI_Policy" {
      role       = aws_iam_role.node_group_role.id
      policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
    }

    # Allow the CloudWatch agent to publish node metrics/logs.
    resource "aws_iam_role_policy_attachment" "node_group_CloudWatchAgentServerPolicy" {
      role       = aws_iam_role.node_group_role.id
      policy_arn = "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy"
    }
    
    # Managed node group running on SPOT capacity.
    resource "aws_eks_node_group" "node_group" {
      cluster_name  = aws_eks_cluster.eks.name
      # The EKS API rejects diskSize = 0 (minimum is 1 GiB); 20 GiB is the
      # service default for managed node groups.
      disk_size     = 20
      capacity_type = "SPOT"
      labels        = {
        "eks/cluster-name"   = aws_eks_cluster.eks.name
        "eks/nodegroup-name" = format("nodegroup_%s", lower(aws_eks_cluster.eks.name))
      }
      node_group_name = format("nodegroup_%s", lower(aws_eks_cluster.eks.name))
      node_role_arn   = aws_iam_role.node_group_role.arn
    
      # Worker nodes live in the private subnets only.
      subnet_ids = local.private_subnets_id
    
      scaling_config {
        desired_size = local.desired_size
        max_size     = local.max_size
        min_size     = local.min_size
      }
    
      timeouts {}
    
      lifecycle {
        create_before_destroy = true
      }
    
      # Ensure the node IAM permissions exist before the nodes come up and
      # are not detached before the node group is destroyed (recommended by
      # the AWS provider documentation).
      depends_on = [
        aws_iam_role_policy_attachment.node_group_AmazonEKSWorkerNodePolicy,
        aws_iam_role_policy_attachment.node_group_AmazonEKS_CNI_Policy,
        aws_iam_role_policy_attachment.node_group_AmazonEC2ContainerRegistryReadOnly,
      ]
    
      tags = merge({
        Name                 = local.name
        "eks/cluster-name"   = local.name
        "eks/nodegroup-name" = format("%s Node Group", aws_eks_cluster.eks.name)
        "eks/nodegroup-type" = "managed"
      }, local.tags)
    }
    


  • variables.tf

  • # The EKS cluster name.
    variable "name" {
      description = "The EKS name"
      type        = string
    }
    
    variable "vpc_id" {
      type        = string
      description = "The VPC id"
    }
    
    # The bare `list` keyword is deprecated since Terraform 0.12; use a
    # parameterized type. list(any) keeps compatibility with the nested
    # lists of subnet ids produced by the networking module.
    variable "public_subnets_id" {
      description = "The public subnets id"
      type        = list(any)
    }
    
    variable "private_subnets_id" {
      description = "The private subnets id"
      type        = list(any)
    }
    
    variable "desired_size" {
      description = "The desired size of node"
      type        = number
      default     = 1
    }
    
    variable "max_size" {
      description = "The maximum size of node"
      type        = number
    }
    
    variable "min_size" {
      description = "The minimum size of node"
      type        = number
      default     = 0
    }
    
    # Source security-group ids for the cluster SG ingress rule.
    variable "security_group_ids" {
      description = "The security groups to access EKS"
      type        = list(string)
      default     = []
    }
    
    variable "eks_tags" {
      description = "A map of tags to add to EKS"
      type        = map(string)
      default     = {}
    }
    


  • outputs.tf

  • # API server URL of the cluster, used when configuring kubectl/kubeconfig.
    output "endpoint" {
      description = "The endpoint of the EKS cluster API server"
      value       = aws_eks_cluster.eks.endpoint
    }
    


    점프 서버



    EKS에 직접 액세스할 수 없으므로 EC2를 점프 서버로 사용하여 이를 수행할 수 있습니다. jumpserver 폴더의 소스 코드는 다음과 같습니다.
  • main.tf

  • locals {
      # Module-level aliases for the input variables (see variables.tf).
      vpc_id             = var.vpc_id
      instance_type      = var.instance_type
      instance_ami       = var.instance_ami
      # First subnet of the first group — assumes public_subnets_id is a
      # nested list (list of lists) as produced by the networking module;
      # TODO confirm against that module's output shape.
      subnet_id          = var.public_subnets_id[0][0]
      security_group_ids = var.security_group_ids
      tags               = var.shared_tags
    }
    
    # Define the security group for jump server.
    resource "aws_security_group" "jump-server" {
      name   = "Security group for jump server"
      vpc_id = local.vpc_id
    
      # NOTE(review): SSH is open to the whole internet (0.0.0.0/0).
      # Consider restricting this to a trusted CIDR or fronting the instance
      # with SSM Session Manager instead.
      ingress {
        from_port   = 22
        protocol    = "TCP"
        to_port     = 22
        cidr_blocks = ["0.0.0.0/0"]
        description = "Allow SSH inbound traffic"
      }
    
      # Allow all outbound traffic
      egress {
        from_port        = 0
        protocol         = "-1"
        to_port          = 0
        cidr_blocks      = ["0.0.0.0/0"]
        ipv6_cidr_blocks = ["::/0"]
        description      = "Allow all outbound traffic"
      }
    
      tags = merge({
        Name : "Security group for jump server"
      }, local.tags)
    }
    
    # Define the role to be attached ec2 instance of the jump server.
    # Trust policy: only EC2 may assume this role.
    resource "aws_iam_role" "jump-server" {
      name               = "jump-server-role"
      assume_role_policy = jsonencode({
        "Version" : "2012-10-17",
        "Statement" : [
          {
            "Effect" : "Allow",
            "Action" : [
              "sts:AssumeRole"
            ],
            "Principal" : {
              "Service" : [
                "ec2.amazonaws.com"
              ]
            }
          }
        ]
      })
      tags = merge({
        Name : "Jump Server Role"
      }, local.tags)
    }
    
    # Attach the AdministratorAccess policy to jump server role.
    # NOTE(review): AdministratorAccess grants full account access from an
    # internet-reachable instance — consider scoping this down to the EKS
    # permissions the jump server actually needs.
    resource "aws_iam_role_policy_attachment" "jump-server-AdministratorAccess" {
      policy_arn = "arn:aws:iam::aws:policy/AdministratorAccess"
      role       = aws_iam_role.jump-server.name
    }
    
    # Instance profile that exposes the jump-server role to the EC2 instance.
    resource "aws_iam_instance_profile" "jump-server-profile" {
      name = "jump-server-profile"
      role = aws_iam_role.jump-server.name

      tags = merge({ Name : "Jump Server Profile" }, local.tags)
    }
    
    # The jump-server EC2 instance: a public instance used to reach the
    # cluster's private endpoint over SSH.
    resource "aws_instance" "jump-server" {
      count = 1

      ami           = local.instance_ami
      instance_type = local.instance_type
      subnet_id     = local.subnet_id

      associate_public_ip_address = true
      hibernation                 = false

      # Combine the SSH security group defined above with any extra groups
      # supplied by the caller.
      vpc_security_group_ids = setunion([aws_security_group.jump-server.id], local.security_group_ids)
      iam_instance_profile   = aws_iam_instance_profile.jump-server-profile.name

      tags = merge({ Name = "Jumper Server" }, local.tags)
    }
    


  • variables.tf

  • variable "vpc_id" {
      description = "The VPC id"
      type        = string
    }
    
    variable "instance_type" {
      description = "The EC2 instance type of the jump server"
      type        = string
      default     = "t3.micro"
    }
    
    # NOTE(review): AMI ids are region-specific — this default presumably
    # targets us-east-1; verify before using in another region.
    variable "instance_ami" {
      description = "The AMI id for the jump server instance"
      type        = string
      default     = "ami-0f9fc25dd2506cf6d"
    }
    
    # The bare `list` keyword is deprecated since Terraform 0.12; list(any)
    # keeps compatibility with the nested lists produced by the networking
    # module (this module indexes it as [0][0]).
    variable "public_subnets_id" {
      description = "The public subnet id"
      type        = list(any)
    }
    
    variable "security_group_ids" {
      description = "The security group for jump server"
      type        = list(string)
      default     = []
    }
    
    variable "shared_tags" {
      description = "A map of tags to add to Jump Server"
      type        = map(string)
      default     = {}
    }
    


  • outputs.tf

  • # Exported so callers can whitelist the jump server in other security
    # groups (e.g. as the ingress source of the EKS cluster SG).
    output "jump-server-security-group-id" {
      description = "The id of the jump server security group"
      value       = aws_security_group.jump-server.id
    }
    


    버지니아(Virginia)



    region/virginia 폴더의 소스 코드는 다음과 같습니다.
  • providers.tf

  • # Provider and backend configuration for the virginia (us-east-1) stack.
    terraform {
      required_version = ">= 0.13.7"
      required_providers {
        aws = {
          source  = "hashicorp/aws"
          version = ">= 4.8.0"
        }
        random = {
          source  = "hashicorp/random"
          version = "3.1.0"
        }
        null = {
          source  = "hashicorp/null"
          version = "3.1.0"
        }
        local = {
          source = "hashicorp/local"
          version = "2.1.0"
        }
        # NOTE(review): hashicorp/template is archived (no new releases, no
        # Apple Silicon builds); consider the built-in templatefile()
        # function instead — confirm nothing in this stack still uses it.
        template = {
          source = "hashicorp/template"
          version = "2.2.0"
        }
        # Kubernetes Provider
        kubernetes = {
          source  = "hashicorp/kubernetes"
          version = "2.1.0"
        }
      }
    
      # Remote state in S3; the bucket must already exist before
      # `terraform init` is run.
      backend "s3" {
        bucket  = "pin-terraform-state-us-east-1"
        key     = "terraform/backend.tfstate"
        region  = "us-east-1"
        encrypt = "true"
      }
    }
    


  • main.tf

  • locals {
      region             = "us-east-1"
      # One AZ suffix per subnet CIDR below.
      availability_zones = ["${local.region}a", "${local.region}b", "${local.region}c"]
      tags               = {
        "Environment" : "PROD"
        "Project" : "Infrastructure"
      }
    }
    
    provider "aws" {
      region = local.region
    }
    
    # VPC with three public and three private subnets.
    module "Networking" {
      source                     = "../../module/networking"
      name                       = "VPC"
      availability_zones         = local.availability_zones
      vpc_cidr_block             = "10.0.0.0/16"
      public_subnets_cidr_block  = ["10.0.32.0/24", "10.0.96.0/24", "10.0.224.0/24"]
      private_subnets_cidr_block = ["10.0.0.0/19", "10.0.64.0/19", "10.0.128.0/19"]
      vpc_tags                   = local.tags
    }
    
    # Public EC2 instance used to administer the cluster.
    module "JumpServer" {
      source            = "../../module/jumpserver"
      vpc_id            = module.Networking.vpc_id
      public_subnets_id = module.Networking.public_subnets_id
      shared_tags       = local.tags
    }
    
    module "EKS" {
      source             = "../../module/eks"
      name               = "EKS"
      vpc_id             = module.Networking.vpc_id
      public_subnets_id  = module.Networking.public_subnets_id
      private_subnets_id = module.Networking.private_subnets_id
      # The EKS module uses these ids as the source of its cluster SG
      # ingress rule; without them the default [] leaves the jump server
      # unable to reach the cluster.
      security_group_ids = [module.JumpServer.jump-server-security-group-id]
      desired_size       = 4
      max_size           = 16
      eks_tags           = local.tags
    }
    
    


    이제 빌드할 수 있습니다.

    $ terraform init
    



    $ terraform apply
    


    결과



    좋은 웹페이지 즐겨찾기