์ธ์ฐ๋ค๐บ๐ธ ๐ช๐บ ๊ตฌ๋ฆ์ด ๋ง์ DevOps ํ๋ซํผ
140281 ๋จ์ด terraformkubernetesdevopsgooglecloud
์ฐ๋ฆฌ๋ ๋ ๊ฐ์ ํด๋ผ์ฐ๋ ๊ณต๊ธ์๊ฐ ๋ชจ๋ํฐ๋ง, ๋ก๊ทธ ๊ธฐ๋ก, DevOps ๋ฐฉ๋ฉด์์ ์์ฉํ ์ ์๋ ์๋ก ๋ค๋ฅธ ๊ตฌ์กฐ๋ฅผ ๋ณด์๋ค.์ฐ๋ฆฌ๋ ๋ ์ธํฐ๋ท ๋ฐ์ดํฐ ์๊ฐ์ ๋ํด ํ ๋ก ํ๋ค.
3๋ถ์์๋ GitLab๊ณผ Kubernetes๋ฅผ ์ฌ์ฉํ์ฌ Google Cloud์์ DevOps ํ๋ซํผ์ ๊ตฌ์ถํ๋ ๋ฐฉ๋ฒ์ ๋ณผ ์ ์์ต๋๋ค.
Terraform์ผ๋ก ์ธํ๋ผ๋ฅผ ๋ฐฐํฌํ๋ ๊ฒ๋ถํฐ ์์ํฉ์๋ค.
์ ๊ฒฐ ์กฐ๊ฑด
๊ณํ
devops
์ ์ฅ์๋ฅผ ๋ง๋ค๊ณ terraform ํ์ผ์ infra/plan
๋ก ๋ณต์ฌํฉ๋๋ค.์งํ.
ํ๋ก์ ํธ ์๋น์ค
infra/plan/main.tf
# Enable each Google Cloud API listed in var.project_services on the project.
resource "google_project_service" "service" {
count = length(var.project_services)
project = var.project_id
service = element(var.project_services, count.index)
# Keep the APIs enabled even if this Terraform resource is destroyed,
# so other workloads in the project are not disrupted.
disable_on_destroy = false
}
# Look up the current project; its number is used later for the
# GKE service-agent IAM binding.
data "google_project" "project" {
}
๊ฐ์ ์ ์ฉ ๋คํธ์ํฌ
๋ค์ ์งํ ํ์ผ์ด ์์ฑ๋ฉ๋๋ค.
# NOTE(review): this snippet repeats the API-enablement block shown above —
# the article renders the same file section twice; only one copy belongs
# in infra/plan.
resource "google_project_service" "service" {
count = length(var.project_services)
project = var.project_id
service = element(var.project_services, count.index)
disable_on_destroy = false
}
data "google_project" "project" {
}
# Custom-mode VPC for the DevOps platform. Subnets are created explicitly
# below instead of letting GCP auto-create one per region.
resource "google_compute_network" "vpc" {
name = "vpc"
auto_create_subnetworks = false
project = var.project_id
# Ensure the Compute API is enabled before creating the network.
depends_on = [google_project_service.service]
}
# Subnet for the GKE cluster. The primary range is used by nodes; the two
# secondary (alias IP) ranges are consumed by the cluster's
# ip_allocation_policy for services and pods (VPC-native networking).
resource "google_compute_subnetwork" "subnet-vpc" {
name = "subnet"
ip_cidr_range = var.subnet_ip_range_primary
region = var.region
network = google_compute_network.vpc.id
project = var.project_id
# NOTE(review): assigning secondary_ip_range as a list works with the
# google provider ~> 3.0 pinned in versions.tf, but newer provider
# versions require repeated secondary_ip_range {} blocks — confirm
# before upgrading the provider.
secondary_ip_range = [
{
range_name = "secondary-ip-ranges-devops-services"
ip_cidr_range = var.subnet_secondary_ip_range_services
},
{
range_name = "secondary-ip-ranges-devops-pods"
ip_cidr_range = var.subnet_secondary_ip_range_pods
}
]
# NOTE(review): private nodes with Google access disabled must reach
# Google APIs through the Cloud NAT defined below — confirm intended.
private_ip_google_access = false
}
# Two static external IPs reserved for Cloud NAT egress. Their /32s are
# also whitelisted on the GKE master so in-cluster traffic can reach the
# public API endpoint.
resource "google_compute_address" "nat" {
count = 2
name = "nat-external-${count.index}"
project = var.project_id
region = var.region
depends_on = [google_project_service.service]
}
# Cloud Router attached to the VPC; required by Cloud NAT below.
resource "google_compute_router" "router" {
name = "router"
project = var.project_id
region = var.region
network = google_compute_network.vpc.self_link
bgp {
# Private ASN (64512-65534 range) — only used locally by the router.
asn = 64514
}
}
# Cloud NAT giving the private GKE nodes outbound internet access through
# the two manually reserved static IPs.
resource "google_compute_router_nat" "nat" {
name = "nat-1"
project = var.project_id
router = google_compute_router.router.name
region = var.region
# MANUAL_ONLY: use the reserved addresses instead of auto-allocated ones,
# so the egress IPs are stable and can be whitelisted on the GKE master.
nat_ip_allocate_option = "MANUAL_ONLY"
nat_ips = google_compute_address.nat.*.self_link
source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS"
subnetwork {
name = google_compute_subnetwork.subnet-vpc.self_link
# NAT both the node (primary) range and the pod/service secondary ranges.
source_ip_ranges_to_nat = ["PRIMARY_IP_RANGE", "LIST_OF_SECONDARY_IP_RANGES"]
secondary_ip_range_names = [
google_compute_subnetwork.subnet-vpc.secondary_ip_range[0].range_name,
google_compute_subnetwork.subnet-vpc.secondary_ip_range[1].range_name,
]
}
}
GKE ํด๋ฌ์คํฐ
๋ค์ ์งํ ํ์ผ์ด ์์ฑ๋ฉ๋๋ค.
# Private GKE cluster hosting the DevOps tooling (Vault, GitLab runner).
resource "google_container_cluster" "gke-devops-cluster" {
# google-beta is needed for beta features used below
# (pod_security_policy_config in particular).
provider = google-beta
name = "gke-cluster-devops"
location = var.gke_devops_cluster_location
network = google_compute_network.vpc.id
subnetwork = google_compute_subnetwork.subnet-vpc.id
# Nodes get no public IPs; the master endpoint stays public but is
# restricted by master_authorized_networks_config below.
private_cluster_config {
enable_private_endpoint = false
enable_private_nodes = true
master_ipv4_cidr_block = var.master_ipv4_cidr_block
}
project = var.project_id
# Drop the default pool; dedicated node pools are declared separately.
remove_default_node_pool = true
initial_node_count = 1
maintenance_policy {
recurring_window {
start_time = "2020-10-01T09:00:00-04:00"
end_time = "2050-10-01T17:00:00-04:00"
recurrence = "FREQ=WEEKLY"
}
}
enable_shielded_nodes = true
# VPC-native (alias IP) ranges, defined as secondary ranges on the subnet.
ip_allocation_policy {
cluster_secondary_range_name = "secondary-ip-ranges-devops-pods"
services_secondary_range_name = "secondary-ip-ranges-devops-services"
}
networking_mode = "VPC_NATIVE"
logging_service = "logging.googleapis.com/kubernetes"
monitoring_service = "monitoring.googleapis.com/kubernetes"
# Only these CIDRs may reach the Kubernetes API server: GitLab.com
# shared runners, the operator's IP, and the cluster's own NAT egress IPs
# (so in-cluster tooling can reach the public endpoint).
master_authorized_networks_config {
cidr_blocks {
cidr_block = var.gitlab_public_ip_ranges
display_name = "GITLAB PUBLIC IP RANGES"
}
cidr_blocks {
cidr_block = var.authorized_source_ranges
display_name = "Authorized IPs"
}
cidr_blocks {
cidr_block = "${google_compute_address.nat[0].address}/32"
display_name = "NAT IP 1"
}
cidr_blocks {
cidr_block = "${google_compute_address.nat[1].address}/32"
display_name = "NAT IP 2"
}
}
addons_config {
horizontal_pod_autoscaling {
disabled = false
}
http_load_balancing {
disabled = false
}
network_policy_config {
disabled = false
}
}
network_policy {
provider = "CALICO"
enabled = true
}
pod_security_policy_config {
enabled = false
}
release_channel {
channel = "STABLE"
}
# Workload Identity: lets Kubernetes service accounts impersonate
# Google service accounts (used by the GitLab runner later).
# NOTE(review): identity_namespace is the provider-3.x attribute name;
# provider 4.x renames it to workload_pool — confirm before upgrading.
workload_identity_config {
identity_namespace = "${var.project_id}.svc.id.goog"
}
# Encrypt Kubernetes secrets at rest with a customer-managed KMS key.
database_encryption {
state = "ENCRYPTED"
key_name = google_kms_crypto_key.kubernetes-secrets.self_link
}
# Master authentication: disable basic auth (empty username/password)
# and client certificates; access goes through IAM/OAuth only.
master_auth {
username = ""
password = ""
client_certificate_config {
issue_client_certificate = false
}
}
depends_on = [
google_project_service.service,
google_project_iam_member.service-account,
google_compute_router_nat.nat
]
}
# Default node pool for general workloads (no taints, no autoscaling).
resource "google_container_node_pool" "gke-nodepools-default" {
project = var.project_id
name = "gke-nodepools-default"
location = var.gke_devops_cluster_location
cluster = google_container_cluster.gke-devops-cluster.name
initial_node_count = 1
node_config {
machine_type = var.node_pools_machine_type
metadata = {
# Disable legacy GCE metadata endpoints (security best practice).
disable-legacy-endpoints = "true"
}
# Minimal OAuth scopes: logging, monitoring, compute, read-only GCS.
oauth_scopes = [
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring",
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/devstorage.read_only"
]
tags = [
"gke-devops-nodes"]
}
}
# Autoscaling pool reserved for DevOps jobs (e.g. CI runners). Preemptible
# nodes keep cost down; the taint keeps unrelated pods off, so only pods
# tolerating "devops-reserved-pool" are scheduled here.
resource "google_container_node_pool" "gke-nodepools-devops" {
project = var.project_id
name = "gke-nodepools-devops"
location = var.gke_devops_cluster_location
cluster = google_container_cluster.gke-devops-cluster.name
# Scales down to zero when no DevOps jobs are running.
autoscaling {
max_node_count = 3
min_node_count = 0
}
node_config {
machine_type = var.node_pools_machine_type
preemptible = true
metadata = {
disable-legacy-endpoints = "true"
}
oauth_scopes = [
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring",
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/devstorage.read_only"
]
labels = {
"nodepool" = "devops"
}
taint {
key = "devops-reserved-pool"
value = "true"
effect = "NO_SCHEDULE"
}
tags = [
"gke-devops-nodes"]
}
}
# Dedicated pool for the Vault StatefulSet. Runs under the vault-server
# service account and is tainted so only Vault pods (which tolerate
# "vault-reserved-pool") land here.
resource "google_container_node_pool" "gke-nodepools-vault" {
project = var.project_id
name = "gke-nodepools-vault"
location = var.gke_devops_cluster_location
cluster = google_container_cluster.gke-devops-cluster.name
initial_node_count = 1
autoscaling {
max_node_count = 3
min_node_count = 1
}
node_config {
machine_type = var.node_pools_machine_type
# Nodes run as the Vault service account (KMS + GCS permissions).
service_account = google_service_account.vault-server.email
metadata = {
disable-legacy-endpoints = "true"
# Extra entropy source for cryptographic operations.
google-compute-enable-virtio-rng = "true"
}
# cloud-platform scope is required so Vault can use KMS auto-unseal
# and the GCS storage backend.
oauth_scopes = [
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring",
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/devstorage.read_only",
"https://www.googleapis.com/auth/cloud-platform"
]
labels = {
nodepool = "vault"
service = "vault"
}
# Hide node metadata from workloads (metadata concealment).
workload_metadata_config {
node_metadata = "SECURE"
}
taint {
key = "vault-reserved-pool"
value = "true"
effect = "NO_SCHEDULE"
}
tags = [
"gke-devops-nodes", "vault"]
}
}
# Static regional IP for the Vault LoadBalancer Service; also embedded
# in the Vault TLS certificate's ip_addresses below.
resource "google_compute_address" "vault" {
name = "vault-lb"
region = var.region
project = var.project_id
depends_on = [google_project_service.service]
}
# Expose the Vault endpoint address (used to set VAULT_ADDR later).
output "address" {
value = google_compute_address.vault.address
}
Vault ๋ฆฌ์์ค
๋ค์ ์งํ ํ์ผ:
infra/plan/vault.tf
# Google service account the Vault node pool runs as.
resource "google_service_account" "vault-server" {
account_id = "vault-server"
display_name = "Vault Server"
project = var.project_id
}
# Grant Vault its baseline roles (logging, monitoring, KMS, GCS).
resource "google_project_iam_member" "service-account" {
count = length(var.vault_service_account_iam_roles)
project = var.project_id
role = element(var.vault_service_account_iam_roles, count.index)
member = "serviceAccount:${google_service_account.vault-server.email}"
}
# Optional extra roles supplied by the operator (empty list by default).
resource "google_project_iam_member" "service-account-custom" {
count = length(var.service_account_custom_iam_roles)
project = var.project_id
role = element(var.service_account_custom_iam_roles, count.index)
member = "serviceAccount:${google_service_account.vault-server.email}"
}
# GCS bucket used as Vault's HA storage backend.
resource "google_storage_bucket" "vault" {
name = "${var.project_id}-vault-storage"
project = var.project_id
# force_destroy lets `terraform destroy` delete the bucket even when it
# still contains Vault data — convenient for a demo, destructive in prod.
force_destroy = true
location = var.region
storage_class = "REGIONAL"
versioning {
enabled = true
}
# Keep only the current object version: delete any object as soon as one
# newer version exists.
lifecycle_rule {
action {
type = "Delete"
}
condition {
num_newer_versions = 1
}
}
depends_on = [google_project_service.service]
}
# Generate a random suffix for the KMS keyring. Like projects, key rings names
# must be globally unique within the project. A key ring also cannot be
# destroyed, so deleting and re-creating a key ring will fail.
#
# This uses a random_id to prevent that from happening.
resource "random_id" "kms_random" {
prefix = var.kms_key_ring_prefix
byte_length = "8"
}
# Obtain the key ring ID or use a randomly generated on.
locals {
kms_key_ring = var.kms_key_ring != "" ? var.kms_key_ring : random_id.kms_random.hex
}
# Key ring holding both Vault-related crypto keys.
resource "google_kms_key_ring" "vault" {
name = local.kms_key_ring
location = var.region
project = var.project_id
depends_on = [google_project_service.service]
}
# Key used by vault-init to encrypt the unseal/recovery material and
# root token stored in GCS. Rotated weekly (604800s = 7 days).
resource "google_kms_crypto_key" "vault-init" {
name = var.kms_crypto_key
key_ring = google_kms_key_ring.vault.id
rotation_period = "604800s"
}
# Key used by GKE for envelope encryption of Kubernetes secrets
# (referenced by the cluster's database_encryption block).
resource "google_kms_crypto_key" "kubernetes-secrets" {
name = var.kubernetes_secrets_crypto_key
key_ring = google_kms_key_ring.vault.id
rotation_period = "604800s"
}
# Allow the GKE service agent (container-engine-robot) to use KMS keys,
# which database_encryption requires.
resource "google_project_iam_member" "kubernetes-secrets-gke" {
project = var.project_id
role = "roles/cloudkms.cryptoKeyEncrypterDecrypter"
member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com"
}
TLS
๋ค์ ์งํ ํ์ผ:
# Private key for a self-managed CA used to sign the Vault server cert.
resource "tls_private_key" "vault-ca" {
algorithm = "RSA"
rsa_bits = "2048"
}
# Self-signed CA certificate, valid one year (8760h).
resource "tls_self_signed_cert" "vault-ca" {
key_algorithm = tls_private_key.vault-ca.algorithm
private_key_pem = tls_private_key.vault-ca.private_key_pem
subject {
common_name = "vault-ca.local"
organization = "HashiCorp Vault"
}
validity_period_hours = 8760
is_ca_certificate = true
allowed_uses = [
"cert_signing",
"digital_signature",
"key_encipherment",
]
# Write the CA PEM to ../tls/ca.pem so clients (and the Docker image
# build later) can trust Vault. Runs only on resource creation.
provisioner "local-exec" {
command = "echo '${self.cert_pem}' > ../tls/ca.pem && chmod 0600 ../tls/ca.pem"
}
}
# Private key for the Vault server certificate.
resource "tls_private_key" "vault" {
algorithm = "RSA"
rsa_bits = "2048"
}
# CSR for the Vault server cert, covering the in-cluster DNS names, the
# public DNS name, and the load-balancer IP.
resource "tls_cert_request" "vault" {
key_algorithm = tls_private_key.vault.algorithm
private_key_pem = tls_private_key.vault.private_key_pem
dns_names = [
"vault",
"vault.local",
"vault.${var.public_dns_name}",
"vault.default.svc.cluster.local",
]
ip_addresses = [
google_compute_address.vault.address,
]
subject {
common_name = "vault.local"
organization = "HashiCorp Vault"
}
}
# Vault server certificate signed by the CA above, valid one year.
resource "tls_locally_signed_cert" "vault" {
cert_request_pem = tls_cert_request.vault.cert_request_pem
ca_key_algorithm = tls_private_key.vault-ca.algorithm
ca_private_key_pem = tls_private_key.vault-ca.private_key_pem
ca_cert_pem = tls_self_signed_cert.vault-ca.cert_pem
validity_period_hours = 8760
allowed_uses = [
"cert_signing",
"client_auth",
"digital_signature",
"key_encipherment",
"server_auth",
]
# Write server cert + CA chain to ../tls/vault.pem on creation.
provisioner "local-exec" {
command = "echo '${self.cert_pem}' > ../tls/vault.pem && echo '${tls_self_signed_cert.vault-ca.cert_pem}' >> ../tls/vault.pem && chmod 0600 ../tls/vault.pem"
}
}
์ฟ ๋ฒ๋คํฐ์ค
๋ค์ ์งํ ํ์ผ:
infra/plan/k8s.tf
# Access token of the identity currently running Terraform; used to
# authenticate the kubernetes provider against the new cluster.
data "google_client_config" "current" {}
provider "kubernetes" {
# NOTE(review): load_config_file exists only in kubernetes provider 1.x;
# it was removed in 2.x — confirm the pinned provider version.
load_config_file = false
host = google_container_cluster.gke-devops-cluster.endpoint
cluster_ca_certificate = base64decode(
google_container_cluster.gke-devops-cluster.master_auth[0].cluster_ca_certificate,
)
token = data.google_client_config.current.access_token
}
# TLS material mounted into the Vault pods (cert chain, key, CA).
resource "kubernetes_secret" "vault-tls" {
metadata {
name = "vault-tls"
}
data = {
"vault.crt" = "${tls_locally_signed_cert.vault.cert_pem}\n${tls_self_signed_cert.vault-ca.cert_pem}"
"vault.key" = tls_private_key.vault.private_key_pem
"ca.crt" = tls_self_signed_cert.vault-ca.cert_pem
}
}
# LoadBalancer Service exposing Vault on the reserved static IP,
# TLS on 443 forwarded to the pods' 8200 listener.
resource "kubernetes_service" "vault-lb" {
metadata {
name = "vault"
labels = {
app = "vault"
}
}
spec {
type = "LoadBalancer"
load_balancer_ip = google_compute_address.vault.address
# Restrict sources to the pod range and the operator's authorized IPs.
load_balancer_source_ranges = [var.subnet_secondary_ip_range_pods, var.authorized_source_ranges]
# "Local" preserves client source IPs and avoids an extra hop.
external_traffic_policy = "Local"
selector = {
app = "vault"
}
port {
name = "vault-port"
port = 443
target_port = 8200
protocol = "TCP"
}
}
depends_on = [google_container_cluster.gke-devops-cluster]
}
# Vault HA StatefulSet: var.num_vault_pods replicas, each running the
# vault server plus a vault-init sidecar that initializes/unseals Vault
# using the GCS bucket and KMS key created above.
resource "kubernetes_stateful_set" "vault" {
metadata {
name = "vault"
labels = {
app = "vault"
}
}
spec {
service_name = "vault"
replicas = var.num_vault_pods
selector {
match_labels = {
app = "vault"
}
}
template {
metadata {
labels = {
app = "vault"
}
}
spec {
termination_grace_period_seconds = 10
# Prefer spreading Vault pods across distinct nodes for availability.
affinity {
pod_anti_affinity {
preferred_during_scheduling_ignored_during_execution {
weight = 50
pod_affinity_term {
topology_key = "kubernetes.io/hostname"
label_selector {
match_expressions {
key = "app"
operator = "In"
values = ["vault"]
}
}
}
}
}
}
# Pin to the dedicated vault node pool and tolerate its taint.
node_selector = {
nodepool = "vault"
}
toleration {
key = "vault-reserved-pool"
operator = "Equal"
effect = "NoSchedule"
value = "true"
}
# Sidecar (intentionally a regular container, not an init_container):
# sethvargo/vault-init keeps running to initialize and auto-unseal
# Vault, storing encrypted root token/recovery keys in GCS via KMS.
container {
name = "vault-init"
image = var.vault_init_container
image_pull_policy = "IfNotPresent"
resources {
requests {
cpu = "100m"
memory = "64Mi"
}
}
env {
name = "GCS_BUCKET_NAME"
value = google_storage_bucket.vault.name
}
env {
name = "KMS_KEY_ID"
value = google_kms_crypto_key.vault-init.self_link
}
# Talks to Vault over the localhost plaintext listener (tls_disable).
env {
name = "VAULT_ADDR"
value = "http://127.0.0.1:8200"
}
env {
name = "VAULT_SECRET_SHARES"
value = var.vault_recovery_shares
}
env {
name = "VAULT_SECRET_THRESHOLD"
value = var.vault_recovery_threshold
}
}
# Main Vault server container.
container {
name = "vault"
image = var.vault_container
image_pull_policy = "IfNotPresent"
args = ["server"]
security_context {
capabilities {
# Lets Vault mlock memory so secrets are never swapped to disk.
add = ["IPC_LOCK"]
}
}
port {
name = "vault-port"
container_port = 8200
protocol = "TCP"
}
port {
name = "cluster-port"
container_port = 8201
protocol = "TCP"
}
resources {
requests {
cpu = "500m"
memory = "256Mi"
}
}
# TLS cert/key from the kubernetes_secret defined above.
volume_mount {
name = "vault-tls"
mount_path = "/etc/vault/tls"
}
env {
name = "VAULT_ADDR"
value = "http://127.0.0.1:8200"
}
# Pod IP injected via the downward API; referenced in the config below.
env {
name = "POD_IP_ADDR"
value_from {
field_ref {
field_path = "status.podIP"
}
}
}
# Inline Vault server config: GCP KMS auto-unseal, GCS HA storage,
# a plaintext localhost listener for the sidecar, and a TLS listener
# on the pod IP for external traffic.
env {
name = "VAULT_LOCAL_CONFIG"
value = <<EOF
api_addr = "https://vault.${var.public_dns_name}"
cluster_addr = "https://$(POD_IP_ADDR):8201"
log_level = "warn"
ui = true
seal "gcpckms" {
project = "${google_kms_key_ring.vault.project}"
region = "${google_kms_key_ring.vault.location}"
key_ring = "${google_kms_key_ring.vault.name}"
crypto_key = "${google_kms_crypto_key.vault-init.name}"
}
storage "gcs" {
bucket = "${google_storage_bucket.vault.name}"
ha_enabled = "true"
}
listener "tcp" {
address = "127.0.0.1:8200"
tls_disable = "true"
}
listener "tcp" {
address = "$(POD_IP_ADDR):8200"
tls_cert_file = "/etc/vault/tls/vault.crt"
tls_key_file = "/etc/vault/tls/vault.key"
tls_disable_client_certs = true
}
EOF
}
# standbyok=true: standby nodes also report healthy, so all replicas
# stay in the Service endpoints.
readiness_probe {
initial_delay_seconds = 5
period_seconds = 5
http_get {
path = "/v1/sys/health?standbyok=true"
port = 8200
scheme = "HTTPS"
}
}
}
volume {
name = "vault-tls"
secret {
secret_name = "vault-tls"
}
}
}
}
}
}
# Command that fetches and KMS-decrypts the Vault root token stored in GCS
# by vault-init. Anyone with access to this bucket and key can obtain
# the root token — restrict both accordingly.
output "root_token_decrypt_command" {
value = "gsutil cat gs://${google_storage_bucket.vault.name}/root-token.enc | base64 --decode | gcloud kms decrypt --key ${google_kms_crypto_key.vault-init.self_link} --ciphertext-file - --plaintext-file -"
}
๊ธฐํ
infra/plan/variables.tf
# Location (zone or region) of the GKE cluster and its node pools.
variable "gke_devops_cluster_location" {
type = string
default = "europe-west1"
}
# GCP region for regional resources (subnet, NAT IPs, KMS, bucket).
variable "region" {
type = string
}
# Machine type shared by all three node pools.
variable "node_pools_machine_type" {
type = string
default = "e2-standard-2"
}
# /28 CIDR reserved for the private cluster's master peering range.
variable "master_ipv4_cidr_block" {
type = string
}
# Primary subnet range (GKE nodes).
variable "subnet_ip_range_primary" {
type = string
default = "10.10.10.0/24"
}
# Secondary range for Kubernetes Services (alias IPs).
variable "subnet_secondary_ip_range_services" {
type = string
default = "10.10.11.0/24"
}
# Secondary range for Kubernetes Pods (alias IPs).
variable "subnet_secondary_ip_range_pods" {
type = string
default = "10.1.0.0/20"
}
# Public DNS zone under which vault.<public_dns_name> is published.
variable "public_dns_name" {
type = string
}
// deployment project id
variable "project_id" {
type = string
}
variable "gitlab_public_ip_ranges" {
type = string
description = "GITLAB PUBLIC IP RANGES"
}
variable "vault_service_account_iam_roles" {
type = list(string)
default = [
"roles/logging.logWriter",
"roles/monitoring.metricWriter",
"roles/monitoring.viewer",
"roles/cloudkms.cryptoKeyEncrypterDecrypter",
"roles/storage.objectAdmin"
]
description = "List of IAM roles to assign to the service account of vault."
}
variable "service_account_custom_iam_roles" {
type = list(string)
default = []
description = "List of arbitrary additional IAM roles to attach to the service account on the Vault nodes."
}
variable "project_services" {
type = list(string)
default = [
"secretmanager.googleapis.com",
"cloudkms.googleapis.com",
"cloudresourcemanager.googleapis.com",
"container.googleapis.com",
"compute.googleapis.com",
"iam.googleapis.com",
"logging.googleapis.com",
"monitoring.googleapis.com",
"cloudbuild.googleapis.com"
]
description = "List of services to enable on the project."
}
# This is an option used by the kubernetes provider, but is part of the Vault
# security posture.
variable "authorized_source_ranges" {
type = string
description = "Addresses or CIDR blocks which are allowed to connect to the Vault IP address. The default behavior is to allow anyone (0.0.0.0/0) access. You should restrict access to external IPs that need to access the Vault cluster."
}
#
# KMS options
# ------------------------------
variable "kms_key_ring_prefix" {
type = string
default = "vault"
description = "String value to prefix the generated key ring with."
}
variable "kms_key_ring" {
type = string
default = ""
description = "String value to use for the name of the KMS key ring. This exists for backwards-compatability for users of the existing configurations. Please use kms_key_ring_prefix instead."
}
variable "kms_crypto_key" {
type = string
default = "vault-init"
description = "String value to use for the name of the KMS crypto key."
}
variable "num_vault_pods" {
type = number
default = 3
description = "Number of Vault pods to run. Anti-affinity rules spread pods across available nodes. Please use an odd number for better availability."
}
#
# Kubernetes options
# ------------------------------
variable "kubernetes_secrets_crypto_key" {
type = string
default = "kubernetes-secrets"
description = "Name of the KMS key to use for encrypting the Kubernetes database."
}
variable "vault_container" {
type = string
default = "vault:1.2.1"
description = "Name of the Vault container image to deploy. This can be specified like \"container:version\" or as a full container URL."
}
variable "vault_init_container" {
type = string
default = "sethvargo/vault-init:1.0.0"
description = "Name of the Vault init container image to deploy. This can be specified like \"container:version\" or as a full container URL."
}
variable "vault_recovery_shares" {
type = string
default = "1"
description = "Number of recovery keys to generate."
}
variable "vault_recovery_threshold" {
type = string
default = "1"
description = "Number of recovery keys required for quorum. This must be less than or equal to \"vault_recovery_keys\"."
}
infra/plan/terraform.tfvars
region = "<GCP_REGION>"
gke_devops_cluster_location = "<GCP_GKE_CLUSTER_ZONE>"
master_ipv4_cidr_block = "172.23.0.0/28"
project_id = "<GCP_PROJECT_ID>"
gitlab_public_ip_ranges = "34.74.90.64/28"
authorized_source_ranges = "<LOCAL_IP_RANGES>"
public_dns_name = "<PUBLIC_DNS_NAME>"
infra/plan/backend.tf
terraform {
backend "gcs" {
}
}
infra/plan/versions.tf
terraform {
required_version = ">= 0.12"
required_providers {
google = "~> 3.0"
}
}
GCP ๋ฆฌ์์ค ๋ฐฐํฌ
DevOps ํ๋ซํผ์ ๋ฐฐํฌํ๊ธฐ ์ ์ ๋ค์๊ณผ ๊ฐ์ ๊ธ๋ก๋ฒ ๋ณ์๋ฅผ ๋ด๋ณด๋ด์ผ ํฉ๋๋ค.
# Global variables used by all following commands. Replace the <...>
# placeholders with your own values before running.
export GCP_PROJECT_ID=<GCP_PROJECT_ID>
export SW_PROJECT_NAME=<SW_PROJECT_NAME>
export GIT_REPOSITORY_URL=<MY_REPO>/demo-env.git
export GCP_REGION_DEFAULT=europe-west1
export GCP_GKE_CLUSTER_ZONE=europe-west1-b
# kubectl context name that get-credentials will create for this cluster.
export GCP_KUBE_CONTEXT_NAME="gke_${GCP_PROJECT_ID}_${GCP_GKE_CLUSTER_ZONE}_gke-cluster-devops"
export PUBLIC_DNS_NAME=
export PUBLIC_DNS_ZONE_NAME=
# Bucket that holds the Terraform remote state (must be globally unique).
export TERRAFORM_BUCKET_NAME=bucket-${GCP_PROJECT_ID}-sw-gcp-terraform-backend
์งํ ์ํ๋ฅผ ์ํ ๊ตฌ๊ธ ํด๋ผ์ฐ๋ ์ ์ฅํต ๋ง๋ค๊ธฐ:
gcloud config set project ${GCP_PROJECT_ID}
gsutil mb -c standard -l ${GCP_REGION_DEFAULT} gs://${TERRAFORM_BUCKET_NAME}
gsutil versioning set on gs://${TERRAFORM_BUCKET_NAME}
๋ฐฑ์๋ ์ค์ ์ ์์ฑํ์ฌterraform์ ์ด๊ธฐํํฉ๋๋ค.์ด๋ฌํ ์ฃผ๋ GCP์ ์ ์ฅ๋ฉ๋๋ค.
cd infra/plan
terraform init \
-backend-config="bucket=${TERRAFORM_BUCKET_NAME}" \
-backend-config="prefix=googlecloud/terraform/state"
ํ์ฌ ์ฐ๋ฆฌ๋ ํ์ผinfra/plan/terraform.tfvars
์ ๋ณ์๋ฅผ ์์ฑํ๊ณ ๊ตฌ๊ธ ํด๋ผ์ฐ๋์ ์ฐ๋ฆฌ์ DevOps ํ๋ซํผ์ ๋ฐฐ์นํ ์ ์์ต๋๋ค!
sed -i "s/<LOCAL_IP_RANGES>/$(curl -s http://checkip.amazonaws.com/)\/32/g;s/<PUBLIC_DNS_NAME>/${PUBLIC_DNS_NAME}/g;s/<GCP_PROJECT_ID>/${GCP_PROJECT_ID}/g;s/<GCP_REGION>/${GCP_REGION_DEFAULT}/g;s/<GCP_GKE_CLUSTER_ZONE>/${GCP_GKE_CLUSTER_ZONE}/g" terraform.tfvars
terraform apply
์งํ์ด ์์ฑ๋๋ฉด ๋ค์ ๋ช
๋ น์ ์ฌ์ฉํ์ฌ GKE ํด๋ฌ์คํฐ์ ์ก์ธ์คํ ์ ์์ต๋๋ค.
gcloud container clusters get-credentials gke-cluster-devops --zone ${GCP_GKE_CLUSTER_ZONE} --project ${GCP_PROJECT_ID}
Vault ๊ตฌ์ฑ
Vault์ ์ฑ๋ ์๊ฒฉ ์ฆ๋ช
์ ์ ์ฅํ๋ ค๋ฉด Vault์ ๋ํ ํ๊ฒฝ ๋ณ์๋ฅผ ์ค์ ํด์ผ ํฉ๋๋ค.
Vault์ ์ฃผ์, ์ธ์ฆ์ ์ํ CA ๋ฐ ์ด๊ธฐ ๋ฃจํธ ํ ํฐ์ ์ค์ ํฉ๋๋ค.
# cd infra/plan
export VAULT_ADDR="https://$(terraform output address)"
export VAULT_TOKEN="$(eval `terraform output root_token_decrypt_command`)"
export VAULT_CAPATH="$(cd ../ && pwd)/tls/ca.pem"
์ฑ๋ ์๊ฒฉ ์ฆ๋ช
์ ์ ์ฅํ๋ ค๋ฉด:
vault secrets enable -path=scaleway/project/${SW_PROJECT_NAME} -version=2 kv
vault kv put scaleway/project/${SW_PROJECT_NAME}/credentials/access key="<SCW_ACCESS_KEY>"
vault kv put scaleway/project/${SW_PROJECT_NAME}/credentials/secret key="<SCW_SECRET_KEY>"
vault kv put scaleway/project/${SW_PROJECT_NAME}/config id="<SW_PROJECT_ID>"
Gitlab ํ์ดํ๋ผ์ธ์์ ์งํ ์ํ๋ฅผ ์ฝ๊ณ , ๊ธฐ๋ฐ์ ์ฝ๊ณ , docker ์ด๋ฏธ์ง๋ฅผ ๊ตฌ์ถํ๋ ค๋ฉด ํ์ํ ๊ถํ์ด ์๋ Google ์๋น์ค ๊ณ์ (GSA) ์ ๋ง๋ค์ด์ผ ํฉ๋๋ค.
gcloud iam service-accounts create gsa-dev-deployer
gcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \
--role roles/container.developer \
--member "serviceAccount:gsa-dev-deployer@${GCP_PROJECT_ID}.iam.gserviceaccount.com"
gcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \
--role roles/secretmanager.secretAccessor \
--member "serviceAccount:gsa-dev-deployer@${GCP_PROJECT_ID}.iam.gserviceaccount.com"
gcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \
--role roles/cloudbuild.builds.builder \
--member "serviceAccount:gsa-dev-deployer@${GCP_PROJECT_ID}.iam.gserviceaccount.com"
gcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \
--role roles/iam.serviceAccountAdmin \
--member "serviceAccount:gsa-dev-deployer@${GCP_PROJECT_ID}.iam.gserviceaccount.com"
Gitlab runner ์์
์ ์คํํ๋ ๋ค์์คํ์ด์ค๋ฅผ ๋ง๋ค๋ ค๋ฉด ๋ค์๊ณผ ๊ฐ์ด ํ์ญ์์ค.
kubectl create namespace sw-dev
Gitlab runner ์์
์ด GCP ์์์ ์ ๊ทผํ ์ ์๋๋ก ํ๊ธฐ ์ํด์ GSA๋ ์์์ ๋ง๋ GSA๋ฅผ Kubernetes ์๋น์ค ๊ณ์ (KSA) ์ ์ฐ๊ฒฐํด์ผ ํฉ๋๋ค [2]:
kubectl create serviceaccount -n sw-dev ksa-sw-dev-deployer
gcloud iam service-accounts add-iam-policy-binding \
--role roles/iam.workloadIdentityUser \
--member "serviceAccount:${GCP_PROJECT_ID}.svc.id.goog[sw-dev/ksa-sw-dev-deployer]" \
gsa-dev-deployer@${GCP_PROJECT_ID}.iam.gserviceaccount.com
kubectl annotate serviceaccount \
-n sw-dev \
ksa-sw-dev-deployer \
iam.gke.io/gcp-service-account=gsa-dev-deployer@${GCP_PROJECT_ID}.iam.gserviceaccount.com
์ด์ Gitlab runner๊ฐVault์์ ๋ด์ฉ์ ์ฝ์ ์ ์๋ ์ ์ฑ
policy-sw-dev-deployer
์ ๋ง๋ญ๋๋ค.
vault policy write policy-sw-dev-deployer - <<EOF
# Read-only permissions
path "scaleway/project/${SW_PROJECT_NAME}/*" {
capabilities = [ "read" ]
}
EOF
์ํจ๋ฅผ ๋ง๋ค๊ณ policy-sw-dev-deployer
์ ์ฑ
์ ์ถ๊ฐํฉ๋๋ค.
GITLAB_RUNNER_VAULT_TOKEN=$(vault token create -policy=policy-sw-dev-deployer | grep "token" | awk 'NR==1{print $2}')
์์ ์ ๊ทผ ์ํจ๋ฅผ ๋ ์ข์ํ๋ค๋ฉด, ์๋์ผ๋ก ์ํจ์ ๊ทธ๋ฃน ์ ์ฑ
์ ํ ๋นํ๊ธฐ ์ํด auth ๋ฐฉ๋ฒ์ ์ค์ ํ์ญ์์ค:
vault auth enable approle
vault write auth/approle/role/sw-dev-deployer \
secret_id_ttl=10m \
token_num_uses=10 \
token_ttl=20m \
token_max_ttl=30m \
secret_id_num_uses=40 \
token_policies=policy-sw-dev-deployer
ROLE_ID=$(vault read -field=role_id auth/approle/role/sw-dev-deployer/role-id)
SECRET_ID=$(vault write -f -field=secret_id auth/approle/role/sw-dev-deployer/secret-id)
GITLAB_RUNNER_VAULT_TOKEN=$(vault write auth/approle/login role_id="$ROLE_ID" secret_id="$SECRET_ID" | grep "token" | awk 'NR==1{print $2}')
Vault๋ ์ค์ ๋ก ์ด์ ์ ์์ฑ๋ ๊ณต์ฉ IP์์ ์ก์ธ์คํ ์ ์์ต๋๋ค.๊ทธ๋ฌ๋ Google Cloud DNS์ ์์ ์ ๋๋ฉ์ธ ์ด๋ฆ์ ๋ฑ๋กํ ๊ฒฝ์ฐ ํ์ ๋๋ฉ์ธ์์Vault์ ์ก์ธ์คํ ๋ณ์นญ ๋ ์ฝ๋๋ฅผ ๋ง๋ค ์ ์์ต๋๋ค.
gcloud dns record-sets transaction start --zone=$PUBLIC_DNS_ZONE_NAME
gcloud dns record-sets transaction add "$(gcloud compute addresses list --filter=name=vault-lb --format="value(ADDRESS)")" --name=vault.$PUBLIC_DNS_NAME. --ttl=300 --type=A --zone=$PUBLIC_DNS_ZONE_NAME
gcloud dns record-sets transaction execute --zone=$PUBLIC_DNS_ZONE_NAME
VAULT_ADDR="https://vault.${PUBLIC_DNS_NAME}"
Google Secret Manager์ vault ํ ํฐ์ ์ ์ฅํ์ฌ ๊ตฌ์ฑ์ ์๋ฃํ์ต๋๋ค.
gcloud beta secrets create vault-token --locations $GCP_REGION_DEFAULT --replication-policy user-managed
echo -n "${GITLAB_RUNNER_VAULT_TOKEN}" | gcloud beta secrets versions add vault-token --data-file=-
GitLab ๊ตฌ์ฑ
Gitlab CI
Gitlab SSH ํค๋ฅผ ์์ฑํ์ฌ ์ด์์๊ฐ git ์ ์ฅ์๋ฅผ ๋ฐ์ด ๋ฃ๊ณ Secret Manager์ ๊ฐ์ธ ํค๋ฅผ ์ ์ฅํ ์ ์๋๋ก ํฉ๋๋ค.
gcloud beta secrets create gitlab-ssh-key --locations $GCP_REGION_DEFAULT --replication-policy user-managed
cd ~/.ssh
ssh-keygen -t rsa -b 4096
gcloud beta secrets versions add gitlab-ssh-key --data-file=./id_rsa
ํ๊ฒฝ ๋ณ์์ ๊ฐ์ธ ํค๋ฅผ ์ ์ฅํ๋ ค๋ฉด ๋ค์๊ณผ ๊ฐ์ด ํ์ญ์์ค.
GITLAB_SSH_KEY=$(gcloud secrets versions access latest --secret=gitlab-ssh-key)
Gitlab runner๋ ์ปจํ
์ด๋์์ ์คํ๋ฉ๋๋ค.Docker ์ด๋ฏธ์ง๋ฅผ ๋น ๋ฅด๊ฒ ๋ง๋ค๋ ค๋ฉด Vault, ArgoCD, Terraform, gcloud sdk, sw cli ๋ฑ ํ์ํ ๋ชจ๋ ๋๊ตฌ๋ฅผ ์ฌ์ฉํฉ๋๋ค.
docker/Dockerfile
# CI tooling image for the GitLab runner: gcloud SDK base plus
# kustomize/kpt/kubectl, ArgoCD, Vault, Terraform and the Scaleway CLI.
FROM gcr.io/google.com/cloudsdktool/cloud-sdk:alpine
RUN gcloud components install kustomize kpt kubectl alpha beta
# install argocd
RUN curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/$(curl --silent "https://api.github.com/repos/argoproj/argo-cd/releases/latest" | grep '"tag_name"' | sed -E 's/.*"([^"]+)".*/\1/')/argocd-linux-amd64
RUN chmod +x /usr/local/bin/argocd
# install vault
ENV VAULT_VERSION=1.6.0
RUN curl -sS "https://releases.hashicorp.com/vault/${VAULT_VERSION}/vault_${VAULT_VERSION}_linux_amd64.zip" > vault.zip && \
unzip vault.zip -d /usr/bin && \
rm vault.zip
# install terraform
ENV TERRAFORM_VERSION=0.12.24
RUN curl -sS "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip" > terraform.zip && \
unzip terraform.zip -d /usr/bin && \
rm terraform.zip
# Install sw cli
ENV SCW_VERSION=2.2.3
RUN curl -o /usr/local/bin/scw -L "https://github.com/scaleway/scaleway-cli/releases/download/v${SCW_VERSION}/scw-${SCW_VERSION}-linux-x86_64"
RUN chmod +x /usr/local/bin/scw
# Smoke-test each installed tool during the build.
RUN vault -v
RUN terraform -v
# NOTE(review): `argocd` with no arguments prints usage and may exit
# non-zero, which would fail the build — confirm, or use `argocd version --client`.
RUN argocd
RUN gcloud -v
RUN scw version
# Build-time secrets injected by Cloud Build substitutions.
ARG GITLAB_SSH_KEY
ARG VAULT_ADDR
ARG VAULT_CA
ARG VAULT_TOKEN
# Write the Vault CA cert, restoring literal "\n" sequences to newlines.
RUN echo -n $VAULT_CA > /home/ca.pem
RUN sed -i 's/\\n/\n/g' /home/ca.pem
# NOTE(review): baking credentials into ENV persists them in the image
# layers and metadata; anyone who can pull the image can read them.
# Prefer runtime injection (e.g. Kubernetes secrets) — confirm.
ENV GITLAB_SSH_KEY=$GITLAB_SSH_KEY
ENV VAULT_ADDR=$VAULT_ADDR
ENV VAULT_TOKEN=$VAULT_TOKEN
ENV VAULT_CAPATH="/home/ca.pem"
docker/cloudbuild.yaml
# Cloud Build pipeline: build the tooling image with the CA cert and
# SSH key passed as build args, then push it to eu.gcr.io.
# NOTE(review): the Dockerfile also declares VAULT_ADDR and VAULT_TOKEN
# build args that are not passed here — confirm whether they are meant
# to be injected at build time or at runtime.
steps:
- name: 'gcr.io/cloud-builders/docker'
args: [ 'build',
'--build-arg',
'VAULT_CA=${_VAULT_CA}',
'--build-arg',
'GITLAB_SSH_KEY=${_GITLAB_SSH_KEY}',
'-t', 'eu.gcr.io/${_PROJECT_ID}/tools:$_VERSION',
'.' ]
images:
- 'eu.gcr.io/${_PROJECT_ID}/tools:$_VERSION'
Google ์ปจํ
์ด๋ ๋ ์ง์คํธ๋ฆฌ(GCR)์ Google Cloud Build๋ฅผ ์ฌ์ฉํ์ฌ ์ด๋ฏธ์ง๋ฅผ ๊ฒ์ํฉ๋๋ค.
cd docker
gcloud builds submit --config cloudbuild.yaml --substitutions \
_VAULT_CA="$(awk 'NF {sub(/\r/, ""); printf "%s\\n",$0;}' ../infra/tls/ca.pem)",_VERSION="latest",_GITLAB_SSH_KEY="$GITLAB_SSH_KEY",_PROJECT_ID="$GCP_PROJECT_ID"
ํ์ฌ Kubernetes์์ Gitlab runner๋ฅผ ์ค์ ํ ์ ์์ต๋๋ค.ํฌ๋ฉง ์ฅ์ฐฉ 3๋ถํฐ ์์:
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
chmod 700 get_helm.sh
./get_helm.sh
Gitlab runner๋ฅผ ์ํkubernetes ์ญํ ์ ๋ง๋ญ๋๋ค.
gitlab/dev/rbac-gitlab-demo-dev.yml
# RBAC for the GitLab runner manager pod: a service account bound to a
# namespace-scoped Role that lets it create job pods and secrets in sw-dev.
apiVersion: v1
kind: ServiceAccount
metadata:
name: ksa-sw-devops-gitlab-deployer
namespace: sw-dev
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: role-ksa-sw-devops-gitlab-deployer
namespace: sw-dev
rules:
- apiGroups: [""] # "" indicates the sw API group
resources: ["pods", "pods/exec", "secrets"]
verbs: ["get", "list", "watch", "create", "patch", "delete"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rolebinding-ksa-sw-devops-gitlab-deployer
namespace: sw-dev
subjects:
- kind: ServiceAccount
name: ksa-sw-devops-gitlab-deployer # Name is case sensitive
apiGroup: ""
roleRef:
kind: Role #this must be Role or ClusterRole
name: role-ksa-sw-devops-gitlab-deployer # this must match the name of the Role or ClusterRole you wish to bind to
apiGroup: rbac.authorization.k8s.io
kubectl apply -f gitlab/dev/rbac-gitlab-demo-dev.yml
Gitlab Helm ํจํค์ง๋ฅผ ์ถ๊ฐํ๋ ค๋ฉด:
helm repo add gitlab https://charts.gitlab.io
๋ฌ๋ฆฌ๊ธฐ ์ ์ ๋ฑ๋ก ์ํจ๋ฅผ ๊ธฐ๋ฐ๋ก ์ ์ฅ:
kubectl create secret generic secret-sw-devops-gitlab-runner-tokens --from-literal=runner-token='' --from-literal=runner-registration-token='<DEMO_INFRA_REPO_RUNNER_TOKEN>' -n sw-dev
Kubernetes์ Gitlab Runner ๋ฐฐํฌ:
gitlab/dev/values.yaml
## Specify an imagePullPolicy
## 'Always' if imageTag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
imagePullPolicy: IfNotPresent
## The GitLab Server URL (with protocol) to register the runner against
## ref: https://docs.gitlab.com/runner/commands/README.html#gitlab-runner-register
##
gitlabUrl: https://gitlab.com/
## The registration token for adding new Runners to the GitLab server. This must
## be retrieved from your GitLab instance.
## ref: https://docs.gitlab.com/ee/ci/runners/
##
# runnerRegistrationToken: "<>"
## Unregister all runners before termination
##
## Updating the runner's chart version or configuration will cause the runner container
## to be terminated and created again. This may cause your Gitlab instance to reference
## non-existent runners. Un-registering the runner before termination mitigates this issue.
## ref: https://docs.gitlab.com/runner/commands/README.html#gitlab-runner-unregister
##
unregisterRunners: true
## When stopping the runner, give it time to wait for its jobs to terminate.
##
## Updating the runner's chart version or configuration will cause the runner container
## to be terminated with a graceful stop request. terminationGracePeriodSeconds
## instructs Kubernetes to wait long enough for the runner pod to terminate gracefully.
## ref: https://docs.gitlab.com/runner/commands/#signals
terminationGracePeriodSeconds: 3600
## Set the certsSecretName in order to pass custom certificates for GitLab Runner to use
## Provide resource name for a Kubernetes Secret Object in the same namespace,
## this is used to populate the /etc/gitlab-runner/certs directory
## ref: https://docs.gitlab.com/runner/configuration/tls-self-signed.html#supported-options-for-self-signed-certificates
##
#certsSecretName:
## Configure the maximum number of concurrent jobs
## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section
##
concurrent: 10
## Defines in seconds how often to check GitLab for new builds
## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section
##
checkInterval: 30
## For RBAC support:
rbac:
create: false
## Run the gitlab-bastion container with the ability to deploy/manage containers of jobs
## cluster-wide or only within namespace
clusterWideAccess: false
## If RBAC is disabled in this Helm chart, use the following Kubernetes Service Account name.
##
serviceAccountName: ksa-sw-devops-gitlab-deployer
## Configure integrated Prometheus metrics exporter
## ref: https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server
##
metrics:
enabled: true
## Configuration for the Pods that the runner launches for each new job
##
runners:
# config: |
# [[runners]]
# [runners.kubernetes]
# image = "ubuntu:16.04"
## Default container image to use for builds when none is specified
##
image: ubuntu:18.04
## Specify whether the runner should be locked to a specific project: true, false. Defaults to true.
##
locked: false
## The amount of time, in seconds, that needs to pass before the runner will
## timeout attempting to connect to the container it has just created.
## ref: https://docs.gitlab.com/runner/executors/kubernetes.html
##
pollTimeout: 360
## Specify whether the runner should only run protected branches.
## Defaults to False.
##
## ref: https://docs.gitlab.com/ee/ci/runners/#protected-runners
##
protected: true
## Service Account to be used for runners
##
serviceAccountName: ksa-sw-dev-deployer
## Run all containers with the privileged flag enabled
## This will allow the docker:stable-dind image to run if you need to run Docker
## commands. Please read the docs before turning this on:
## ref: https://docs.gitlab.com/runner/executors/kubernetes.html#using-docker-dind
##
privileged: false
## The name of the secret containing runner-token and runner-registration-token
secret: secret-sw-devops-gitlab-runner-tokens
## Namespace to run Kubernetes jobs in (defaults to 'default')
##
namespace: sw-dev
## Build Container specific configuration
##
builds:
# cpuLimit: 200m
# memoryLimit: 256Mi
cpuRequests: 100m
memoryRequests: 128Mi
## Service Container specific configuration
##
services:
# cpuLimit: 200m
# memoryLimit: 256Mi
cpuRequests: 100m
memoryRequests: 128Mi
## Helper Container specific configuration
##
helpers:
# cpuLimit: 200m
# memoryLimit: 256Mi
cpuRequests: 100m
memoryRequests: 128Mi
## Specify the tags associated with the runner. Comma-separated list of tags.
##
## ref: https://docs.gitlab.com/ce/ci/runners/#using-tags
##
tags: "k8s-dev-runner"
## Node labels for pod assignment
##
nodeSelector:
nodepool: devops
## Specify node tolerations for CI job pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
nodeTolerations:
- key: "devops-reserved-pool"
operator: "Equal"
value: "true"
effect: "NoSchedule"
## Configure environment variables that will be injected to the pods that are created while
## the build is running. These variables are passed as parameters, i.e. `--env "NAME=VALUE"`,
## to `gitlab-runner register` command.
##
## Note that `envVars` (see below) are only present in the runner pod, not the pods that are
## created for each build.
##
## ref: https://docs.gitlab.com/runner/commands/#gitlab-runner-register
##
# env:
## Distributed runners caching
## ref: https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/configuration/autoscale.md#distributed-runners-caching
##
## If you want to use gcs based distributing caching:
## First of all you need to uncomment General settings and GCS settings sections.
# cache:
## General settings
# cacheType: gcs
# cachePath: "k8s_platform_sw_devops_runner"
# cacheShared: false
## GCS settings
# gcsBucketName:
## Use this line for access using access-id and private-key
# secretName: gcsaccess
## Use this line for access using google-application-credentials file
# secretName: google-application-credentials
## Helper container security context configuration
## Refer to https://docs.gitlab.com/runner/executors/kubernetes.html#using-security-context
# pod_security_context:
# run_as_non_root: true
# run_as_user: 100
# run_as_group: 100
# fs_group: 65533
# supplemental_groups: [101, 102]
helm install -n sw-dev sw-dev -f gitlab/dev/values.yaml gitlab/gitlab-runner
GitLab ๋ฌ๋์ ์ ์ ํ ๊ถํ์ด ์๋์ง ํ ์คํธํด ๋ณด๊ฒ ์ต๋๋ค.
kubectl run -it \
--image eu.gcr.io/${GCP_PROJECT_ID}/tools \
--serviceaccount ksa-sw-dev-deployer \
--namespace sw-dev \
gitlab-runner-auth-test
pod ์ฉ๊ธฐ์์ ์คํgcloud auth list
.์ดํpodkubectl delete pod gitlab-runner-auth-test -n sw-dev
๋ฅผ ์ญ์ ํ ์ ์์ต๋๋ค.
Gitlab ํ๋ก์ ํธ
export GCP_PROJECT_ID=<GCP_PROJECT_ID>
export SW_PROJECT_NAME=<SW_PROJECT_NAME>
export GIT_REPOSITORY_URL=<MY_REPO>/demo-env.git
export GCP_REGION_DEFAULT=europe-west1
export GCP_GKE_CLUSTER_ZONE=europe-west1-b
export GCP_KUBE_CONTEXT_NAME="gke_${GCP_PROJECT_ID}_${GCP_GKE_CLUSTER_ZONE}_gke-cluster-devops"
export PUBLIC_DNS_NAME=
export PUBLIC_DNS_ZONE_NAME=
export TERRAFORM_BUCKET_NAME=bucket-${GCP_PROJECT_ID}-sw-gcp-terraform-backend
gcloud config set project ${GCP_PROJECT_ID}
gsutil mb -c standard -l ${GCP_REGION_DEFAULT} gs://${TERRAFORM_BUCKET_NAME}
gsutil versioning set on gs://${TERRAFORM_BUCKET_NAME}
cd infra/plan
terraform init \
-backend-config="bucket=${TERRAFORM_BUCKET_NAME}" \
-backend-config="prefix=googlecloud/terraform/state"
sed -i "s/<LOCAL_IP_RANGES>/$(curl -s http://checkip.amazonaws.com/)\/32/g;s/<PUBLIC_DNS_NAME>/${PUBLIC_DNS_NAME}/g;s/<GCP_PROJECT_ID>/${GCP_PROJECT_ID}/g;s/<GCP_REGION>/${GCP_REGION_DEFAULT}/g;s/<GCP_GKE_CLUSTER_ZONE>/${GCP_GKE_CLUSTER_ZONE}/g" terraform.tfvars
terraform apply
gcloud container clusters get-credentials gke-cluster-devops --zone ${GCP_GKE_CLUSTER_ZONE} --project ${GCP_PROJECT_ID}
Vault์ ์ฑ๋ ์๊ฒฉ ์ฆ๋ช ์ ์ ์ฅํ๋ ค๋ฉด Vault์ ๋ํ ํ๊ฒฝ ๋ณ์๋ฅผ ์ค์ ํด์ผ ํฉ๋๋ค.
Vault์ ์ฃผ์, ์ธ์ฆ์ ์ํ CA ๋ฐ ์ด๊ธฐ ๋ฃจํธ ํ ํฐ์ ์ค์ ํฉ๋๋ค.
# cd infra/plan
export VAULT_ADDR="https://$(terraform output address)"
export VAULT_TOKEN="$(eval `terraform output root_token_decrypt_command`)"
export VAULT_CAPATH="$(cd ../ && pwd)/tls/ca.pem"
์ฑ๋ ์๊ฒฉ ์ฆ๋ช
์ ์ ์ฅํ๋ ค๋ฉด:vault secrets enable -path=scaleway/project/${SW_PROJECT_NAME} -version=2 kv
vault kv put scaleway/project/${SW_PROJECT_NAME}/credentials/access key="<SCW_ACCESS_KEY>"
vault kv put scaleway/project/${SW_PROJECT_NAME}/credentials/secret key="<SCW_SECRET_KEY>"
vault kv put scaleway/project/${SW_PROJECT_NAME}/config id="<SW_PROJECT_ID>"
Gitlab ํ์ดํ๋ผ์ธ์์ ์งํ ์ํ๋ฅผ ์ฝ๊ณ , ๊ธฐ๋ฐ์ ์ฝ๊ณ , docker ์ด๋ฏธ์ง๋ฅผ ๊ตฌ์ถํ๋ ค๋ฉด ํ์ํ ๊ถํ์ด ์๋ Google ์๋น์ค ๊ณ์ (GSA) ์ ๋ง๋ค์ด์ผ ํฉ๋๋ค.gcloud iam service-accounts create gsa-dev-deployer
gcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \
--role roles/container.developer \
--member "serviceAccount:gsa-dev-deployer@${GCP_PROJECT_ID}.iam.gserviceaccount.com"
gcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \
--role roles/secretmanager.secretAccessor \
--member "serviceAccount:gsa-dev-deployer@${GCP_PROJECT_ID}.iam.gserviceaccount.com"
gcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \
--role roles/cloudbuild.builds.builder \
--member "serviceAccount:gsa-dev-deployer@${GCP_PROJECT_ID}.iam.gserviceaccount.com"
gcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \
--role roles/iam.serviceAccountAdmin \
--member "serviceAccount:gsa-dev-deployer@${GCP_PROJECT_ID}.iam.gserviceaccount.com"
Gitlab runner ์์
์ ์คํํ๋ ๋ค์์คํ์ด์ค๋ฅผ ๋ง๋ค๋ ค๋ฉด ๋ค์๊ณผ ๊ฐ์ด ํ์ญ์์ค.kubectl create namespace sw-dev
Gitlab runner ์์
์ด GCP ์์์ ์ ๊ทผํ ์ ์๋๋ก ํ๊ธฐ ์ํด์ GSA๋ ์์์ ๋ง๋ GSA๋ฅผ Kubernetes ์๋น์ค ๊ณ์ (KSA) ์ ์ฐ๊ฒฐํด์ผ ํฉ๋๋ค [2]:kubectl create serviceaccount -n sw-dev ksa-sw-dev-deployer
gcloud iam service-accounts add-iam-policy-binding \
--role roles/iam.workloadIdentityUser \
--member "serviceAccount:${GCP_PROJECT_ID}.svc.id.goog[sw-dev/ksa-sw-dev-deployer]" \
gsa-dev-deployer@${GCP_PROJECT_ID}.iam.gserviceaccount.com
kubectl annotate serviceaccount \
-n sw-dev \
ksa-sw-dev-deployer \
iam.gke.io/gcp-service-account=gsa-dev-deployer@${GCP_PROJECT_ID}.iam.gserviceaccount.com
์ด์ Gitlab runner๊ฐVault์์ ๋ด์ฉ์ ์ฝ์ ์ ์๋ ์ ์ฑ
policy-sw-dev-deployer
์ ๋ง๋ญ๋๋ค.vault policy write policy-sw-dev-deployer - <<EOF
# Read-only permissions
path "scaleway/project/${SW_PROJECT_NAME}/*" {
capabilities = [ "read" ]
}
EOF
์ํจ๋ฅผ ๋ง๋ค๊ณ policy-sw-dev-deployer
์ ์ฑ
์ ์ถ๊ฐํฉ๋๋ค.
# `-field=token` makes the Vault CLI print only the token value; scraping the
# human-readable table with `grep | awk` breaks whenever the output format changes.
GITLAB_RUNNER_VAULT_TOKEN=$(vault token create -policy=policy-sw-dev-deployer -field=token)
์์ ์ ๊ทผ ์ํจ๋ฅผ ๋ ์ข์ํ๋ค๋ฉด, ์๋์ผ๋ก ์ํจ์ ๊ทธ๋ฃน ์ ์ฑ
์ ํ ๋นํ๊ธฐ ์ํด auth ๋ฐฉ๋ฒ์ ์ค์ ํ์ญ์์ค:vault auth enable approle
vault write auth/approle/role/sw-dev-deployer \
secret_id_ttl=10m \
token_num_uses=10 \
token_ttl=20m \
token_max_ttl=30m \
secret_id_num_uses=40 \
token_policies=policy-sw-dev-deployer
# Fetch the AppRole role-id and mint a secret-id for it.
ROLE_ID=$(vault read -field=role_id auth/approle/role/sw-dev-deployer/role-id)
SECRET_ID=$(vault write -f -field=secret_id auth/approle/role/sw-dev-deployer/secret-id)
# Log in with the AppRole and extract the client token directly with
# `-field=token` (flags go before the path) instead of grep/awk table-scraping.
GITLAB_RUNNER_VAULT_TOKEN=$(vault write -field=token auth/approle/login role_id="$ROLE_ID" secret_id="$SECRET_ID")
Vault๋ ์ค์ ๋ก ์ด์ ์ ์์ฑ๋ ๊ณต์ฉ IP์์ ์ก์ธ์คํ ์ ์์ต๋๋ค.๊ทธ๋ฌ๋ Google Cloud DNS์ ์์ ์ ๋๋ฉ์ธ ์ด๋ฆ์ ๋ฑ๋กํ ๊ฒฝ์ฐ ํ์ ๋๋ฉ์ธ์์Vault์ ์ก์ธ์คํ ๋ณ์นญ ๋ ์ฝ๋๋ฅผ ๋ง๋ค ์ ์์ต๋๋ค.gcloud dns record-sets transaction start --zone=$PUBLIC_DNS_ZONE_NAME
gcloud dns record-sets transaction add "$(gcloud compute addresses list --filter=name=vault-lb --format="value(ADDRESS)")" --name=vault.$PUBLIC_DNS_NAME. --ttl=300 --type=A --zone=$PUBLIC_DNS_ZONE_NAME
gcloud dns record-sets transaction execute --zone=$PUBLIC_DNS_ZONE_NAME
VAULT_ADDR="https://vault.${PUBLIC_DNS_NAME}"
Google Secret Manager์ vault ํ ํฐ์ ์ ์ฅํ์ฌ ๊ตฌ์ฑ์ ์๋ฃํ์ต๋๋ค.gcloud beta secrets create vault-token --locations $GCP_REGION_DEFAULT --replication-policy user-managed
echo -n "${GITLAB_RUNNER_VAULT_TOKEN}" | gcloud beta secrets versions add vault-token --data-file=-
GitLab ๊ตฌ์ฑ
Gitlab CI
Gitlab SSH ํค๋ฅผ ์์ฑํ์ฌ ์ด์์๊ฐ git ์ ์ฅ์๋ฅผ ๋ฐ์ด ๋ฃ๊ณ Secret Manager์ ๊ฐ์ธ ํค๋ฅผ ์ ์ฅํ ์ ์๋๋ก ํฉ๋๋ค.
gcloud beta secrets create gitlab-ssh-key --locations $GCP_REGION_DEFAULT --replication-policy user-managed
cd ~/.ssh
ssh-keygen -t rsa -b 4096
gcloud beta secrets versions add gitlab-ssh-key --data-file=./id_rsa
ํ๊ฒฝ ๋ณ์์ ๊ฐ์ธ ํค๋ฅผ ์ ์ฅํ๋ ค๋ฉด ๋ค์๊ณผ ๊ฐ์ด ํ์ญ์์ค.
GITLAB_SSH_KEY=$(gcloud secrets versions access latest --secret=gitlab-ssh-key)
Gitlab runner๋ ์ปจํ
์ด๋์์ ์คํ๋ฉ๋๋ค.Docker ์ด๋ฏธ์ง๋ฅผ ๋น ๋ฅด๊ฒ ๋ง๋ค๋ ค๋ฉด Vault, ArgoCD, Terraform, gcloud sdk, sw cli ๋ฑ ํ์ํ ๋ชจ๋ ๋๊ตฌ๋ฅผ ์ฌ์ฉํฉ๋๋ค.
docker/Dockerfile
# CI tooling image: gcloud SDK + kustomize/kpt/kubectl + argocd + vault + terraform + scw.
FROM gcr.io/google.com/cloudsdktool/cloud-sdk:alpine
RUN gcloud components install kustomize kpt kubectl alpha beta
# install argocd (latest release binary from GitHub).
# `-f` makes curl fail on HTTP errors instead of saving an error page as the binary.
RUN curl -fsSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/$(curl --silent "https://api.github.com/repos/argoproj/argo-cd/releases/latest" | grep '"tag_name"' | sed -E 's/.*"([^"]+)".*/\1/')/argocd-linux-amd64
RUN chmod +x /usr/local/bin/argocd
# install vault
ENV VAULT_VERSION=1.6.0
RUN curl -fsS "https://releases.hashicorp.com/vault/${VAULT_VERSION}/vault_${VAULT_VERSION}_linux_amd64.zip" > vault.zip && \
unzip vault.zip -d /usr/bin && \
rm vault.zip
# install terraform
ENV TERRAFORM_VERSION=0.12.24
RUN curl -fsS "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip" > terraform.zip && \
unzip terraform.zip -d /usr/bin && \
rm terraform.zip
# Install sw cli (Scaleway)
ENV SCW_VERSION=2.2.3
RUN curl -fsSL -o /usr/local/bin/scw "https://github.com/scaleway/scaleway-cli/releases/download/v${SCW_VERSION}/scw-${SCW_VERSION}-linux-x86_64"
RUN chmod +x /usr/local/bin/scw
# Smoke-test every tool so a broken download fails the build.
# `argocd version --client` works offline; a bare `argocd` only prints help.
RUN vault -v
RUN terraform -v
RUN argocd version --client
RUN gcloud -v
RUN scw version
# NOTE(review): values baked in via ARG/ENV are visible in the image metadata
# (`docker history` / `docker inspect`). Prefer injecting GITLAB_SSH_KEY and
# VAULT_TOKEN at runtime (Kubernetes Secret / CI variable) instead of build time.
ARG GITLAB_SSH_KEY
ARG VAULT_ADDR
ARG VAULT_CA
ARG VAULT_TOKEN
# The CA is passed with literal "\n" separators; quote it so the shell does not
# mangle whitespace, then restore real newlines with sed.
RUN echo -n "$VAULT_CA" > /home/ca.pem
RUN sed -i 's/\\n/\n/g' /home/ca.pem
ENV GITLAB_SSH_KEY=$GITLAB_SSH_KEY
ENV VAULT_ADDR=$VAULT_ADDR
ENV VAULT_TOKEN=$VAULT_TOKEN
ENV VAULT_CAPATH="/home/ca.pem"
docker/cloudbuild.yaml
# Cloud Build pipeline: builds the CI tools image and pushes it to GCR.
# NOTE(review): _GITLAB_SSH_KEY is passed as a --build-arg, so it ends up in the
# image metadata — confirm this is acceptable or inject the key at runtime.
steps:
- name: 'gcr.io/cloud-builders/docker'
args: [ 'build',
'--build-arg',
'VAULT_CA=${_VAULT_CA}',
'--build-arg',
'GITLAB_SSH_KEY=${_GITLAB_SSH_KEY}',
'-t', 'eu.gcr.io/${_PROJECT_ID}/tools:$_VERSION',
'.' ]
# Images listed here are pushed to the registry after a successful build.
images:
- 'eu.gcr.io/${_PROJECT_ID}/tools:$_VERSION'
Google ์ปจํ
์ด๋ ๋ ์ง์คํธ๋ฆฌ(GCR)์ Google Cloud Build๋ฅผ ์ฌ์ฉํ์ฌ ์ด๋ฏธ์ง๋ฅผ ๊ฒ์ํฉ๋๋ค.
cd docker
gcloud builds submit --config cloudbuild.yaml --substitutions \
_VAULT_CA="$(awk 'NF {sub(/\r/, ""); printf "%s\\n",$0;}' ../infra/tls/ca.pem)",_VERSION="latest",_GITLAB_SSH_KEY="$GITLAB_SSH_KEY",_PROJECT_ID="$GCP_PROJECT_ID"
ํ์ฌ Kubernetes์์ GitLab ๋ฌ๋๋ฅผ ์ค์ ํ ์ ์์ต๋๋ค. Helm 3 ์ค์น๋ถํฐ ์์ํฉ๋๋ค:
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
chmod 700 get_helm.sh
./get_helm.sh
Gitlab runner๋ฅผ ์ํkubernetes ์ญํ ์ ๋ง๋ญ๋๋ค.
gitlab/dev/rbac-gitlab-demo-dev.yml
# ServiceAccount + namespace-scoped Role/RoleBinding for the GitLab runner
# manager pod: it needs to create/exec job pods and read secrets in sw-dev.
apiVersion: v1
kind: ServiceAccount
metadata:
name: ksa-sw-devops-gitlab-deployer
namespace: sw-dev
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: role-ksa-sw-devops-gitlab-deployer
namespace: sw-dev
rules:
- apiGroups: [""] # "" indicates the core API group
resources: ["pods", "pods/exec", "secrets"]
verbs: ["get", "list", "watch", "create", "patch", "delete"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rolebinding-ksa-sw-devops-gitlab-deployer
namespace: sw-dev
subjects:
- kind: ServiceAccount
name: ksa-sw-devops-gitlab-deployer # Name is case sensitive
apiGroup: "" # core API group for ServiceAccount subjects
roleRef:
kind: Role # this must be Role or ClusterRole
name: role-ksa-sw-devops-gitlab-deployer # this must match the name of the Role or ClusterRole you wish to bind to
apiGroup: rbac.authorization.k8s.io
kubectl apply -f gitlab/dev/rbac-gitlab-demo-dev.yml
Gitlab Helm ํจํค์ง๋ฅผ ์ถ๊ฐํ๋ ค๋ฉด:
helm repo add gitlab https://charts.gitlab.io
๋ฌ๋ฆฌ๊ธฐ ์ ์ ๋ฑ๋ก ์ํจ๋ฅผ ๊ธฐ๋ฐ๋ก ์ ์ฅ:
kubectl create secret generic secret-sw-devops-gitlab-runner-tokens --from-literal=runner-token='' --from-literal=runner-registration-token='<DEMO_INFRA_REPO_RUNNER_TOKEN>' -n sw-dev
Kubernetes์ Gitlab Runner ๋ฐฐํฌ:
gitlab/dev/values.yaml
## Specify a imagePullPolicy
## 'Always' if imageTag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
imagePullPolicy: IfNotPresent
## The GitLab Server URL (with protocol) that want to register the runner against
## ref: https://docs.gitlab.com/runner/commands/README.html#gitlab-runner-register
##
gitlabUrl: https://gitlab.com/
## The registration token for adding new Runners to the GitLab server. This must
## be retrieved from your GitLab instance.
## ref: https://docs.gitlab.com/ee/ci/runners/
##
# runnerRegistrationToken: "<>"
## Unregister all runners before termination
##
## Updating the runner's chart version or configuration will cause the runner container
## to be terminated and created again. This may cause your Gitlab instance to reference
## non-existant runners. Un-registering the runner before termination mitigates this issue.
## ref: https://docs.gitlab.com/runner/commands/README.html#gitlab-runner-unregister
##
unregisterRunners: true
## When stopping the runner, give it time to wait for its jobs to terminate.
##
## Updating the runner's chart version or configuration will cause the runner container
## to be terminated with a graceful stop request. terminationGracePeriodSeconds
## instructs Kubernetes to wait long enough for the runner pod to terminate gracefully.
## ref: https://docs.gitlab.com/runner/commands/#signals
terminationGracePeriodSeconds: 3600
## Set the certsSecretName in order to pass custom certificates for GitLab Runner to use
## Provide resource name for a Kubernetes Secret Object in the same namespace,
## this is used to populate the /etc/gitlab-runner/certs directory
## ref: https://docs.gitlab.com/runner/configuration/tls-self-signed.html#supported-options-for-self-signed-certificates
##
#certsSecretName:
## Configure the maximum number of concurrent jobs
## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section
##
concurrent: 10
## Defines in seconds how often to check GitLab for a new builds
## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section
##
checkInterval: 30
## For RBAC support:
rbac:
create: false
## Run the gitlab-bastion container with the ability to deploy/manage containers of jobs
## cluster-wide or only within namespace
clusterWideAccess: false
## If RBAC is disabled in this Helm chart, use the following Kubernetes Service Account name.
##
serviceAccountName: ksa-sw-devops-gitlab-deployer
## Configure integrated Prometheus metrics exporter
## ref: https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server
##
metrics:
enabled: true
## Configuration for the Pods that the runner launches for each new job
##
runners:
# config: |
# [[runners]]
# [runners.kubernetes]
# image = "ubuntu:16.04"
## Default container image to use for builds when none is specified
##
image: ubuntu:18.04
## Specify whether the runner should be locked to a specific project: true, false. Defaults to true.
##
locked: false
## The amount of time, in seconds, that needs to pass before the runner will
## timeout attempting to connect to the container it has just created.
## ref: https://docs.gitlab.com/runner/executors/kubernetes.html
##
pollTimeout: 360
## Specify whether the runner should only run protected branches.
## Defaults to False.
##
## ref: https://docs.gitlab.com/ee/ci/runners/#protected-runners
##
protected: true
## Service Account to be used for runners
##
serviceAccountName: ksa-sw-dev-deployer
## Run all containers with the privileged flag enabled
## This will allow the docker:stable-dind image to run if you need to run Docker
## commands. Please read the docs before turning this on:
## ref: https://docs.gitlab.com/runner/executors/kubernetes.html#using-docker-dind
##
privileged: false
## The name of the secret containing runner-token and runner-registration-token
secret: secret-sw-devops-gitlab-runner-tokens
## Namespace to run Kubernetes jobs in (defaults to 'default')
##
namespace: sw-dev
## Build Container specific configuration
##
builds:
# cpuLimit: 200m
# memoryLimit: 256Mi
cpuRequests: 100m
memoryRequests: 128Mi
## Service Container specific configuration
##
services:
# cpuLimit: 200m
# memoryLimit: 256Mi
cpuRequests: 100m
memoryRequests: 128Mi
## Helper Container specific configuration
##
helpers:
# cpuLimit: 200m
# memoryLimit: 256Mi
cpuRequests: 100m
memoryRequests: 128Mi
## Specify the tags associated with the runner. Comma-separated list of tags.
##
## ref: https://docs.gitlab.com/ce/ci/runners/#using-tags
##
tags: "k8s-dev-runner"
## Node labels for pod assignment
##
nodeSelector:
nodepool: devops
## Specify node tolerations for CI job pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
nodeTolerations:
- key: "devops-reserved-pool"
operator: "Equal"
value: "true"
effect: "NoSchedule"
## Configure environment variables that will be injected to the pods that are created while
## the build is running. These variables are passed as parameters, i.e. `--env "NAME=VALUE"`,
## to `gitlab-runner register` command.
##
## Note that `envVars` (see below) are only present in the runner pod, not the pods that are
## created for each build.
##
## ref: https://docs.gitlab.com/runner/commands/#gitlab-runner-register
##
# env:
## Distributed runners caching
## ref: https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/configuration/autoscale.md#distributed-runners-caching
##
## If you want to use gcs based distributing caching:
## First of all you need to uncomment General settings and GCS settings sections.
# cache:
## General settings
# cacheType: gcs
# cachePath: "k8s_platform_sw_devops_runner"
# cacheShared: false
## GCS settings
# gcsBucketName:
## Use this line for access using access-id and private-key
# secretName: gcsaccess
## Use this line for access using google-application-credentials file
# secretName: google-application-credentials
## Helper container security context configuration
## Refer to https://docs.gitlab.com/runner/executors/kubernetes.html#using-security-context
# pod_security_context:
# run_as_non_root: true
# run_as_user: 100
# run_as_group: 100
# fs_group: 65533
# supplemental_groups: [101, 102]
helm install -n sw-dev sw-dev -f gitlab/dev/values.yaml gitlab/gitlab-runner
Gitlab ์คํ ํ๋ก๊ทธ๋จ์ ์ ์ ํ ๋ผ์ด์ผ์ค๊ฐ ์๋์ง ํ
์คํธํด ๋ณด๊ฒ ์ต๋๋ค.
kubectl run -it \
--image eu.gcr.io/${GCP_PROJECT_ID}/tools \
--serviceaccount ksa-sw-dev-deployer \
--namespace sw-dev \
gitlab-runner-auth-test
pod ์ฉ๊ธฐ์์ ์คํgcloud auth list
.์ดํpodkubectl delete pod gitlab-runner-auth-test -n sw-dev
๋ฅผ ์ญ์ ํ ์ ์์ต๋๋ค.
Gitlab ํ๋ก์ ํธ
gcloud beta secrets create gitlab-ssh-key --locations $GCP_REGION_DEFAULT --replication-policy user-managed
cd ~/.ssh
ssh-keygen -t rsa -b 4096
gcloud beta secrets versions add gitlab-ssh-key --data-file=./id_rsa
GITLAB_SSH_KEY=$(gcloud secrets versions access latest --secret=gitlab-ssh-key)
FROM gcr.io/google.com/cloudsdktool/cloud-sdk:alpine
RUN gcloud components install kustomize kpt kubectl alpha beta
# install argocd
RUN curl -sSL -o /usr/local/bin/argocd https://github.com/argoproj/argo-cd/releases/download/$(curl --silent "https://api.github.com/repos/argoproj/argo-cd/releases/latest" | grep '"tag_name"' | sed -E 's/.*"([^"]+)".*/\1/')/argocd-linux-amd64
RUN chmod +x /usr/local/bin/argocd
# install vault
ENV VAULT_VERSION=1.6.0
RUN curl -sS "https://releases.hashicorp.com/vault/${VAULT_VERSION}/vault_${VAULT_VERSION}_linux_amd64.zip" > vault.zip && \
unzip vault.zip -d /usr/bin && \
rm vault.zip
# install terraform
ENV TERRAFORM_VERSION=0.12.24
RUN curl -sS "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip" > terraform.zip && \
unzip terraform.zip -d /usr/bin && \
rm terraform.zip
# Install sw cli
ENV SCW_VERSION=2.2.3
RUN curl -o /usr/local/bin/scw -L "https://github.com/scaleway/scaleway-cli/releases/download/v${SCW_VERSION}/scw-${SCW_VERSION}-linux-x86_64"
RUN chmod +x /usr/local/bin/scw
RUN vault -v
RUN terraform -v
RUN argocd
RUN gcloud -v
RUN scw version
ARG GITLAB_SSH_KEY
ARG VAULT_ADDR
ARG VAULT_CA
ARG VAULT_TOKEN
RUN echo -n $VAULT_CA > /home/ca.pem
RUN sed -i 's/\\n/\n/g' /home/ca.pem
ENV GITLAB_SSH_KEY=$GITLAB_SSH_KEY
ENV VAULT_ADDR=$VAULT_ADDR
ENV VAULT_TOKEN=$VAULT_TOKEN
ENV VAULT_CAPATH="/home/ca.pem"
steps:
- name: 'gcr.io/cloud-builders/docker'
args: [ 'build',
'--build-arg',
'VAULT_CA=${_VAULT_CA}',
'--build-arg',
'GITLAB_SSH_KEY=${_GITLAB_SSH_KEY}',
'-t', 'eu.gcr.io/${_PROJECT_ID}/tools:$_VERSION',
'.' ]
images:
- 'eu.gcr.io/${_PROJECT_ID}/tools:$_VERSION'
cd docker
gcloud builds submit --config cloudbuild.yaml --substitutions \
_VAULT_CA="$(awk 'NF {sub(/\r/, ""); printf "%s\\n",$0;}' ../infra/tls/ca.pem)",_VERSION="latest",_GITLAB_SSH_KEY="$GITLAB_SSH_KEY",_PROJECT_ID="$GCP_PROJECT_ID"
curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
chmod 700 get_helm.sh
./get_helm.sh
apiVersion: v1
kind: ServiceAccount
metadata:
name: ksa-sw-devops-gitlab-deployer
namespace: sw-dev
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: role-ksa-sw-devops-gitlab-deployer
namespace: sw-dev
rules:
- apiGroups: [""] # "" indicates the sw API group
resources: ["pods", "pods/exec", "secrets"]
verbs: ["get", "list", "watch", "create", "patch", "delete"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: rolebinding-ksa-sw-devops-gitlab-deployer
namespace: sw-dev
subjects:
- kind: ServiceAccount
name: ksa-sw-devops-gitlab-deployer # Name is case sensitive
apiGroup: ""
roleRef:
kind: Role #this must be Role or ClusterRole
name: role-ksa-sw-devops-gitlab-deployer # this must match the name of the Role or ClusterRole you wish to bind to
apiGroup: rbac.authorization.k8s.io
kubectl apply -f gitlab/dev/rbac-gitlab-demo-dev.yml
helm repo add gitlab https://charts.gitlab.io
kubectl create secret generic secret-sw-devops-gitlab-runner-tokens --from-literal=runner-token='' --from-literal=runner-registration-token='<DEMO_INFRA_REPO_RUNNER_TOKEN>' -n sw-dev
## Specify a imagePullPolicy
## 'Always' if imageTag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
imagePullPolicy: IfNotPresent
## The GitLab Server URL (with protocol) that want to register the runner against
## ref: https://docs.gitlab.com/runner/commands/README.html#gitlab-runner-register
##
gitlabUrl: https://gitlab.com/
## The registration token for adding new Runners to the GitLab server. This must
## be retrieved from your GitLab instance.
## ref: https://docs.gitlab.com/ee/ci/runners/
##
# runnerRegistrationToken: "<>"
## Unregister all runners before termination
##
## Updating the runner's chart version or configuration will cause the runner container
## to be terminated and created again. This may cause your Gitlab instance to reference
## non-existant runners. Un-registering the runner before termination mitigates this issue.
## ref: https://docs.gitlab.com/runner/commands/README.html#gitlab-runner-unregister
##
unregisterRunners: true
## When stopping the runner, give it time to wait for its jobs to terminate.
##
## Updating the runner's chart version or configuration will cause the runner container
## to be terminated with a graceful stop request. terminationGracePeriodSeconds
## instructs Kubernetes to wait long enough for the runner pod to terminate gracefully.
## ref: https://docs.gitlab.com/runner/commands/#signals
terminationGracePeriodSeconds: 3600
## Set the certsSecretName in order to pass custom certificates for GitLab Runner to use
## Provide resource name for a Kubernetes Secret Object in the same namespace,
## this is used to populate the /etc/gitlab-runner/certs directory
## ref: https://docs.gitlab.com/runner/configuration/tls-self-signed.html#supported-options-for-self-signed-certificates
##
#certsSecretName:
## Configure the maximum number of concurrent jobs
## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section
##
concurrent: 10
## Defines in seconds how often to check GitLab for a new builds
## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section
##
checkInterval: 30
## For RBAC support:
rbac:
create: false
## Run the gitlab-bastion container with the ability to deploy/manage containers of jobs
## cluster-wide or only within namespace
clusterWideAccess: false
## If RBAC is disabled in this Helm chart, use the following Kubernetes Service Account name.
##
serviceAccountName: ksa-sw-devops-gitlab-deployer
## Configure integrated Prometheus metrics exporter
## ref: https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server
##
metrics:
enabled: true
## Configuration for the Pods that the runner launches for each new job
##
runners:
# config: |
# [[runners]]
# [runners.kubernetes]
# image = "ubuntu:16.04"
## Default container image to use for builds when none is specified
##
image: ubuntu:18.04
## Specify whether the runner should be locked to a specific project: true, false. Defaults to true.
##
locked: false
## The amount of time, in seconds, that needs to pass before the runner will
## timeout attempting to connect to the container it has just created.
## ref: https://docs.gitlab.com/runner/executors/kubernetes.html
##
pollTimeout: 360
## Specify whether the runner should only run protected branches.
## Defaults to False.
##
## ref: https://docs.gitlab.com/ee/ci/runners/#protected-runners
##
protected: true
## Service Account to be used for runners
##
serviceAccountName: ksa-sw-dev-deployer
## Run all containers with the privileged flag enabled
## This will allow the docker:stable-dind image to run if you need to run Docker
## commands. Please read the docs before turning this on:
## ref: https://docs.gitlab.com/runner/executors/kubernetes.html#using-docker-dind
##
privileged: false
## The name of the secret containing runner-token and runner-registration-token
secret: secret-sw-devops-gitlab-runner-tokens
## Namespace to run Kubernetes jobs in (defaults to 'default')
##
namespace: sw-dev
## Build Container specific configuration
##
builds:
# cpuLimit: 200m
# memoryLimit: 256Mi
cpuRequests: 100m
memoryRequests: 128Mi
## Service Container specific configuration
##
services:
# cpuLimit: 200m
# memoryLimit: 256Mi
cpuRequests: 100m
memoryRequests: 128Mi
## Helper Container specific configuration
##
helpers:
# cpuLimit: 200m
# memoryLimit: 256Mi
cpuRequests: 100m
memoryRequests: 128Mi
## Specify the tags associated with the runner. Comma-separated list of tags.
##
## ref: https://docs.gitlab.com/ce/ci/runners/#using-tags
##
tags: "k8s-dev-runner"
## Node labels for pod assignment
##
nodeSelector:
nodepool: devops
## Specify node tolerations for CI job pods assignment
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
nodeTolerations:
- key: "devops-reserved-pool"
operator: "Equal"
value: "true"
effect: "NoSchedule"
## Configure environment variables that will be injected to the pods that are created while
## the build is running. These variables are passed as parameters, i.e. `--env "NAME=VALUE"`,
## to `gitlab-runner register` command.
##
## Note that `envVars` (see below) are only present in the runner pod, not the pods that are
## created for each build.
##
## ref: https://docs.gitlab.com/runner/commands/#gitlab-runner-register
##
# env:
## Distributed runners caching
## ref: https://gitlab.com/gitlab-org/gitlab-runner/blob/master/docs/configuration/autoscale.md#distributed-runners-caching
##
## If you want to use gcs based distributing caching:
## First of all you need to uncomment General settings and GCS settings sections.
# cache:
## General settings
# cacheType: gcs
# cachePath: "k8s_platform_sw_devops_runner"
# cacheShared: false
## GCS settings
# gcsBucketName:
## Use this line for access using access-id and private-key
# secretName: gcsaccess
## Use this line for access using google-application-credentials file
# secretName: google-application-credentials
## Helper container security context configuration
## Refer to https://docs.gitlab.com/runner/executors/kubernetes.html#using-security-context
# pod_security_context:
# run_as_non_root: true
# run_as_user: 100
# run_as_group: 100
# fs_group: 65533
# supplemental_groups: [101, 102]
helm install -n sw-dev sw-dev -f gitlab/dev/values.yaml gitlab/gitlab-runner
kubectl run -it \
--image eu.gcr.io/${GCP_PROJECT_ID}/tools \
--serviceaccount ksa-sw-dev-deployer \
--namespace sw-dev \
gitlab-runner-auth-test
demo-app
, demo-env
๋ฐ demo-infra
๋ฅผ ์์ฑํฉ๋๋ค.v*
์ -app
์ ๋ณดํธ ๋ ์ด๋ธ-env
์ ์ถ๊ฐํฉ๋๋ค.Settings > Repository > Protected Tags
๋ก ์ด๋ํฉ๋๋ค.-infra
, -env
๋ฐ -app
์ Gitlab Runner๋ฅผ ์ฌ์ฉํฉ๋๋ค.Settings > CI / CD > Runners > Specific Runners > Enable for this project
๋ก ์ด๋ํฉ๋๋ค.Argo CD ๊ตฌ์ฑ
Argo CD๋ Kubernetes์ ์ ์ธ์ ์ธ GitOps ์ฐ์ ๋ฐฐ์ก ๋๊ตฌ์
๋๋ค.Kubernetes ํด๋ฌ์คํฐ์์ ๋ค์์ ๊ตฌ์ฑํฉ๋๋ค.
Kubernetes์ Argo CD๋ฅผ ์ค์นํ๋ ค๋ฉด:
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user="$(gcloud config get-value account)"
Argo CD API ์๋ฒ์ ์ก์ธ์คํ๋ ค๋ฉด ๋ค์๊ณผ ๊ฐ์ ํจ์น ์๋น์ค๊ฐ ํ์ํฉ๋๋ค.
kubectl patch svc argocd-server -n argocd -p '{"spec": {"type": "LoadBalancer"}}'
๊ธฐ๋ณธ ๊ตฌ์ฑ ์ธ์๋ ๋ค์์ด ํ์ํฉ๋๋ค.
kubectl create namespace argocd
kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user="$(gcloud config get-value account)"
kubectl patch svc argocd-server -n argocd -p '{"spec": {"type": "LoadBalancer"}}'
# Argo CD main ConfigMap (argocd-cm): declares the Git repository to watch and
# the local accounts (admin, demo for UI login, gitlab for API-key access).
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-cm
namespace: argocd
labels:
app.kubernetes.io/name: argocd-cm
app.kubernetes.io/part-of: argocd
data:
# Repository credentials are read from the `demo` Secret in the argocd namespace.
repositories: |
- url: <GIT_REPOSITORY_URL>
passwordSecret:
name: demo
key: password
usernameSecret:
name: demo
key: username
# add an additional local user with apiKey and login capabilities
# apiKey - allows generating API keys
# login - allows to login using UI
admin.enabled: "true"
accounts.demo.enabled: "true"
accounts.demo: login
accounts.gitlab.enabled: "true"
accounts.gitlab: apiKey
k8s/argocd-rbac-cm.yaml
apiVersion: v1
# Argo CD RBAC ConfigMap (argocd-rbac-cm). policy.csv uses casbin-style rules:
#   p, <role>, <resource>, <action>, <object>, allow   (grant a permission)
#   g, <account>, <role>                               (bind an account to a role)
kind: ConfigMap
apiVersion: v1
metadata:
name: argocd-rbac-cm
namespace: argocd
labels:
app.kubernetes.io/name: argocd-rbac-cm
app.kubernetes.io/part-of: argocd
data:
# policies — accounts with no matching rule fall back to read-only
policy.default: role:readonly
policy.csv: |
p, role:demo-admins, applications, create, *, allow
p, role:demo-admins, applications, get, *, allow
p, role:demo-admins, applications, list, *, allow
p, role:demo-admins, clusters, create, *, allow
p, role:demo-admins, clusters, get, *, allow
p, role:demo-admins, clusters, list, *, allow
p, role:demo-admins, projects, create, *, allow
p, role:demo-admins, projects, get, *, allow
p, role:demo-admins, projects, list, *, allow
p, role:demo-admins, repositories, create, *, allow
p, role:demo-admins, repositories, get, *, allow
p, role:demo-admins, repositories, list, *, allow
g, gitlab, role:demo-admins
๋๋ฉ์ธ ์ด๋ฆ์ด ์์ผ๋ฉด ํ์ ๋๋ฉ์ธ์์ Argo CD์ ์ก์ธ์คํ ์ ์์ต๋๋ค.
k8s/argocd-server-ingress.yaml
apiVersion: extensions/v1beta1
# HTTPS Ingress exposing the Argo CD API/UI on argocd.<PUBLIC_DNS_NAME>.
# NOTE(review): the apiVersion used here (extensions/v1beta1, declared on the
# line above) is deprecated and removed in Kubernetes 1.22+; migrate to
# networking.k8s.io/v1 (backend -> service.name/service.port) when upgrading.
kind: Ingress
metadata:
name: argocd-server-https-ingress
namespace: argocd
annotations:
# Redirect plain HTTP to HTTPS at the load balancer.
ingress.kubernetes.io/ssl-redirect: "true"
# Use the pre-created global static IP named `argocd`.
kubernetes.io/ingress.global-static-ip-name: argocd
# Attach the GKE ManagedCertificate resource named `argocd`.
networking.gke.io/managed-certificates: argocd
spec:
# Default backend: all traffic goes to the argocd-server Service.
backend:
serviceName: argocd-server
servicePort: http
rules:
- host: argocd.<PUBLIC_DNS_NAME>
http:
paths:
- path: /
backend:
serviceName: argocd-server
servicePort: http
k8s/argocd-server-managed-certificate.yaml
apiVersion: networking.gke.io/v1beta1
# GKE ManagedCertificate: Google-managed TLS certificate for the Argo CD host.
kind: ManagedCertificate
metadata:
name: argocd
namespace: argocd
spec:
domains:
# Must match the host rule in the argocd-server Ingress.
- argocd.<PUBLIC_DNS_NAME>
k8s/argocd-server.patch.yaml
spec:
template:
spec:
containers:
- command:
- argocd-server
- --staticassets
- /shared/app
- --insecure
name: argocd-server
์ฐ๋ฆฌ๋ ์ ์ ํ์ ์ด ๋ชฉ๋ก๋ค์ ์ ์ฉํ ๊ฒ์ด๋ค. ๋จผ์ Argo CD์ app-demo-dev ์ ํ๋ฆฌ์ผ์ด์ ์ ๊ตฌ์ฑํฉ๋๋ค.
cd k8s
export GITLAB_USERNAME_SECRET=<GITLAB_USERNAME_SECRET>
export GITLAB_CI_PUSH_TOKEN=<GITLAB_CI_PUSH_TOKEN>
kubectl create secret generic demo -n argocd \
--from-literal=username=$GITLAB_USERNAME_SECRET \
--from-literal=password=$GITLAB_CI_PUSH_TOKEN
๋ณด์๋ค์ํผ ์ฌ์ฉ ๊ฐ๋ฅํ ํด๋ผ์ฐ๋ DNS๊ฐ ์์ผ๋ฉด Argo CD์ ์๊ตฌ๋ฅผ ๋ง๋ค๊ณ ์ธ๋ถ ์ ์ IP์ ์ํ ๊ด๋ฆฌ SSL ์ธ์ฆ์๋ฅผ ์ถ๊ฐํ ์ ์์ต๋๋ค.
gcloud compute addresses create argocd --global
gcloud dns record-sets transaction start --zone=$PUBLIC_DNS_ZONE_NAME
gcloud dns record-sets transaction add $(gcloud compute addresses list --filter=name=argocd --format="value(ADDRESS)") --name=argocd.$PUBLIC_DNS_NAME. --ttl=300 --type=A --zone=$PUBLIC_DNS_ZONE_NAME
gcloud dns record-sets transaction execute --zone=$PUBLIC_DNS_ZONE_NAME
ArgoCD ์๋น์ค๋ฅผ ์ํด์๋ GCP ๋ก๋ ๋ฐธ๋ฐ์์ IP ๋ฒ์๋ฅผ ํ์ฉํด์ผ ํฉ๋๋ค.
gcloud compute firewall-rules create fw-allow-health-checks \
--network=vpc \
--action=ALLOW \
--direction=INGRESS \
--source-ranges=35.191.0.0/16,130.211.0.0/22 \
--rules=tcp
์ด์ Argo CD์ ๋ํ ํฌํธ ๋ฆฌ์์ค๋ฅผ ์ฌ์ฉํ ์ ์์ต๋๋ค.
kubectl patch svc argocd-server -n argocd -p '{"spec": {"type": "NodePort"}}'
kubectl annotate svc argocd-server -n argocd \
cloud.google.com/neg='{"ingress": true}'
sed -i "s/<PUBLIC_DNS_NAME>/${PUBLIC_DNS_NAME}/g" argocd-server-managed-certificate.yaml
kubectl apply -f argocd-server-managed-certificate.yaml
sed -i "s/<PUBLIC_DNS_NAME>/${PUBLIC_DNS_NAME}/g" argocd-server-ingress.yaml
kubectl apply -f argocd-server-ingress.yaml
kubectl patch deployment argocd-server -n argocd -p "$(cat argocd-server.patch.yaml)"
ArgoCD ์๋ฒ ํฌํธ์ ์์ฑํ ํ CLI๋ฅผ ์ฌ์ฉํ์ฌ ๋ก๊ทธ์ธํฉ๋๋ค.
kubectl wait ingress argocd-server-https-ingress --for=condition=available --timeout=600s -n argocd
ARGOCD_ADDR="argocd.${PUBLIC_DNS_NAME}"
# get default password
ARGOCD_DEFAULT_PASSWORD=$(kubectl get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2)
argocd login $ARGOCD_ADDR --grpc-web
# change password
argocd account update-password
# for any issue, reset the password, edit the argocd-secret secret and update the admin.password field with a new bcrypt hash. You can use a site like https://www.browserling.com/tools/bcrypt to generate a new hash.
kubectl -n argocd patch secret argocd-secret \
-p '{"stringData": {
"admin.password": "<BCRYPT_HASH>",
"admin.passwordMtime": "'$(date +%FT%T%Z)'"
}}'
์ ์ฅ์, ์ฌ์ฉ์ ๋ฐ RBAC๋ฅผ ๋ง๋ค๋ ค๋ฉด ๋ค์๊ณผ ๊ฐ์ด ํ์ญ์์ค.
sed -i "s,<GIT_REPOSITORY_URL>,$GIT_REPOSITORY_URL,g" argocd-configmap.yaml
kubectl apply -n argocd -f argocd-configmap.yaml
kubectl apply -n argocd -f argocd-rbac-configmap.yaml
ํ๋ ์ ํ
์ด์
demo ์ฌ์ฉ์ ์ํธ๋ฅผ ๋ณ๊ฒฝํ๋ ค๋ฉด:
argocd account update-password --account demo --current-password "${ARGOCD_DEFAULT_PASSWORD}" --new-password "<NEW_PASSWORD>"
Argo CD์ ๋ํ ์ก์ธ์ค ํ ํฐ์ ์์ฑํ๋ ค๋ฉด:
ARGOCD_TOKEN=$(argocd account generate-token --account gitlab)
Secret Manager์ ArgoCD ํ ํฐ์ ์ ์ฅํ๋ ค๋ฉด:
gcloud beta secrets create argocd-token --locations $GCP_REGION_DEFAULT --replication-policy user-managed
echo -n "${ARGOCD_TOKEN}" | gcloud beta secrets versions add argocd-token --data-file=-
ํต๋ก.
scw init
Kapsule์ด Google ์ปจํ ์ด๋ ๋ ์ง์คํธ๋ฆฌ์ ์ก์ธ์คํ ์ ์๋๋ก ์๋น์ค ๊ณ์ ์ ๋ง๋ค๋ ค๋ฉด ๋ค์๊ณผ ๊ฐ์ด ํ์ญ์์ค.
SW_SA_EMAIL=$(gcloud iam service-accounts --format='value(email)' create sw-gcr-auth-ro)
gcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} --member serviceAccount:$SW_SA_EMAIL --role roles/storage.objectViewer
๊ฒฐ๋ก
DevOps ํ๋ซํผ์ ์ด์ Scaleway์ ๋ฆฌ์์ค๋ฅผ ๋ฐฐํฌํ ์ ์์ต๋๋ค. ์ด ๋ฌธ์์์๋ Google Cloud์ ๋ฑ๋ก๋ Gitlab Runner์์ ๋์จ Kapsule ํด๋ฌ์คํฐ๋ฅผ Scaleway์ ๋ฐฐ์นํ๋ ๋ฐฉ๋ฒ์ ์ดํด๋ด ๋๋ค.
๋ฌธ์
[1] https://github.com/sethvargo/vault-on-gke
[2] https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity
scw init
SW_SA_EMAIL=$(gcloud iam service-accounts --format='value(email)' create sw-gcr-auth-ro)
gcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} --member serviceAccount:$SW_SA_EMAIL --role roles/storage.objectViewer
DevOps ํ๋ซํผ์ ์ด์ Scaleway์ ๋ฆฌ์์ค๋ฅผ ๋ฐฐํฌํ ์ ์์ต๋๋ค.์ด ๋ฌธ์์์๋ Google Cloud์ ๋ฑ๋ก๋ Gitlab Runner์์ ๋์จ Kapsule ํด๋ฌ์คํฐ๋ฅผ Scaleway์ ๋ฐฐ์นํ๋ ๋ฐฉ๋ฒ์ ์ดํด๋ด ๋๋ค.
๋ฌธ์
[1] https://github.com/sethvargo/vault-on-gke
[2] https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity
Reference
์ด ๋ฌธ์(์ธ์ฐ๋ค๐บ๐ธ ๐ช๐บ ๊ตฌ๋ฆ์ด ๋ง์ DevOps ํ๋ซํผ)์ ๊ดํ ๋ ๋ง์ ์๋ฃ๋ ๋ค์ ๋งํฌ์์ ํ์ธํ ์ ์์ต๋๋ค: https://dev.to/stack-labs/how-to-build-step-by-step-a-multi-cloud-infrastructure-with-google-cloud-and-scaleway-elements-part-3-5fg9 ํ ์คํธ๋ฅผ ์์ ๋กญ๊ฒ ๊ณต์ ํ๊ฑฐ๋ ๋ณต์ฌํ ์ ์์ง๋ง, ์ด ๋ฌธ์์ URL์ ์ฐธ์กฐ URL๋ก ๋จ๊ฒจ ๋์ญ์์ค.
์ฐ์ํ ๊ฐ๋ฐ์ ์ฝํ ์ธ ๋ฐ๊ฒฌ์ ์ ๋ (Collection and Share based on the CC Protocol.)
์ข์ ์นํ์ด์ง ์ฆ๊ฒจ์ฐพ๊ธฐ
๊ฐ๋ฐ์ ์ฐ์ ์ฌ์ดํธ ์์ง
๊ฐ๋ฐ์๊ฐ ์์์ผ ํ ํ์ ์ฌ์ดํธ 100์ ์ถ์ฒ ์ฐ๋ฆฌ๋ ๋น์ ์ ์ํด 100๊ฐ์ ์์ฃผ ์ฌ์ฉํ๋ ๊ฐ๋ฐ์ ํ์ต ์ฌ์ดํธ๋ฅผ ์ ๋ฆฌํ์ต๋๋ค