Quick start

This page describes the sequence of steps to get started with the Terraform provider for Cloud.

How to work with Terraform

  1. Install Terraform.

    See also

    Installation instructions (en) on the official website.

    Terraform can be installed and run either in the cloud or locally. This page describes the steps for a local installation.
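
    To check that the installation succeeded, you can, for example, print the installed version:

    terraform version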

  2. Create a directory on your computer, for example terraform-sbercloud.

  3. Inside this directory, create a configuration file with the .tf extension, for example main.tf.
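
    For example, on Linux or macOS steps 2 and 3 might look like this (using the names from this page):

    mkdir terraform-sbercloud
    cd terraform-sbercloud
    touch main.tf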

  4. In the configuration file, describe the target infrastructure as code objects in the HCL language.
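
    For example, a single network (VPC), taken from the full example at the end of this page, is described as one HCL resource block:

    # A network (VPC) described as an HCL object
    resource "sbercloud_vpc" "vpc_01" {
      name = "vpc-terraform"
      cidr = "10.33.0.0/16"
    }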

  5. Log in to the console as your IAM user and generate the Access Key and Secret Key authorization keys.
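
    The keys are referenced in the configuration as the variables var.access_key and var.secret_key (see the next step and the full example below, where these variables are declared). One way to pass the values without writing them into the code is to export them as TF_VAR_* environment variables before running Terraform:

    export TF_VAR_access_key="<your Access Key>"
    export TF_VAR_secret_key="<your Secret Key>"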

  6. Connect the Terraform provider for Cloud by adding the provider initialization code at the beginning of the configuration file:

    terraform {
      required_providers {
        sbercloud = {
          source  = "sbercloud-terraform/sbercloud" # Initialize SberCloud provider
        }
      }
    }
    
    # Configure SberCloud provider
    provider "sbercloud" {
      auth_url = "https://iam.ru-moscow-1.hc.sbercloud.ru/v3" # Authorization address
      region   = "ru-moscow-1" # The region where the cloud infrastructure will be deployed
    
      # Authorization keys
      access_key = var.access_key
      secret_key = var.secret_key
    }
    
  7. Save the configuration file.

  8. Initialize the project by running the init command:

    terraform init
    
  9. Make sure that the planned actions match the target state of the infrastructure. To view the planned changes, run the plan command:

    terraform plan
    

    Terraform will analyze the code and show how many objects will be created and which properties they will have. If necessary, you can adjust the infrastructure configuration at this stage.
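
    To apply exactly the plan that was reviewed, the plan can optionally be saved to a file and passed to apply later (standard Terraform behavior, not specific to this provider; the file name tfplan is arbitrary):

    terraform plan -out=tfplan
    terraform apply tfplan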

  10. Run the code with the apply command:

    terraform apply
    

    To confirm, type "yes" and press Enter. Terraform will start creating the objects that were described and planned in the previous steps.

    Note

    To create objects without an additional confirmation prompt, use the -auto-approve option:

    terraform apply -auto-approve
    
  11. If you need to delete the resources created with Terraform, run the destroy command:

    terraform destroy
    

    To confirm, type "yes" and press Enter.

    Warning

    The destroy command deletes all resources created by Terraform; they cannot be recovered.
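
    If only part of the infrastructure has to be removed, individual resources can be targeted instead; a sketch using a resource address from the example below:

    terraform destroy -target=sbercloud_vpc.vpc_01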

Example configuration file

When the code from this example is executed, Terraform deploys a test and a production environment (the environment is selected by the current Terraform workspace; see the note after the code), each of which includes:

  • a network;

  • virtual machines for the application frontend and backend;

  • security groups;

  • a NAT gateway;

  • public IP addresses (EIP);

  • a load balancer.


#
# Declare all required variables
#
variable "access_key" {
description = "Access Key to access SberCloud"
sensitive   = true
}

variable "secret_key" {
description = "Secret Key to access SberCloud"
sensitive   = true
}

variable "dev_project_name" {
description = "Project where to deploy Dev infrastructure"
}

variable "prod_project_name" {
description = "Project where to deploy Prod infrastructure"
}

variable "cce_node_pass" {
description = "CCE Node root password"
sensitive   = true
}
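
# Values for these variables can be supplied in a terraform.tfvars file or via
# TF_VAR_* environment variables; the project names below are only hypothetical examples:
#   dev_project_name  = "ru-moscow-1_dev"
#   prod_project_name = "ru-moscow-1_prod"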

#
# Initialize Terraform and configure SberCloud provider
#
terraform {
  required_providers {
    sbercloud = {
      source  = "sbercloud-terraform/sbercloud"
      version = "1.2.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "2.0.3"
    }
  }

  backend "pg" {}
}

provider "sbercloud" {
  auth_url = "https://iam.ru-moscow-1.hc.sbercloud.ru/v3"
  region   = "ru-moscow-1"

  project_name = terraform.workspace == "default" ? var.dev_project_name : var.prod_project_name

  access_key = var.access_key
  secret_key = var.secret_key
}

# Get the list of Availability Zones (AZ)
data "sbercloud_availability_zones" "list_of_az" {}

# Get flavor name for "frontend" ECS and CCE workers
data "sbercloud_compute_flavors" "flavor_name" {
  availability_zone = data.sbercloud_availability_zones.list_of_az.names[0]
  performance_type  = "normal"
  cpu_core_count    = 2
  memory_size       = 4
}

#
# Define local variables
#
locals {
  number_of_az       = length(data.sbercloud_availability_zones.list_of_az.names)
  cce_master_flavor  = terraform.workspace == "default" ? "cce.s1.small" : "cce.s2.small"
  cce_multi_az       = terraform.workspace == "default" ? "false" : "true"
  cce_workers_count  = terraform.workspace == "default" ? 1 : 3
  ecs_frontend_count = terraform.workspace == "default" ? 2 : 4

  # Below are rules for Security Group
  # One can put them into terraform.tfvars as well
  rules = {
    http-rule = {
      description = "Allow HTTP from anywhere",
      protocol    = "tcp",
      port        = 80,
      source      = "0.0.0.0/0"
    },
    ssh-rule = {
      description = "Allow SSH from only one source",
      protocol    = "tcp",
      port        = 22,
      source      = "10.20.30.40/32"
    }
  }
}

#
# Code starts
#

# Create VPC
resource "sbercloud_vpc" "vpc_01" {
  name = "vpc-terraform"
  cidr = "10.33.0.0/16"
}

# Create two subnets
resource "sbercloud_vpc_subnet" "subnet_01" {
  name       = "subnet-front"
  cidr       = "10.33.10.0/24"
  gateway_ip = "10.33.10.1"

  primary_dns   = "100.125.13.59"
  secondary_dns = "8.8.8.8"

  vpc_id = sbercloud_vpc.vpc_01.id
}

resource "sbercloud_vpc_subnet" "subnet_02" {
  name       = "subnet-k8s"
  cidr       = "10.33.20.0/24"
  gateway_ip = "10.33.20.1"

  primary_dns   = "100.125.13.59"
  secondary_dns = "8.8.8.8"

  vpc_id = sbercloud_vpc.vpc_01.id
}

# Create security group
resource "sbercloud_networking_secgroup" "sg_01" {
  name        = "sg-front"
  description = "Security group for ELB"
}

# Create security group rules defined in local.rules (HTTP and SSH)
resource "sbercloud_networking_secgroup_rule" "sg_rules" {
  for_each = local.rules

  direction        = "ingress"
  ethertype        = "IPv4"
  description      = each.value.description
  protocol         = each.value.protocol
  port_range_min   = each.value.port
  port_range_max   = each.value.port
  remote_ip_prefix = each.value.source

  security_group_id = sbercloud_networking_secgroup.sg_01.id
}

# Create 3 Elastic IPs (EIP): for CCE cluster API server, for ELB, for NAT
resource "sbercloud_vpc_eip" "cce_eip" {
  publicip {
    type = "5_bgp"
  }
  bandwidth {
    name        = "cce_bandwidth"
    size        = 4
    share_type  = "PER"
    charge_mode = "bandwidth"
  }
}

resource "sbercloud_vpc_eip" "nat_eip" {
  publicip {
    type = "5_bgp"
  }
  bandwidth {
    name        = "nat_bandwidth"
    size        = 4
    share_type  = "PER"
    charge_mode = "bandwidth"
  }
}

resource "sbercloud_vpc_eip" "elb_eip" {
  publicip {
    type = "5_bgp"
  }
  bandwidth {
    name        = "elb_bandwidth"
    size        = 4
    share_type  = "PER"
    charge_mode = "bandwidth"
  }
}

# Create NAT Gateway
resource "sbercloud_nat_gateway" "nat_01" {
  name        = "nat-tf"
  description = "NAT Gateway for CCE workers"
  spec        = "1"
  vpc_id      = sbercloud_vpc.vpc_01.id
  subnet_id   = sbercloud_vpc_subnet.subnet_01.id
}

# Create two SNAT rules: one for each subnet
resource "sbercloud_nat_snat_rule" "snat_subnet_01" {
  nat_gateway_id = sbercloud_nat_gateway.nat_01.id
  subnet_id      = sbercloud_vpc_subnet.subnet_01.id
  floating_ip_id = sbercloud_vpc_eip.nat_eip.id
}

resource "sbercloud_nat_snat_rule" "snat_subnet_02" {
  nat_gateway_id = sbercloud_nat_gateway.nat_01.id
  subnet_id      = sbercloud_vpc_subnet.subnet_02.id
  floating_ip_id = sbercloud_vpc_eip.nat_eip.id
}

# Create CCE cluster
resource "sbercloud_cce_cluster" "cce_01" {
  name = "cluster-backend"

  flavor_id = local.cce_master_flavor

  container_network_type = "overlay_l2"
  multi_az               = local.cce_multi_az
  eip                    = sbercloud_vpc_eip.cce_eip.address

  vpc_id    = sbercloud_vpc.vpc_01.id
  subnet_id = sbercloud_vpc_subnet.subnet_02.id
}

# Create CCE worker node(s)
resource "sbercloud_cce_node" "cce_01_node" {
  count = local.cce_workers_count

  cluster_id        = sbercloud_cce_cluster.cce_01.id
  name              = "k8s-worker-${count.index}"
  flavor_id         = data.sbercloud_compute_flavors.flavor_name.ids[0]
  availability_zone = data.sbercloud_availability_zones.list_of_az.names[count.index % local.number_of_az]
  password          = var.cce_node_pass

  root_volume {
    size       = 50
    volumetype = "SAS"
  }

  data_volumes {
    size       = 100
    volumetype = "SAS"
  }

  tags = {
    environment = terraform.workspace == "default" ? "dev" : terraform.workspace
  }
}

#
# Configure Kubernetes Terraform provider
#
provider "kubernetes" {
  host = join("", ["https://", sbercloud_vpc_eip.cce_eip.address, ":5443"])

  client_certificate     = base64decode(sbercloud_cce_cluster.cce_01.certificate_users[0].client_certificate_data)
  client_key             = base64decode(sbercloud_cce_cluster.cce_01.certificate_users[0].client_key_data)
  cluster_ca_certificate = base64decode(sbercloud_cce_cluster.cce_01.certificate_clusters[0].certificate_authority_data)
}

# Deploy nginx application (as k8s deployment)
resource "kubernetes_deployment" "deploy_backend" {
  metadata {
    name = "app-backend"
    labels = {
      test = "AppBackend"
    }
  }

  spec {
    replicas = 2

    selector {
      match_labels = {
        test = "AppBackend"
      }
    }

    template {
      metadata {
        labels = {
          test = "AppBackend"
        }
      }

      spec {
        container {
          image = "nginx:1.7.8"
          name  = "main-container"

          resources {
            limits = {
              cpu    = "0.5"
              memory = "512Mi"
            }
            requests = {
              cpu    = "250m"
              memory = "50Mi"
            }
          }
        }
      }
    }
  }
}

# Create k8s service (LoadBalancer)
resource "kubernetes_service" "service_backend" {
  metadata {
    name = "elb-terraform"
    annotations = {
      "kubernetes.io/elb.class"             = "union"
      "kubernetes.io/session-affinity-mode" = "SOURCE_IP"
      "kubernetes.io/elb.subnet-id"         = sbercloud_vpc_subnet.subnet_02.id
      "kubernetes.io/elb.enterpriseID"      = "default"
      "kubernetes.io/elb.autocreate"        = "{\"type\":\"public\",\"bandwidth_name\":\"cce-terraform-service\", \"bandwidth_chargemode\":\"traffic\",\"bandwidth_size\":2,\"bandwidth_sharetype\":\"PER\",\"eip_type\":\"5_bgp\",  \"name\":\"cce-loadbalancer\"}"
    }
  }
  spec {
    external_traffic_policy = "Local"
    selector = {
      test = kubernetes_deployment.deploy_backend.metadata.0.labels.test
    }
    port {
      port        = 80
      target_port = 80
    }

    type = "LoadBalancer"
  }
}

#
# Create "frontend" on ECS
#
# Get the latest Ubuntu image
data "sbercloud_images_image" "ubuntu_image" {
  name        = "Ubuntu 20.04 server 64bit"
  most_recent = true
}

# Create Key Pair to access frontend nodes
resource "sbercloud_compute_keypair" "ecs_node_keypair" {
  name       = "ecs-node-keypair-${terraform.workspace}"
  public_key = "place_your_public_key_here"
}

# Create ECS(s) to host "frontend"
resource "sbercloud_compute_instance" "ecs_frontend" {
  count = local.ecs_frontend_count

  name              = "frontend-${count.index}"
  image_id          = data.sbercloud_images_image.ubuntu_image.id
  flavor_id         = data.sbercloud_compute_flavors.flavor_name.ids[0]
  security_groups   = [sbercloud_networking_secgroup.sg_01.name]
  availability_zone = data.sbercloud_availability_zones.list_of_az.names[count.index % local.number_of_az]
  key_pair          = sbercloud_compute_keypair.ecs_node_keypair.name
  user_data         = "#!/bin/bash\napt-get update && apt-get -y install nginx && sed -i.bak \"s/nginx\\!/$(hostname)/\" /var/www/html/index.nginx-debian.html"

  system_disk_type = "SAS"
  system_disk_size = 16

  network {
    uuid = sbercloud_vpc_subnet.subnet_01.id
  }

  tags = {
    environment = terraform.workspace == "default" ? "dev" : terraform.workspace
  }

  depends_on = [
    sbercloud_nat_snat_rule.snat_subnet_01
  ]
}

# Create ELB
resource "sbercloud_lb_loadbalancer" "elb_01" {
  name          = "elb-frontend"
  vip_subnet_id = sbercloud_vpc_subnet.subnet_01.subnet_id
}

# Attach EIP to ELB
resource "sbercloud_networking_eip_associate" "elb_eip_associate" {
  public_ip = sbercloud_vpc_eip.elb_eip.address
  port_id   = sbercloud_lb_loadbalancer.elb_01.vip_port_id
}

# Create ELB listener
resource "sbercloud_lb_listener" "frontend_listener_80" {
  name            = "Frontend listener"
  protocol        = "HTTP"
  protocol_port   = 80
  loadbalancer_id = sbercloud_lb_loadbalancer.elb_01.id
}

# Create ECS backend group for ELB
resource "sbercloud_lb_pool" "backend_pool" {
  name        = "Backend servers group for frontend"
  protocol    = "HTTP"
  lb_method   = "ROUND_ROBIN"
  listener_id = sbercloud_lb_listener.frontend_listener_80.id
}

# Create ELB health check policy
resource "sbercloud_lb_monitor" "elb_health_check" {
  name           = "Health check for Frontend"
  type           = "HTTP"
  url_path       = "/"
  expected_codes = "200-202"
  delay          = 10
  timeout        = 5
  max_retries    = 3
  pool_id        = sbercloud_lb_pool.backend_pool.id
}

# Add ECS(s) to the backend server group
resource "sbercloud_lb_member" "backend_server" {
  count = local.ecs_frontend_count

  address = sbercloud_compute_instance.ecs_frontend[count.index].access_ip_v4

  protocol_port = 80
  pool_id       = sbercloud_lb_pool.backend_pool.id
  subnet_id     = sbercloud_vpc_subnet.subnet_01.subnet_id

  depends_on = [
    sbercloud_lb_monitor.elb_health_check
  ]
}
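
The example separates the two environments by the current Terraform workspace: the default workspace deploys the dev configuration, and any other workspace deploys the production one. A possible sequence of commands (the workspace name prod and the PostgreSQL connection string are only illustrations) might look like this:

terraform init -backend-config="conn_str=postgres://user:password@host/terraform_backend"
terraform apply                  # "default" workspace: dev environment
terraform workspace new prod
terraform apply                  # "prod" workspace: production environment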