
Terraform Technology

Installation

# VER=1.9.2

# wget https://mirror.selectel.ru/3rd-party/hashicorp-releases/terraform/$VER/terraform_${VER}_linux_amd64.zip

# unzip terraform_${VER}_linux_amd64.zip

# mv terraform /usr/local/bin/

# terraform version
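
Optionally, shell tab completion for bash or zsh can be enabled with a built-in option:

# terraform -install-autocomplete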

terraform-provider-libvirt

Installation

~# wget https://github.com/dmacvicar/terraform-provider-libvirt/releases/download/v0.7.6/terraform-provider-libvirt_0.7.6_linux_amd64.zip

~# unzip terraform-provider-libvirt_0.7.6_linux_amd64.zip

~# mkdir -p ~/.local/share/terraform/plugins/registry.terraform.io/dmacvicar/libvirt/0.7.6/linux_amd64

~# mv ./terraform-provider-libvirt_v0.7.6 ~/.local/share/terraform/plugins/registry.terraform.io/dmacvicar/libvirt/0.7.6/linux_amd64/terraform-provider-libvirt
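
Terraform treats ~/.local/share/terraform/plugins as an implied local filesystem mirror, so terraform init will pick the provider up from there without network access. If desired, the mirror can also be declared explicitly in the CLI configuration (an optional sketch; the path assumes the root home directory used in the prompts above):

~# cat ~/.terraformrc
provider_installation {
  filesystem_mirror {
    path    = "/root/.local/share/terraform/plugins"
    include = ["registry.terraform.io/dmacvicar/libvirt"]
  }
  direct {
    exclude = ["registry.terraform.io/dmacvicar/libvirt"]
  }
}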

~# mkdir -p nodes; cd $_

Project initialization

~/nodes# cat node1.tf
terraform {
  required_providers {
    libvirt = {
      source  = "dmacvicar/libvirt"
      version = "0.7.6"
    }
  }
}
~/nodes# terraform init

~/nodes# find .terraform/
...
.terraform/providers/registry.terraform.io/dmacvicar/libvirt/0.7.6/linux_amd64
...
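
The providers resolved during init can also be listed with a standard subcommand:

~/nodes# terraform providers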

Simple project

~/nodes# cat meta-data.tftpl
local-hostname: ${name}
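
The user-data file referenced from node1.tf below is assumed to already exist; a minimal cloud-config sketch (placeholder account name and key) could look like this:

~/nodes# cat user-data
#cloud-config
users:
  - name: user1                # placeholder account name
    sudo: ALL=(ALL) NOPASSWD:ALL
    shell: /bin/bash
    ssh_authorized_keys:
      - ssh-rsa AAAA...placeholder-key...
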
~/nodes# cp network-config network-config.tftpl

~/nodes# cat network-config.tftpl
...
  ens3:
    dhcp4: true
#    dhcp4: false
#    addresses:
#      - 192.168.X.${ip}/24
...
~/nodes# cat node1.tf
...

provider "libvirt" {
  uri = "qemu:///system"
}

resource "libvirt_pool" "pool-nodes" {
  name = "pool-nodes"
  type = "dir"
  path = "/var/lib/libvirt/pool-nodes"
}

resource "libvirt_volume" "image" {
  name   = "linux-cloudimg"
  pool   = libvirt_pool.pool-nodes.name
#  source = "/root/noble-server-cloudimg-amd64.img"
  source = "/root/debian-12-generic-amd64.qcow2"
}

resource "libvirt_volume" "disk-node1" {
  name           = "disk-node1"
  pool           = libvirt_pool.pool-nodes.name
  base_volume_id = libvirt_volume.image.id
  size           = 10 * 1024 * 1024 * 1024 # 10 GiB
}

#data "template_file" "user-data" {
#...
#}

locals {
  meta-data      = templatefile("${path.module}/meta-data.tftpl", { name = "node1" })
  user-data      = templatefile("${path.module}/user-data", {})
  network-config = templatefile("${path.module}/network-config.tftpl", { ip = 201 })
}

resource "libvirt_cloudinit_disk" "commoninit" {
  name           = "commoninit.iso"
  meta_data      = local.meta-data
  user_data      = local.user-data
  network_config = local.network-config
  pool           = libvirt_pool.pool-nodes.name
}

resource "libvirt_domain" "domain-node1" {
  name   = "domain-node1"
  memory = "2048"
  vcpu   = 2

  cloudinit = libvirt_cloudinit_disk.commoninit.id

  qemu_agent = true
  autostart  = true

  network_interface {
    bridge         = "br0"
    wait_for_lease = true
  }

  timeouts {
    create = "30m"
  }

  console {
    type        = "pty"
    target_port = "0"
    target_type = "serial"
  }
  graphics {
    type        = "vnc"
    listen_type = "address"
    listen_address = "0.0.0.0"
    autoport    = "true"
  }

  disk {
    volume_id = libvirt_volume.disk-node1.id
  }
}

output "vm_name" {
  value       = libvirt_domain.domain-node1.name
  description = "VM name"
}

output "vm_ip" {
  value       = libvirt_domain.domain-node1.network_interface[0].addresses.0
  description = "Interface IPs"
}

Project management

~/nodes# terraform validate
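
Formatting can be normalized and the resource dependency graph inspected with two more standard subcommands (rendering the graph to SVG assumes graphviz is installed):

~/nodes# terraform fmt

~/nodes# terraform graph | dot -Tsvg > graph.svg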

~/nodes# terraform plan

~/nodes# terraform apply #-auto-approve

~/nodes# virsh console --domain domain-node1

~/nodes# virsh vncdisplay domain-node1
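
The values declared in the output blocks can be read back at any time after apply:

~/nodes# terraform output

~/nodes# terraform output -raw vm_ip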

~/nodes# terraform state list

~/nodes# terraform state show libvirt_domain.domain-node1
...
    network_interface {
            addresses
...

~/nodes# terraform destroy #-auto-approve

~/nodes# ### rm -r .terraform .terraform.lock.hcl terraform.tfstate terraform.tfstate.backup

Multi-Machine project

home-computer:~/nodes# cat variables.tf
variable "domains" {
  description = "List of VMs with specified parameters"
  type = list(object({
    name = string,
    ip   = string
  }))
}
home-computer:~/nodes# cat terraform.tfvars
domains = [
  {
    name = "node1"
    ip   = "201"
  },
  {
    name = "node2"
    ip   = "202"
  },
  {
    name = "node3"
    ip   = "203"
  }
]
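
After terraform init, the values loaded from terraform.tfvars can be inspected interactively with terraform console (a standard subcommand; example expressions only):

home-computer:~/nodes# terraform console
> length(var.domains)
3
> var.domains[1].name
"node2"
> exit
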
home-computer:~/nodes# cat meta-data.tftpl
local-hostname: ${name}
home-computer:~/nodes# ip r
home-computer:~/nodes# cat /etc/resolv.conf

The gateway and nameserver values shown by these two commands are used to fill in the network-config template below.

home-computer:~/nodes# cat network-config.tftpl
version: 2
ethernets:
  ens3:
    dhcp4: false
    addresses:
      - 192.168.X.${ip}/24
    routes:
      - to: default
        via: 192.168.X.1
    nameservers:
      search: [isp.un, corpX.un]
      addresses: [192.168.X.10]
home-computer:~/nodes# mv node1.tf nodes.tf

home-computer:~/nodes# cat nodes.tf
...
resource "libvirt_volume" "disk-noden" {
  count          = length(var.domains)

  name           = "disk-${var.domains[count.index].name}"
  pool           = libvirt_pool.pool-nodes.name
  base_volume_id = libvirt_volume.image.id
  size           = 10 * 1024 * 1024 * 1024 # 10 GiB
}

locals {
  user-data = templatefile("${path.module}/user-data", {})
  meta-data = [
    for n in range(length(var.domains)) : templatefile("${path.module}/meta-data.tftpl", {
      name = "${var.domains[n].name}"
    })
  ]
  network-config = [
    for n in range(length(var.domains)) : templatefile("${path.module}/network-config.tftpl", {
      ip = "${var.domains[n].ip}"
    })
  ]
}

resource "libvirt_cloudinit_disk" "commoninit" {
  count          = length(var.domains)
  name           = "commoninit-${var.domains[count.index].name}.iso"
  meta_data      = local.meta-data[count.index]
  user_data      = local.user-data
  network_config = local.network-config[count.index]
  pool           = libvirt_pool.pool-nodes.name
}

resource "libvirt_domain" "domain-noden" {
  count = length(var.domains)

  name   = "domain-${var.domains[count.index].name}"
  memory = "2048"
  vcpu   = 2

  cloudinit = libvirt_cloudinit_disk.commoninit[count.index].id

  qemu_agent = true
  autostart = true

  network_interface {
    bridge         = "br0"
    wait_for_lease = true
  }

  timeouts {
    create = "30m"
  }

  console {
    type        = "pty"
    target_port = "0"
    target_type = "serial"
  }
  graphics {
    type           = "vnc"
    listen_type    = "address"
    listen_address = "0.0.0.0"
    autoport       = "true"
  }

  disk {
    volume_id = libvirt_volume.disk-noden[count.index].id
  }
}

output "vms_info" {
  description = "General information about created VMs"
  value = [
    for vm in libvirt_domain.domain-noden : {
      id = vm.name
      ip = vm.network_interface[0].addresses.0
    }
  ]
}
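
The multi-machine project is managed with the same commands as the single-node one; individual VMs can be addressed by index in resource addresses when only one of them needs to be inspected or recreated (a sketch using the standard state show and -replace options):

home-computer:~/nodes# terraform apply

home-computer:~/nodes# terraform state show 'libvirt_domain.domain-noden[0]'

home-computer:~/nodes# terraform apply -replace='libvirt_domain.domain-noden[2]'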

Selectel and OpenStack providers

# cat ~/.terraformrc
provider_installation {
  network_mirror {
    url = "https://mirror.selectel.ru/3rd-party/terraform-registry/"
    include = ["registry.terraform.io/*/*"]
  }
  direct {
    exclude = ["registry.terraform.io/*/*"]
  }
}
home-computer:~# mkdir -p selectel; cd $_

home-computer:~/selectel# cat main.tf
terraform {
  required_providers {
    selectel = {
      source  = "selectel/selectel"
      version = "5.1.0"
    }
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "2.0.0"
    }
  }
}

provider "selectel" {
  domain_name = "NNNNNN"
  username    = "Cerys"
  password    = "xxxxxxxxxxx"
}

resource "selectel_vpc_project_v2" "project_1" {
  name = "project"
}

resource "selectel_iam_serviceuser_v1" "serviceuser_1" {
  name     = "prCerys"
  password = "xxxxxxxxxxx"
  role {
    role_name  = "member"
    scope      = "project"
    project_id = selectel_vpc_project_v2.project_1.id
  }
}

provider "openstack" {
  auth_url    = "https://cloud.api.selcloud.ru/identity/v3"
  domain_name = "NNNNNN"
  tenant_id   = selectel_vpc_project_v2.project_1.id
  user_name   = selectel_iam_serviceuser_v1.serviceuser_1.name
  password    = selectel_iam_serviceuser_v1.serviceuser_1.password
  region      = "ru-9"
}

resource "selectel_vpc_keypair_v2" "keypair_1" {
  name       = "keypair"
  public_key = file("~/.ssh/id_rsa.pub")
  user_id    = selectel_iam_serviceuser_v1.serviceuser_1.id
}

resource "openstack_compute_flavor_v2" "flavor_1" {
#  name      = "custom-flavor-with-network-volume"
  name      = "custom-flavor-with-network-volume-1234"
  vcpus     = 2
  ram       = 2048
  disk      = 0
  is_public = false

  lifecycle {
    create_before_destroy = true
  }
}

resource "openstack_networking_network_v2" "network_1" {
  name           = "private-network"
  admin_state_up = "true"

  depends_on = [
    selectel_vpc_project_v2.project_1,
    selectel_iam_serviceuser_v1.serviceuser_1
  ]

}

resource "openstack_networking_subnet_v2" "subnet_1" {
  name       = "private-subnet"
  network_id = openstack_networking_network_v2.network_1.id
  cidr       = "192.168.199.0/24"
}

data "openstack_networking_network_v2" "external_network_1" {
  external = true

  depends_on = [
    selectel_vpc_project_v2.project_1,
    selectel_iam_serviceuser_v1.serviceuser_1
  ]

}

resource "openstack_networking_router_v2" "router_1" {
  name                = "router"
  external_network_id = data.openstack_networking_network_v2.external_network_1.id
}

resource "openstack_networking_router_interface_v2" "router_interface_1" {
  router_id = openstack_networking_router_v2.router_1.id
  subnet_id = openstack_networking_subnet_v2.subnet_1.id
}

resource "openstack_networking_port_v2" "port_1" {
  name       = "port"
  network_id = openstack_networking_network_v2.network_1.id

  fixed_ip {
    subnet_id = openstack_networking_subnet_v2.subnet_1.id
  }
}

data "openstack_images_image_v2" "image_1" {
#  name        = "Ubuntu 20.04 LTS 64-bit"
  name        = "Debian 12 (Bookworm) 64-bit"
  most_recent = true
  visibility  = "public"

  depends_on = [
    selectel_vpc_project_v2.project_1,
    selectel_iam_serviceuser_v1.serviceuser_1
  ]

}

resource "openstack_blockstorage_volume_v3" "volume_1" {
  name                 = "boot-volume-for-server"
  size                 = "5"
  image_id             = data.openstack_images_image_v2.image_1.id
  volume_type          = "fast.ru-9a"
  availability_zone    = "ru-9a"
  enable_online_resize = true

  lifecycle {
    ignore_changes = [image_id]
  }

}

resource "openstack_blockstorage_volume_v3" "volume_2" {
  name                 = "additional-volume-for-server"
  size                 = "7"
  volume_type          = "universal.ru-9a"
  availability_zone    = "ru-9a"
  enable_online_resize = true
}

resource "openstack_compute_instance_v2" "server_1" {
  name              = "server"
  flavor_id         = openstack_compute_flavor_v2.flavor_1.id
  key_pair          = selectel_vpc_keypair_v2.keypair_1.name
  availability_zone = "ru-9a"

  network {
    port = openstack_networking_port_v2.port_1.id
  }

  lifecycle {
    ignore_changes = [image_id]
  }

  block_device {
    uuid             = openstack_blockstorage_volume_v3.volume_1.id
    source_type      = "volume"
    destination_type = "volume"
    boot_index       = 0
  }

  block_device {
    uuid             = openstack_blockstorage_volume_v3.volume_2.id
    source_type      = "volume"
    destination_type = "volume"
    boot_index       = -1
  }

  vendor_options {
    ignore_resize_confirmation = true
  }
}

resource "openstack_networking_floatingip_v2" "floatingip_1" {
  pool = "external-network"
}

resource "openstack_networking_floatingip_associate_v2" "association_1" {
  port_id     = openstack_networking_port_v2.port_1.id
  floating_ip = openstack_networking_floatingip_v2.floatingip_1.address
}

output "public_ip_address" {
#  value = openstack_networking_floatingip_v2.floatingip_1.fixed_ip
  value = openstack_networking_floatingip_v2.floatingip_1.address
}
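
The Selectel project is initialized and applied with the same commands; the floating IP from the output can then be used to reach the server (the login user depends on the image, debian is assumed here for the Debian 12 image):

home-computer:~/selectel# terraform init

home-computer:~/selectel# terraform apply

home-computer:~/selectel# ssh debian@$(terraform output -raw public_ip_address)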