# VER=1.9.2
# wget https://hashicorp-releases.yandexcloud.net/terraform/$VER/terraform_${VER}_linux_amd64.zip
# unzip terraform_${VER}_linux_amd64.zip
# mv terraform /usr/local/bin/
# terraform version
~# wget https://github.com/dmacvicar/terraform-provider-libvirt/releases/download/v0.7.6/terraform-provider-libvirt_0.7.6_linux_amd64.zip
~# unzip terraform-provider-libvirt_0.7.6_linux_amd64.zip
~# mkdir -p ~/.local/share/terraform/plugins/registry.terraform.io/dmacvicar/libvirt/0.7.6/linux_amd64
~# mv ./terraform-provider-libvirt_v0.7.6 ~/.local/share/terraform/plugins/registry.terraform.io/dmacvicar/libvirt/0.7.6/linux_amd64/terraform-provider-libvirt
~# mkdir -p nodes; cd $_
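The plugin directory used above is one of Terraform's implied local mirror directories on Linux, so terraform init will pick the provider up without contacting the registry. As an alternative (a sketch not taken from the original transcript; the path assumes the provider was unpacked for the root user as above), the mirror can be declared explicitly in the CLI configuration file:

~# cat ~/.terraformrc
provider_installation {
  # filesystem_mirror path is an assumption matching the directory created above
  filesystem_mirror {
    path    = "/root/.local/share/terraform/plugins"
    include = ["registry.terraform.io/dmacvicar/libvirt"]
  }
  direct {
    exclude = ["registry.terraform.io/dmacvicar/libvirt"]
  }
}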
~/nodes# cat node1.tf
terraform {
  required_providers {
    libvirt = {
      source  = "dmacvicar/libvirt"
      version = "0.7.6"
    }
  }
}
~/nodes# terraform init
~/nodes# find .terraform/
...
.terraform/providers/registry.terraform.io/dmacvicar/libvirt/0.7.6/linux_amd64
...
~/nodes# cp network-config network-config.tftpl
~/nodes# cat meta-data.tftpl
local-hostname: ${name}
~/nodes# cat network-config.tftpl
...
    ens3:
      dhcp4: true
#      dhcp4: false
#      addresses:
#        - 192.168.X.${ip}/24
...
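The user-data file referenced from the locals below is not shown in this revision. A minimal cloud-config sketch follows (the user name and SSH key are placeholders to adapt); installing qemu-guest-agent matters here because the domain uses qemu_agent = true together with wait_for_lease on a bridged interface, which relies on the agent to report addresses:

~/nodes# cat user-data
#cloud-config
users:
  - name: debian                # placeholder account, adjust as needed
    sudo: ALL=(ALL) NOPASSWD:ALL
    shell: /bin/bash
    ssh_authorized_keys:
      - ssh-ed25519 AAAA... user@home-computer   # placeholder key
package_update: true
packages:
  - qemu-guest-agent            # lets the provider read interface addresses
runcmd:
  - systemctl enable --now qemu-guest-agent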
~/nodes# cat node1.tf
...
provider "libvirt" {
uri = "qemu:///system"
}
resource "libvirt_pool" "pool-nodes" {
name = "pool-nodes"
type = "dir"
path = "/var/lib/libvirt/pool-nodes"
}
resource "libvirt_volume" "image" {
name = "linux-cloudimg"
pool = libvirt_pool.pool-nodes.name
# source = "/root/noble-server-cloudimg-amd64.img"
source = "/root/debian-12-generic-amd64.qcow2"
}
resource "libvirt_volume" "disk-node1" {
name = "disk-node1"
pool = libvirt_pool.pool-nodes.name
base_volume_id = libvirt_volume.image.id
size = 10 * 1024 * 1024 * 1024
}
#data "template_file" "user-data" {
#...
#}
locals {
meta-data = templatefile("${path.module}/meta-data.tftpl", { name = "node1" })
user-data = templatefile("${path.module}/user-data", {})
network-config = templatefile("${path.module}/network-config.tftpl", { ip = 201 })
}
resource "libvirt_cloudinit_disk" "commoninit" {
name = "commoninit.iso"
meta_data = local.meta-data
user_data = local.user-data
network_config = local.network-config
pool = libvirt_pool.pool-nodes.name
}
resource "libvirt_domain" "domain-node1" {
name = "domain-node1"
memory = "2048"
vcpu = 2
cloudinit = libvirt_cloudinit_disk.commoninit.id
qemu_agent = true
autostart = true
network_interface {
bridge = "br0"
wait_for_lease = true
}
timeouts {
create = "30m"
}
console {
type = "pty"
target_port = "0"
target_type = "serial"
}
graphics {
type = "vnc"
listen_type = "address"
listen_address = "0.0.0.0"
autoport = "true"
}
disk {
volume_id = libvirt_volume.disk-node1.id
}
}
output "vm_name" {
value = libvirt_domain.domain-node1.name
description = "VM name"
}
output "vm_ip" {
value = libvirt_domain.domain-node1.network_interface[0].addresses.0
description = "Interface IPs"
}
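The source attribute of libvirt_volume.image points at a cloud image already present in /root; a sketch of fetching it beforehand (the image URLs are assumptions, verify the current names upstream):

~# wget https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2
~# ### wget https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img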
~/nodes# terraform validate
~/nodes# terraform plan
~/nodes# terraform apply #-auto-approve
~/nodes# terraform state list
~/nodes# terraform state show libvirt_domain.domain-node1
~/nodes# virsh console --domain domain-node1
~/nodes# virsh vncdisplay domain-node1
~/nodes# terraform destroy #-auto-approve
~/nodes# ### rm -r .terraform .terraform.lock.hcl terraform.tfstate terraform.tfstate.backup
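The two outputs declared in node1.tf can also be read individually once apply has finished:

~/nodes# terraform output vm_name
~/nodes# terraform output vm_ip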
home-computer:~/nodes# cat variables.tf
variable "domains" {
description = "List of VMs with specified parameters"
type = list(object({
name = string,
ip = string
}))
}
home-computer:~/nodes# cat terraform.tfvars
domains = [
  {
    name = "node1"
    ip   = "201"
  },
  {
    name = "node2"
    ip   = "202"
  },
  {
    name = "node3"
    ip   = "203"
  }
]
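Since the resources in nodes.tf are driven by count over this list, appending an entry (a hypothetical node4 below) and re-running apply creates only the extra VM, whereas removing or reordering entries in the middle shifts count.index and recreates the VMs that follow:

home-computer:~/nodes# cat terraform.tfvars
domains = [
  ...
  {
    name = "node4"
    ip   = "204"
  }
]
home-computer:~/nodes# terraform apply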
home-computer:~/nodes# cat meta-data.tftpl
local-hostname: ${name}
home-computer:~/nodes# cat network-config.tftpl
version: 2
ethernets:
  ens3:
    dhcp4: false
    addresses:
      - 192.168.X.${ip}/24
    routes:
      - to: default
        via: 192.168.X.1
    nameservers:
      search: [isp.un, corpX.un]
      addresses: [192.168.X.10]
home-computer:~/nodes# cat nodes.tf
...
resource "libvirt_volume" "disk-noden" {
count = length(var.domains)
name = "disk-${var.domains[count.index].name}"
pool = libvirt_pool.pool-nodes.name
base_volume_id = libvirt_volume.image.id
size = 10 * 1024 * 1024 * 1024
}
locals {
user-data = templatefile("${path.module}/user-data", {})
meta-data = [
for n in range(length(var.domains)) : templatefile("${path.module}/meta-data.tftpl", {
name = "${var.domains[n].name}"
})
]
network-config = [
for n in range(length(var.domains)) : templatefile("${path.module}/network-config.tftpl", {
ip = "${var.domains[n].ip}"
})
]
}
resource "libvirt_cloudinit_disk" "commoninit" {
count = length(var.domains)
name = "commoninit-${var.domains[count.index].name}.iso"
meta_data = local.meta-data[count.index]
user_data = local.user-data
network_config = local.network-config[count.index]
pool = libvirt_pool.pool-nodes.name
}
resource "libvirt_domain" "domain-noden" {
count = length(var.domains)
name = "domain-${var.domains[count.index].name}"
memory = "2048"
vcpu = 2
cloudinit = libvirt_cloudinit_disk.commoninit[count.index].id
qemu_agent = true
autostart = true
network_interface {
bridge = "br0"
wait_for_lease = true
}
timeouts {
create = "30m"
}
console {
type = "pty"
target_port = "0"
target_type = "serial"
}
graphics {
type = "vnc"
listen_type = "address"
listen_address = "0.0.0.0"
autoport = "true"
}
disk {
volume_id = libvirt_volume.disk-noden[count.index].id
}
}
output "vms_info" {
description = "General information about created VMs"
value = [
for vm in libvirt_domain.domain-noden : {
id = vm.name
ip = vm.network_interface[0].addresses.0
}
]
}
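The aggregated output is convenient to consume from other tooling in JSON form, for example (assumes jq is installed):

home-computer:~/nodes# terraform output -json vms_info | jq -r '.[].ip'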