Table of Contents

Технология KVM

Установка

# egrep -c '(vmx|svm)' /proc/cpuinfo

# time apt install qemu-kvm libvirt-daemon libvirt-clients bridge-utils virt-manager #qemu
real    4m27.401s

# apt install libosinfo-bin
# osinfo-query os

Создание образа VM из дистрибутива

node1# wget -O /tmp/WindowsXPProSP3_rus_boot.iso http://val.bmstu.ru/unix/iso/WindowsXPProSP3_rus_boot.iso

node1# virt-install --name winxp --os-type windows --os-variant winxp \
--ram 512 --vcpu 1 --disk path=/disk3/winxp.qcow2,size=2 \
--graphics vnc,listen=0.0.0.0 --noautoconsole --hvm \
--network bridge=br0,model=rtl8139 \
--cdrom /tmp/WindowsXPProSP3_rus_boot.iso --boot cdrom,hd

node1# virsh list --all

node1# virsh vncdisplay winxp
:0

Подключаемся любым vnc клиентом (Сервис VNC) к порту 5900. В процессе инсталляции Windows XP виртуальную машину надо будет повторно запустить:

# virsh start winxp

# virsh shutdown winxp
или принудительная остановка VM
# virsh destroy winxp

# virsh dumpxml winxp | tee winxp.xml

# virsh undefine winxp

# cp /disk3/winxp.qcow2 /disk3/winxp1.qcow2

Создание конфигурации VM из образа

node1# wget http://val.bmstu.ru/unix/img/winxp.qcow2.zip

node1# time zcat winxp.qcow2.zip > /disk3/winxp1.qcow2

node1# virt-install --name winxp1 --os-type windows --os-variant winxp \
--ram 512 --vcpu 1 --disk path=/disk3/winxp1.qcow2 --graphics vnc,listen=0.0.0.0 \
--noautoconsole --hvm --boot hd --network bridge=br0,model=rtl8139 \
--print-xml | tee /disk3/winxp1.xml

node1# virsh define /disk3/winxp1.xml
node1# virsh edit winxp1
node1# virsh start winxp1
node1# virsh undefine winxp1

или, запуск без регистрации VM в KVM

node1# virsh create /disk3/winxp1.xml

node1# virsh list --all

node1# virsh vncdisplay winxp1

Миграция VM

node1# virsh migrate --live winxp1 qemu+ssh://node2/system

node1# virsh -c qemu+ssh://node2/system list

node1# virsh -c qemu+ssh://node2/system shutdown winxp1

Интеграция с Pacemaker

node1# ssn2 virsh -c qemu+ssh://node1/system list    # проверка ssh доступа с node2 на node1

crm(live/node1)configure# primitive pr_vm_winxp1 ocf:heartbeat:VirtualDomain params config=/disk3/winxp1.xml migration_transport=ssh meta allow-migrate=true

Интеграция с Cloud-init

Загрузка образа

# wget https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img
# wget https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2

# qemu-img info --output json /root/noble-server-cloudimg-amd64.img

# time apt install libguestfs-tools
real    1m47.380s

# time virt-customize -a /root/noble-server-cloudimg-amd64.img --root-password password:'strongpassword'
# time virt-customize -a /root/debian-12-generic-amd64.qcow2 --root-password password:'strongpassword'
real    1m26.032s

Создание пула

# virsh pool-define-as pool-nodes --type dir --target /var/lib/libvirt/pool-nodes

# mkdir /var/lib/libvirt/pool-nodes

# virsh pool-start pool-nodes

# virsh pool-list --all

# ###virsh pool-destroy --pool pool-nodes
# ###virsh pool-delete --pool pool-nodes
# ###virsh pool-undefine --pool pool-nodes

Создание тома

# apt install jq 

# virsh vol-create-as --pool pool-nodes --name disk-node1 --capacity $(qemu-img info --output json /root/noble-server-cloudimg-amd64.img | jq -r .[\"virtual-size\"])
# virsh vol-create-as --pool pool-nodes --name disk-node1 --capacity $(qemu-img info --output json /root/debian-12-generic-amd64.qcow2 | jq -r .[\"virtual-size\"])

# virsh vol-upload --pool pool-nodes --vol disk-node1 --file /root/noble-server-cloudimg-amd64.img
# virsh vol-upload --pool pool-nodes --vol disk-node1 --file /root/debian-12-generic-amd64.qcow2

# virsh vol-info --pool pool-nodes --vol disk-node1

# qemu-img resize /var/lib/libvirt/pool-nodes/disk-node1 +5G

# ###virsh vol-delete --pool pool-nodes --vol disk-node1

Создание образа cloud-init

# mkdir -p nodes; cd $_
# apt install genisoimage

# genisoimage -output cloud-init-node1.iso -volid cidata -joliet -rock user-data meta-data network-config

# virsh vol-create-as --pool pool-nodes --name cloud-init-node1 --capacity $(stat -Lc%s cloud-init-node1.iso) --format raw

# virsh vol-upload --pool pool-nodes --vol cloud-init-node1 --file cloud-init-node1.iso

# virsh vol-list --pool pool-nodes

# ###virsh vol-delete --pool pool-nodes --vol cloud-init-node1

Создание, регистрация и запуск VM

# virt-install --osinfo list | grep debian

# virt-install \
--name domain-node1 \
--osinfo debian11 \
--ram=2048 \
--vcpus=2 \
--network bridge:br0 \
--disk vol=pool-nodes/disk-node1 \
--disk vol=pool-nodes/cloud-init-node1,device=cdrom \
--boot hd \
--serial pty \
--autoconsole text \
--graphics vnc,listen=0.0.0.0 \
--import
Escape character is ^] (Ctrl + ])
...
node1 login:
...
[  OK  ] Reached target cloud-init.target - Cloud-init target.

# virsh list --all

# ###virsh console --domain domain-node1
# ###virsh --connect qemu:///system console domain-node1

# virsh vncdisplay domain-node1

# virsh --connect qemu:///system start domain-node1

# ###virsh destroy domain-node1
# ###virsh undefine --domain domain-node1

Исходные материалы

op_defaults op-options: \
        timeout=240s



virsh list --all

virt-install --name winxp1 --os-type windows --os-variant winxp --ram 512 \
--vcpu 1 --disk path=/var/lib/libvirt/winxp1.qcow2,size=2 \
--graphics vnc,listen=0.0.0.0 --noautoconsole \
--hvm --cdrom /var/lib/libvirt/boot/WindowsXPProSP3_rus_boot.iso --boot cdrom,hd

virsh domiflist winxp1

virsh detach-interface --domain winxp1 --type network --mac 52:54:00:47:2f:eb --config

virsh attach-interface --domain winxp1 --type bridge --source br0 --model virtio --config

https://www.linux.org.ru/forum/general/9132978

wget https://launchpad.net/kvm-guest-drivers-windows/20120712/20120712/+download/virtio-win-drivers-20120712-1.vfd


# virsh attach-disk winxp1 /var/lib/libvirt/floppy/virtio-win-drivers-20120712-1.vfd fda --type floppy --config
# virsh detach-disk winxp1 fda --config


# virsh vncdisplay winxp1



# virsh -c qemu+ssh://node2/system list --all

===

https://unix.stackexchange.com/questions/405955/virsh-pool-storage-basics
virsh pool storage basics

# virsh undefine winxp2

# mv /var/lib/libvirt/images/winxp2.qcow2 /disk2/images/

https://serverfault.com/questions/919538/do-not-start-guest-after-virt-install
Do not start guest after virt-install

# virt-install --name winxp2 --os-type windows --os-variant winxp \
--ram 512 --vcpu 1 --disk path=/disk2/images/winxp2.qcow2 \
--graphics vnc,listen=0.0.0.0 --noautoconsole --hvm --boot hd \
--network bridge=br0,model=rtl8139 --print-xml | tee /disk2/winxp2.xml

root@node1.corp13.un:~# cat /disk2/winxp2.xml
<domain type="kvm">
  <name>winxp2</name>
  <uuid>49df6ece-0b2e-43b4-ad79-d23493db0ad5</uuid>
  <metadata>
    <libosinfo:libosinfo xmlns:libosinfo="http://libosinfo.org/xmlns/libvirt/domain/1.0">
      <libosinfo:os id="http://microsoft.com/win/xp"/>
    </libosinfo:libosinfo>
  </metadata>
  <memory>524288</memory>
  <currentMemory>524288</currentMemory>
  <vcpu>1</vcpu>
  <os>
    <type arch="x86_64" machine="pc-i440fx-focal">hvm</type>
    <boot dev="hd"/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <hyperv>
      <relaxed state="on"/>
      <vapic state="on"/>
      <spinlocks state="on" retries="8191"/>
    </hyperv>
  </features>
  <cpu mode="host-model"/>
  <clock offset="localtime">
    <timer name="rtc" tickpolicy="catchup"/>
    <timer name="pit" tickpolicy="delay"/>
    <timer name="hpet" present="no"/>
    <timer name="hypervclock" present="yes"/>
  </clock>
  <pm>
    <suspend-to-mem enabled="no"/>
    <suspend-to-disk enabled="no"/>
  </pm>
  <devices>
    <emulator>/usr/bin/qemu-system-x86_64</emulator>
    <disk type="file" device="disk">
      <driver name="qemu" type="qcow2"/>
      <source file="/disk2/images/winxp2.qcow2"/>
      <target dev="hda" bus="ide"/>
    </disk>
    <controller type="usb" index="0" model="ich9-ehci1"/>
    <controller type="usb" index="0" model="ich9-uhci1">
      <master startport="0"/>
    </controller>
    <controller type="usb" index="0" model="ich9-uhci2">
      <master startport="2"/>
    </controller>
    <controller type="usb" index="0" model="ich9-uhci3">
      <master startport="4"/>
    </controller>
    <interface type="bridge">
      <source bridge="br0"/>
      <mac address="52:54:00:7b:b5:6e"/>
      <model type="rtl8139"/>
    </interface>
    <console type="pty"/>
    <input type="tablet" bus="usb"/>
    <graphics type="vnc" port="-1" listen="0.0.0.0"/>
    <video>
      <model type="qxl"/>
    </video>
  </devices>
</domain>


# virsh define /disk2/winxp2.xml
или, запуск без регистрации VM в KVM: virsh create /disk2/winxp2.xml




virsh -c qemu+ssh://node2/system shutdown winxp2


# cat /disk2/drbd-pool.xml
<pool type='dir'>
  <name>drbd-pool</name>
  <target>
    <path>/disk2/images</path>
  </target>
</pool>

  746  virsh pool-define /disk2/drbd-pool.xml
  748  virsh pool-start drbd-pool
  750  virsh pool-autostart drbd-pool

node1# virsh migrate --live winxp qemu+ssh://node2/system

primitive pr_vm_winxp2 ocf:heartbeat:VirtualDomain params config=/disk2/winxp2.xml migration_transport=ssh meta allow-migrate=true

primitive pr_vm_winxp2 VirtualDomain \
        params config="/disk2/winxp2.xml" migration_transport=ssh \
        meta allow-migrate=true target-role=Stopped \
        utilization cpu=1 hv_memory=512



======

https://blog.bayrell.org/ru/linux/libvirt/8-ustanovka-lxc-konteynerov-cherez-libvirt.html
https://stackoverflow.com/questions/63685175/qemuvirt-manager-cant-connect-to-virtlxcd-sock

sudo apt install libvirt-daemon-driver-lxc
sudo systemctl restart libvirtd

virsh -c lxc:///

virsh -c lxc+ssh://node1/