C:\cmder\cmder.exe
bash
cd
git clone http://val.bmstu.ru/unix/conf.git
cd conf/virtualbox/
./setup.sh X 6
node1# ifconfig eth2 inet 10.5.1.100+X/24

nodeN# cat /etc/hostname
nodeN.corpX.un
nodeN# cat /etc/hosts
127.0.0.1       localhost
192.168.X.1     node1.corpX.un node1
192.168.X.2     node2.corpX.un node2
10.5.1.254      proxy
nodeN# cat /etc/resolv.conf
search corpX.un
nameserver 192.168.X.1
nameserver 192.168.X.2
nodeN# cat /etc/network/interfaces
auto lo
iface lo inet loopback

auto eth0
iface eth0 inet static
        address 192.168.X.N
        netmask 255.255.255.0

auto eth1
iface eth1 inet manual
        up ip link set eth1 up

auto eth2
iface eth2 inet static
        address 10.5.1.N*100+X
        netmask 255.255.255.0

auto eth3
iface eth3 inet manual
        up ip link set eth3 up
nodeN# cat /etc/sysctl.conf
...
net.ipv4.ip_forward=1
...
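Note: the forwarding setting only takes effect after a reboot (init 6 below); to apply it immediately, sysctl can re-read the file:

nodeN# sysctl -p
nodeN# cat /proc/sys/net/ipv4/ip_forward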
nodeN# cat .bashrc
...
export http_proxy=http://proxy:3128/
...
nodeN# init 6
nodeN# ping proxy

nodeN# apt update
node2# ifconfig eth0 inet 192.168.X.2/24

node1# ping node2

node1# ssh-keygen
...
Enter passphrase (empty for no passphrase):     <- leave the key passphrase empty!!!
...

node1# ssh-copy-id node2
Verification:
node1# ssh node2 hostname

node1# scp /etc/hosts node2:/etc/
node1# scp /etc/resolv.conf node2:/etc/
node1# scp /etc/sysctl.conf node2:/etc/
node1# scp .bashrc node2:
node1# scp /etc/hostname node2:/etc/
node1# scp /etc/network/interfaces node2:/etc/network/
Scenario: configure DNS on node1; the configuration will appear on node2 automatically
node1# sh conf/dns.sh

node1# cat /etc/bind/corpX.un
$TTL 3h
@       SOA     ns      root.ns 1 1d 12h 1w 3h
        NS      ns
ns      A       192.168.X.1
ns      A       192.168.X.2
;node1  A       192.168.X.1
;node2  A       192.168.X.2
$GENERATE 1-9 node$ A 192.168.X.$
gate    A       192.168.X.254
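Note: the zone file must also be declared in the bind configuration; a minimal sketch of what conf/dns.sh is assumed to set up (the declaration may live in named.conf.local):

zone "corpX.un" {
        type master;
        file "/etc/bind/corpX.un";
};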
node1# ssn2 apt update
node1# ssn2 apt install bind9
node1# csync2 -xv

nodeN# host ns node1
nodeN# host ns node2
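Note: csync2 replication assumes a group in /etc/csync2.cfg listing both hosts and the replicated files; a minimal sketch (the course conf scripts may generate a longer file list):

group cluster
{
        host node1 node2;
        key /etc/csync2.key;
        include /etc/bind/corpX.un;
        action
        {
                pattern /etc/bind/corpX.un;
                exec "rndc reload";
                do-local;
        }
}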
Scenario: create the user user1 with uid=10001 on both nodes
Note: to be performed in module 5
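A minimal sketch using standard Debian tools; the fixed uid keeps file ownership identical on both nodes (the course may provide its own script for this):

nodeN# useradd -m -u 10001 -s /bin/bash user1
nodeN# passwd user1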
nodeN# sh conf/dhcp.sh
Notes:
# ntpdate -u proxy

# grep dhcp /var/log/syslog
C:\>ping gate
Scenario:
nodeN# apt install fake

nodeN# cat /usr/share/ucarp/vip-up
#!/bin/sh

/sbin/ifup $1:ucarp

ip addr add 172.16.1.X/24 dev eth1
# send_arp 172.16.1.X `cat /sys/class/net/eth1/address` 172.16.1.254 ff:ff:ff:ff:ff:ff eth1

ip route delete default
ip route add default via 172.16.1.254
nodeN# cat /usr/share/ucarp/vip-down
#!/bin/sh

/sbin/ifdown $1:ucarp

ip addr del 172.16.1.X/24 dev eth1

ip route add default via 192.168.X.254
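Note: vip-up/vip-down are invoked by ucarp, which on Debian is tied into ifupdown; a sketch of the corresponding /etc/network/interfaces fragment, with illustrative vid/password values (the course setup scripts may configure this already):

auto eth0
iface eth0 inet static
        address 192.168.X.N
        netmask 255.255.255.0
        ucarp-vid 1
        ucarp-vip 192.168.X.254
        ucarp-password secret

iface eth0:ucarp inet static
        address 192.168.X.254
        netmask 255.255.255.255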
masternode# killall -USR2 ucarp

or

masternode# init 6
root@nodeN:~# cat /etc/network/interfaces
...
auto eth3
iface eth3 inet manual
        up ip link set eth3 up
node1# cat /etc/bind/named.conf
...
        forwarders { 172.16.1.254; 172.16.2.254; };
...
node1# csync2 -xv
nodeN# cat /usr/share/ucarp/vip-up
#!/bin/sh
...
ip addr add 172.16.2.X/24 dev eth3
# send_arp 172.16.2.X `cat /sys/class/net/eth3/address` 172.16.2.254 ff:ff:ff:ff:ff:ff eth3
...
ip route delete default
#ip route add default via 172.16.1.254
nodeN# cat /usr/share/ucarp/vip-down
...
ip addr del 172.16.2.X/24 dev eth3
...
masternode# killall -USR2 ucarp
...
masternode# grep carp /var/log/syslog

masternode# ip a | grep 172.16.2
masternode# ping 172.16.2.254

masternode# ip route add default via 172.16.2.254
masternode# ping ya.ru
masternode# ip route delete default
Scenario:
nodeN# cat /usr/share/ucarp/vip-up
...
ip route add default via 172.16.1.254 table 101
ip route add default via 172.16.2.254 table 102

#### /root/select_isp.sh
masternode# killall -USR2 ucarp
...
masternode# grep carp /var/log/syslog

masternode# ip route show table all | grep 'table 10[12]'
# cat set_isp.sh
#!/bin/sh

case $1 in
ISP1)
        ip rule del from 192.168/16 to 192.168/16 table main
        while ip rule del from any table 101; do true; done
        while ip rule del from any table 102; do true; done
        ip rule add from 192.168.X.0/24 table 101
        ip rule add from 192.168/16 to 192.168/16 table main
        /sbin/ip route flush cache
        /usr/sbin/conntrack -F
;;
ISP2)
        ip rule del from 192.168/16 to 192.168/16 table main
        while ip rule del from any table 101; do true; done
        while ip rule del from any table 102; do true; done
        ip rule add from 192.168.X.0/24 table 102
        ip rule add from 192.168/16 to 192.168/16 table main
        /sbin/ip route flush cache
        /usr/sbin/conntrack -F
;;
ISP1ISP2)
        ip rule del from 192.168/16 to 192.168/16 table main
        while ip rule del from any table 101; do true; done
        while ip rule del from any table 102; do true; done
        ip rule add from 192.168.X.0/25 table 101
        ip rule add from 192.168.X.128/25 table 102
        ip rule add from 192.168/16 to 192.168/16 table main
        /sbin/ip route flush cache
        /usr/sbin/conntrack -F
;;
esac
nodeN# chmod +x set_isp.sh

masternode# grep carp /var/log/syslog
masternode# /root/set_isp.sh ISP1ISP2
masternode# ip rule show

backupnode# traceroute -n ya.ru

C:\Users\student>tracert ya.ru

nodeN# cat select_isp.sh
#!/bin/sh

ip a | grep -q '192.168.*254' || exit 0

ip route delete default

ISP=''

ip route add default via 172.16.1.254
ping -c3 ya.ru && ISP=ISP1
ip route delete default

ip route add default via 172.16.2.254
ping -c3 ya.ru && ISP=${ISP}ISP2
ip route delete default

echo $ISP
# exit 0

touch /tmp/current_isp
test "$ISP" = "`cat /tmp/current_isp`" && exit 0

echo $ISP > /tmp/current_isp
/root/set_isp.sh $ISP
nodeN# chmod +x select_isp.sh

masternode# /root/select_isp.sh
masternode# cat /tmp/current_isp

nodeN# crontab -l
* * * * * /root/select_isp.sh >/dev/null 2>&1
master_node# ip route show table 101
master_node# ip route show table 102
master_node# ip rule show
Scenario: fault-tolerant www hosting or a web store for users' files
node1# cat /etc/bind/corpX.un
...
www     A       192.168.X.10
node1# csync2 -xv
Explorer: ftp://node1   login: user1   password: password1
Directory: public_html
File: index.html (any content will do :)

In a browser: http://node1/~user1
nodeN# service proftpd stop
nodeN# update-rc.d -f proftpd remove

or

debian11# systemctl disable proftpd
Explorer: ftp://www

In a browser: http://www.corpX.un/~user1
node2# ssh-keygen
node2# ssh-copy-id node1

nodeN# crontab -l
...
* * * * * ps ax | grep -v grep | grep -q 'proftpd: (accepting connections)' && /usr/bin/rsync -az --delete /home/ nodeM:/home/
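The cron job makes only the node where proftpd is accepting connections (the ucarp master) push /home to the other node, so the backup always holds a current copy. A one-off manual check, assuming node1 is currently the master:

node1# ps ax | grep 'proftpd: (accepting connections)'
node1# /usr/bin/rsync -az --delete /home/ node2:/home/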
Scenario: build a fault-tolerant corporate file server. The first cluster node should be used as the default server.
nodeN# systemctl disable istgt

node1# crm configure
crm(live)configure# primitive pr_srv_istgt lsb:istgt
crm(live)configure# primitive pr_ip_istgt ocf:heartbeat:IPaddr2 params ip=192.168.X.15 cidr_netmask=32 nic=eth0
crm(live)configure# group gr_ip_fs pr_fs_r0 pr_ip_istgt pr_srv_istgt
crm(live)configure# commit
node1# cat /etc/bind/corpX.un
...
fs      A       192.168.X.20
node1# csync2 -xv
root@nodeN:~# service smbd stop
root@nodeN:~# service nmbd stop

root@nodeN:~# systemctl disable smbd
root@nodeN:~# systemctl disable nmbd
node1# crm configure
crm(live)configure# primitive pr_srv_smbd systemd:smbd
crm(live)configure# primitive pr_ip_smbd ocf:heartbeat:IPaddr2 params ip=192.168.X.20 cidr_netmask=32 nic=eth0
crm(live)configure# group gr_ip_fs pr_fs_r0 pr_ip_smbd pr_srv_smbd

or

crm(live)configure# edit gr_ip_fs

crm(live)configure# commit
master# cat /proc/drbd
master# crm_mon -1
master# crm resource stop pr_srv_smbd pr_ip_smbd
master# crm configure delete pr_srv_smbd pr_ip_smbd gr_ip_fs
node1# cat /etc/bind/corpX.un
...
server  A       192.168.X.30
@       MX      10 server
node1# csync2 -xv
nodeN# apt install bridge-utils

nodeN# cat /etc/network/interfaces
...
auto br0
iface br0 inet static
        bridge_ports eth0
#       bridge_hw 08:00:27:12:34:0N     # Debian 11 generates the same MAC on both nodes
        address 192.168.X.N
        netmask 255.255.255.0
...
iface br0:ucarp inet static
        address 192.168.X.254
        netmask 255.255.255.255
...
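A quick sanity check of the bridge after restarting networking (not part of the original steps):

nodeN# brctl show
nodeN# ip a s br0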
nodeN# cat /etc/default/isc-dhcp-server
...
INTERFACESv4="br0"
nodeN# init 0
nodeN# ps ax | grep carp
nodeN# ps ax | grep dhcp

node1# mount | grep ext

nodeN# cat /proc/drbd
node2# service pacemaker stop
node2# drbdadm invalidate r0
node2# service pacemaker start
root@nodeN:~# rmdir /var/lib/lxc/
root@nodeN:~# ln -s /disk2/var/lib/lxc/ /var/lib/lxc
root@node1.corpX.un:~# mkdir -p /disk2/var/lib/lxc/

debian10# lxc-create -t debian -n server -- -r buster

or

root@node1.corpX.un:~# lxc-create -t debian -n server
root@node1.corpX.un:~# cp /etc/ssh/sshd_config /var/lib/lxc/server/rootfs/etc/ssh/sshd_config
root@node1.corpX.un:~# cp /etc/hosts /var/lib/lxc/server/rootfs/etc/

root@node1.corpX.un:~# chroot /var/lib/lxc/server/rootfs/ /bin/bash

root@node1:/# PS1='server:\w# '

server:/# apt update
server:/# apt purge isc-dhcp-client
server:/# apt install nano vim iputils-ping
server:/# cat /etc/hostname
server.corpX.un
server:/# cat /etc/hosts
127.0.0.1       localhost
192.168.X.30    server.corpX.un server
server:/# passwd
root@node1.corpX.un:~# cat /var/lib/lxc/server/config
...
lxc.net.0.type = veth
lxc.net.0.link = br0
lxc.net.0.flags = up
lxc.net.0.ipv4.address = 192.168.X.30/24
lxc.net.0.ipv4.gateway = 192.168.X.254
root@node1.corpX.un:~# lxc-info -n server
root@node1.corpX.un:~# lxc-start -n server
root@node1.corpX.un:~# lxc-info -n server

root@node1.corpX.un:~# lxc-attach -n server -- ps ax

root@node1.corpX.un:~# ssh server

root@node1.corpX.un:~# lxc-stop -n server

root@node1.corpX.un:~# systemctl start lxc@server
root@node1.corpX.un:~# systemctl stop lxc@server
primitive pr_lxc_server systemd:lxc@server
group gr_fs_lxc pr_fs_r0 pr_lxc_server
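These two lines are crm configure directives; entered interactively, by analogy with the smbd resources above:

node1# crm configure
crm(live)configure# primitive pr_lxc_server systemd:lxc@server
crm(live)configure# group gr_fs_lxc pr_fs_r0 pr_lxc_server
crm(live)configure# commit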
server# apt install ansible git
server# ansible-playbook conf/ansible/roles/mail.yml

or

node1# nano conf/ansible/roles/mail.yml
node1# nano /etc/ansible/hosts
node1# ansible-playbook conf/ansible/roles/mail.yml
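If the playbook is run from node1 rather than inside the container, /etc/ansible/hosts must list the container; a minimal sketch, assuming mail.yml targets a host named server (check its hosts: line):

server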
node1# crm resource move gr_fs_lxc node2