Run C:\cmder\cmder.exe as Administrator, then:

bash
cd
git clone http://val.bmstu.ru/unix/conf.git
cd conf/virtualbox/
./setup.sh X 6
ubuntu# sh net_gate.sh

ubuntu# init 6
gate# sh conf/dhcp.sh
windows> ping 1.1.1.1
gate# cat /etc/network/interfaces
auto lo
iface lo inet loopback
auto eth0
iface eth0 inet static
address 192.168.X.1
netmask 255.255.255.0
auto eth1
iface eth1 inet manual
up ip link set eth1 up
#auto eth2
#iface eth2 inet manual
# up ip link set eth2 up
gate# cat /etc/hostname
node1.corpX.un
gate# cat /etc/hosts
127.0.0.1       localhost
192.168.X.1     node1.corpX.un node1
192.168.X.2     node2.corpX.un node2
gate# init 6
node1# ping ya.ru

windows> ping 1.1.1.1
node2# ifconfig eth0 inet 192.168.X.2/24

node1# ping node2

node1# ssh-keygen
...
Enter passphrase (empty for no passphrase):    <- leave the key passphrase empty!
...

node1# ssh-copy-id node2
Check:
node1# scp /etc/hosts node2:/etc/
scp /etc/hostname node2:/etc/
scp /etc/network/interfaces node2:/etc/network/

node1# ssh node2 route add default gw 192.168.X.254

node1# ssh node2 apt update

node1# ssh node2 apt install keepalived

scp /etc/keepalived/keepalived.conf node2:/etc/keepalived/keepalived.conf
scp /usr/local/bin/vrrp.sh node2:/usr/local/bin/vrrp.sh
node1# ssh node2

ubuntu# cat /etc/hostname
node2.corpX.un
node2# cat /etc/network/interfaces
...
address 192.168.X.2
...
node2# cat /etc/keepalived/keepalived.conf
...
state BACKUP
...
node2# init 6
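For reference, a minimal vrrp_instance stanza for this setup could look roughly as follows. This is only a sketch: the instance name GW is illustrative, and it assumes that 192.168.X.254 is the shared gateway address and vrrp.sh the notify script, so the file actually produced in the course may differ.

vrrp_instance GW {
    state MASTER                   # BACKUP on node2
    interface eth0
    virtual_router_id 1
    priority 150                   # use a lower value on node2
    virtual_ipaddress {
        192.168.X.254/24
    }
    notify /usr/local/bin/vrrp.sh
}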
node1# scn2 /etc/sysctl.conf
node1# ssn2 sysctl -f
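scn2 and ssn2 appear to be local helper wrappers ("copy to node2" / "run on node2"). If they are not present, hypothetical equivalents could be:

nodeN# cat /usr/local/bin/ssn2
#!/bin/sh
# hypothetical helper: run the given command on node2 over ssh
exec ssh node2 "$@"

nodeN# cat /usr/local/bin/scn2
#!/bin/sh
# hypothetical helper: copy a file to the same path on node2
exec scp "$1" node2:"$1"

nodeN# chmod +x /usr/local/bin/scn2 /usr/local/bin/ssn2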
Scenario: configure DNS on node1; the configuration will appear on node2 automatically.
node1# sh conf/dns.sh

node1# cat /etc/bind/corpX.un
$TTL 3h
@ SOA ns root.ns 1 1d 12h 1w 3h
NS ns
ns A 192.168.X.1
ns A 192.168.X.2
;node1 A 192.168.X.1
;node2 A 192.168.X.2
$GENERATE 1-9 node$ A 192.168.X.$
gate A 192.168.X.254
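The zone file is assumed to be declared in named.conf (or a file it includes) roughly like this; the exact statement generated by conf/dns.sh may differ:

zone "corpX.un" {
        type master;
        file "/etc/bind/corpX.un";
};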
node1# ssn2 apt install bind9
node1# csync2 -xv
nodeN# host ns node1
nodeN# host ns node2
nodeN# cat /etc/resolv.conf
search corpX.un
nameserver 192.168.X.1
nameserver 192.168.X.2
Scenario: create the user user1 with uid=10001 on both nodes.
Note: do this in module 5.
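For example (any equivalent method is fine, as long as the uid is the same on both nodes):

nodeN# useradd -u 10001 -m -s /bin/bash user1
nodeN# passwd user1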
node2# sh conf/dhcp.sh
Note: at first, both servers must be running so that they can synchronize.
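conf/dhcp.sh presumably sets up ISC DHCP failover between the two nodes; a minimal sketch of such a dhcpd.conf is shown below (addresses come from this lab, while the peer name and pool range are illustrative):

failover peer "dhcp-failover" {
        primary;                        # secondary; on node2
        address 192.168.X.1;
        peer address 192.168.X.2;
        max-response-delay 60;
        max-unacked-updates 10;
        mclt 3600;                      # primary only
        split 128;                      # primary only
        load balance max seconds 3;
}

subnet 192.168.X.0 netmask 255.255.255.0 {
        option routers 192.168.X.254;
        option domain-name-servers 192.168.X.1, 192.168.X.2;
        pool {
                failover peer "dhcp-failover";
                range 192.168.X.101 192.168.X.199;
        }
}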
windows> ipconfig /release
windows> ipconfig /renew
windows> ipconfig /all
windows> ping ya.ru
windows> ping gate
root@nodeN:~# cat /etc/network/interfaces
...
auto eth2
iface eth2 inet manual
up ip link set eth2 up
nodeN# ip a | grep eth2
node1# cat /etc/bind/named.conf
...
forwarders {
172.16.1.254;
172.16.2.254;
};
...
node1# csync2 -xv
node1# ip a | grep 172.16.2
node1# ping 172.16.2.254

node1# ip route del default
node1# ip route add default via 172.16.2.254

node1# ping ya.ru
node1# tracepath ya.ru

node1# ip route del default
Scenario: switching outbound traffic between the two providers using policy routing (tables 101 and 102).
node1# ip route show table 101
node1# ip route show table 102

nodeN# cat set_isp.sh
#!/bin/sh
case $1 in
ISP1)
ip rule del from 192.168/16 to 192.168/16 table main
while ip rule del from any table 101;do true;done
while ip rule del from any table 102;do true;done
ip route delete default
ip rule add from 192.168.X.0/24 table 101
ip rule add from 192.168/16 to 192.168/16 table main
ip route add default via 172.16.1.254
/sbin/ip route flush cache
/usr/sbin/conntrack -F
;;
ISP2)
ip rule del from 192.168/16 to 192.168/16 table main
while ip rule del from any table 101;do true;done
while ip rule del from any table 102;do true;done
ip route delete default
ip rule add from 192.168.X.0/24 table 102
ip rule add from 192.168/16 to 192.168/16 table main
ip route add default via 172.16.2.254
/sbin/ip route flush cache
/usr/sbin/conntrack -F
;;
ISP1ISP2)
ip rule del from 192.168/16 to 192.168/16 table main
while ip rule del from any table 101;do true;done
while ip rule del from any table 102;do true;done
ip route delete default
ip rule add from 192.168.X.0/25 table 101
ip rule add from 192.168.X.128/25 table 102
ip rule add from 192.168/16 to 192.168/16 table main
ip route add default via 172.16.1.254
#ip route add default via 172.16.2.254
/sbin/ip route flush cache
/usr/sbin/conntrack -F
;;
esac
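The script assumes that tables 101 and 102 already hold a default route via ISP1 and ISP2 respectively (this is what the ip route show table 101/102 commands above check). If they are empty, they can be populated like this (a sketch using this lab's gateway addresses):

nodeN# ip route add default via 172.16.1.254 table 101
nodeN# ip route add default via 172.16.2.254 table 102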
nodeN# chmod +x set_isp.sh

node1# /root/set_isp.sh ISP1ISP2
node1# ip rule show

node2# tracepath ya.ru

windows> tracert -d ya.ru

nodeN# cat select_isp.sh
#!/bin/sh
TESTIP=1.1.1.1
ip a | grep -q '192.168.*254' || exit 0
ISP=''
ip route del $TESTIP
ip route add $TESTIP via 172.16.1.254
ping -c3 $TESTIP && ISP=ISP1
ip route del $TESTIP
ip route add $TESTIP via 172.16.2.254
ping -c3 $TESTIP && ISP=${ISP}ISP2
ip route del $TESTIP
echo $ISP
# exit 0
touch /tmp/current_isp
test "$ISP" = "`cat /tmp/current_isp`" && exit 0
echo $ISP > /tmp/current_isp
/root/set_isp.sh $ISP
nodeN# chmod +x select_isp.sh

node1# /root/select_isp.sh
node1# cat /tmp/current_isp

nodeN# crontab -l
* * * * * /root/select_isp.sh >/dev/null 2>&1
master_node# ip rule show
Scenario: a fault-tolerant FTP store for user files with web access.
node1# cat /etc/bind/corpX.un
...
www A 192.168.X.10
node1# csync2 -xv
Explorer: ftp://node1  (login: user1, password: password1), directory: public_html

Browser: http://node1/~user1
nodeN# service proftpd stop

nodeN# update-rc.d -f proftpd remove
or
debian11# systemctl disable proftpd
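The name www (192.168.X.10) and the proftpd service have to follow the active node. One way to arrange this (a hypothetical sketch; the resource names are illustrative, and a second keepalived virtual address would work just as well) is to hand both to the cluster, analogous to the smbd resources in the next scenario:

node1# crm configure
crm(live)configure# primitive pr_srv_proftpd systemd:proftpd
crm(live)configure# primitive pr_ip_ftp ocf:heartbeat:IPaddr2 params ip=192.168.X.10 cidr_netmask=32 nic=eth0
crm(live)configure# group gr_ip_ftp pr_ip_ftp pr_srv_proftpd
crm(live)configure# commit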
Explorer: ftp://www

Browser: http://www.corpX.un/~user1
node2# ssh-keygen
node2# ssh-copy-id node1

nodeN# crontab -l
...
* * * * * ps ax | grep -v grep | grep -q 'proftpd: (accepting connections)' && /usr/bin/rsync -az --delete /home/ nodeM:/home/
Scenario: build a fault-tolerant corporate file server. The first cluster node should be used as the server by default.
node1# cat /etc/bind/corpX.un
...
fs A 192.168.X.20
node1# csync2 -xv
root@nodeN:~# service smbd stop
service nmbd stop
systemctl disable smbd
systemctl disable nmbd
node1# crm configure

crm(live)configure# primitive pr_srv_smbd systemd:smbd

crm(live)configure# primitive pr_ip_smbd ocf:heartbeat:IPaddr2 params ip=192.168.X.20 cidr_netmask=32 nic=eth0

crm(live)configure# group gr_ip_fs pr_fs_r0 pr_ip_smbd pr_srv_smbd
or
crm(live)configure# group pr_fs_r0 pr_ip_smbd pr_srv_smbd
or
crm(live)configure# edit gr_ip_fs

crm(live)configure# commit
master# watch cat /proc/drbd
master# crm_mon
master# crm resource stop pr_srv_smbd pr_ip_smbd
master# crm configure delete pr_srv_smbd pr_ip_smbd gr_ip_fs
node1# cat /etc/bind/corpX.un
...
server1 A 192.168.X.31
server2 A 192.168.X.32
@ MX 10 server1
node1# csync2 -xv
nodeN# apt install bridge-utils

nodeN# cat /etc/network/interfaces
...
auto br0
iface br0 inet static
bridge_ports eth0
#bridge_hw 08:00:27:12:34:0N        # set this if the same MAC address gets generated on both nodes (Debian 11)
address 192.168.X.N
...
nodeN# cat /etc/default/isc-dhcp-server
...
INTERFACESv4="br0"
nodeN# init 0
nodeN# ip a

node1# mount | egrep 'ext|ocfs'

nodeN# cat /proc/drbd
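To confirm that eth0 is actually attached to the bridge (bridge-utils was installed above):

nodeN# brctl show br0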
root@nodeN:~# rmdir /var/lib/lxc/
root@nodeN:~# ln -s /disk2/var/lib/lxc/ /var/lib/lxc
root@node1.corpX.un:~# mkdir -p /disk2/var/lib/lxc/

root@node1.corpX.un:~# lxc-create -t download -n server-template -- -d ubuntu -r focal -a amd64
root@node1.corpX.un:~# chroot /var/lib/lxc/server-template/rootfs/ /bin/bash

root@node1:/# PS1='server-template:\w# '

server-template:/# apt update

server-template:/# apt purge netplan.io

server-template:/# apt install nano ssh
server-template:/# cat /etc/hostname
server-template.corpX.un
server-template:/# cat /etc/hosts
127.0.0.1       localhost
192.168.X.30    server-template.corpX.un server-template
server-template:/# cat /etc/systemd/resolved.conf
[Resolve]
DNS=192.168.X.1 192.168.X.2
server-template:/# passwd ubuntu
Ctrl-D
root@node1.corpX.un:~# cat /var/lib/lxc/server-template/config
...
lxc.rootfs.path = dir:/var/lib/lxc/server-template/rootfs
lxc.uts.name = server-template
...
lxc.net.0.type = veth
lxc.net.0.link = br0
lxc.net.0.flags = up
lxc.net.0.ipv4.address = 192.168.X.30/24
lxc.net.0.ipv4.gateway = 192.168.X.254
root@node1.corpX.un:~#
SRC_CONTAINER=server-template
DST_CONTAINER=server1
SRC_IP=192.168.X.30
DST_IP=192.168.X.31
time cp -rp /var/lib/lxc/$SRC_CONTAINER/ /var/lib/lxc/$DST_CONTAINER/
find /var/lib/lxc/$DST_CONTAINER/rootfs/etc/ -type f -exec sed -i'' -e "s/$SRC_CONTAINER/$DST_CONTAINER/" -e "s/$SRC_IP/$DST_IP/" {} \;
sed -i'' -e "s/$SRC_CONTAINER/$DST_CONTAINER/" -e "s/$SRC_IP/$DST_IP/" /var/lib/lxc/$DST_CONTAINER/config
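If the second container from the zone file is needed as well (server2, 192.168.X.32), the same copy-and-rename block can be repeated with adjusted variables:

root@node1.corpX.un:~#
SRC_CONTAINER=server-template
DST_CONTAINER=server2
SRC_IP=192.168.X.30
DST_IP=192.168.X.32

time cp -rp /var/lib/lxc/$SRC_CONTAINER/ /var/lib/lxc/$DST_CONTAINER/

find /var/lib/lxc/$DST_CONTAINER/rootfs/etc/ -type f -exec sed -i'' -e "s/$SRC_CONTAINER/$DST_CONTAINER/" -e "s/$SRC_IP/$DST_IP/" {} \;

sed -i'' -e "s/$SRC_CONTAINER/$DST_CONTAINER/" -e "s/$SRC_IP/$DST_IP/" /var/lib/lxc/$DST_CONTAINER/config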
root@node1.corpX.un:~# lxc-info -n server1

root@node1.corpX.un:~# systemctl start lxc@server1

root@node1.corpX.un:~# ssh ubuntu@server1

root@node1.corpX.un:~# systemctl stop lxc@server1
#primitive pr_lxc_server1 systemd:lxc@server1 meta resource-stickiness=100
primitive pr_lxc_server1 systemd:lxc@server1
group gr_fs_lxc pr_fs_r0 pr_lxc_server1
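These directives can be entered interactively, in the same way as the smbd resources earlier (a sketch):

node1# crm configure
crm(live)configure# primitive pr_lxc_server1 systemd:lxc@server1
crm(live)configure# group gr_fs_lxc pr_fs_r0 pr_lxc_server1
crm(live)configure# commit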
server1$ sudo -i

server1# apt install ansible git
server1# ansible-playbook conf/ansible/roles/mail.yml
node1# crm resource move gr_fs_lxc node2