This shows you the differences between two versions of the page.
Both sides previous revision Previous revision Next revision | Previous revision Next revision Both sides next revision | ||
сервис_ansible [2023/08/15 08:55] val [Роль настроенного через ifupdown узла сети] |
сервис_ansible [2024/04/22 16:45] val [Дополнительные материалы] |
||
---|---|---|---|
Line 68: | Line 68: | ||
ansible_ssh_user=vagrant | ansible_ssh_user=vagrant | ||
ansible_ssh_pass=strongpassword | ansible_ssh_pass=strongpassword | ||
+ | #ansible_sudo_pass=strongpassword | ||
ansible_become=yes | ansible_become=yes | ||
</code> | </code> | ||
Line 106: | Line 107: | ||
node1# ansible corpX -m command -a 'uname -a' | node1# ansible corpX -m command -a 'uname -a' | ||
- | # ansible kubes -a 'sed -i"" -e "/swap/s/^/#/" /etc/fstab' | ||
- | # ansible kubes -a 'swapoff -a' | ||
+ | kube1# ansible kubes -a 'sed -i"" -e "/swap/s/^/#/" /etc/fstab' | ||
+ | kube1# ansible kubes -a 'swapoff -a' | ||
+ | ИЛИ | ||
+ | (venv1) server# ansible all -a 'sed -i"" -e "/swap/s/^/#/" /etc/fstab' -i /root/kubespray/inventory/mycluster/hosts.yaml #--limit=kube4 | ||
+ | (venv1) server# ansible all -a 'swapoff -a' -i /root/kubespray/inventory/mycluster/hosts.yaml #--limit=kube4 | ||
node1# ansible corpX -f 2 -m apt -a 'pkg=apache2 state=present update_cache=true' | node1# ansible corpX -f 2 -m apt -a 'pkg=apache2 state=present update_cache=true' | ||
- | node1# ansible addnodes -vv -f 5 -m apt -a 'pkg=ceph,tgt-rbd state=present update_cache=true' | + | node1# ansible addnodes -vv -f 5 -m apt -a 'pkg=ceph,tgt-rbd state=present update_cache=true' #-e 'https_proxy=http://radio.specialist.ru:3128/' -e 'http_proxy=http://radio.specialist.ru:3128/' |
server# ansible nodes -f 3 -m apt -a 'pkg=openvpn state=present update_cache=true' | server# ansible nodes -f 3 -m apt -a 'pkg=openvpn state=present update_cache=true' | ||
Line 141: | Line 145: | ||
λ touch provision_docker.yml | λ touch provision_docker.yml | ||
+ | |||
+ | или | ||
+ | |||
+ | student@node1:~$ cat /vagrant/provision_docker.yml | ||
</code><code> | </code><code> | ||
- hosts: "{{ variable_host | default('all') }}" | - hosts: "{{ variable_host | default('all') }}" | ||
Line 359: | Line 367: | ||
</code> | </code> | ||
===== Использование handlers ===== | ===== Использование handlers ===== | ||
+ | |||
+ | ==== Пример 1 ==== | ||
* [[Сервис HTTP#Использование домашних каталогов]] | * [[Сервис HTTP#Использование домашних каталогов]] | ||
Line 382: | Line 392: | ||
</code> | </code> | ||
+ | ==== Пример 2 ==== | ||
+ | |||
+ | <code> | ||
+ | server# cat za.conf | ||
+ | </code><code> | ||
+ | ListenIP=0.0.0.0 | ||
+ | StartAgents=0 | ||
+ | ServerActive=server | ||
+ | UserParameter=listinstalledsoft,ls /usr/share/applications | awk -F '.desktop' ' { print $1}' - | ||
+ | </code><code> | ||
+ | node1# cat za.yml | ||
+ | </code><code> | ||
+ | - hosts: lin_ws | ||
+ | tasks: | ||
+ | - name: Install zabbix agent | ||
+ | apt: pkg=zabbix-agent state=present update_cache=true | ||
+ | |||
+ | - name: Create conf file | ||
+ | copy: src=za.conf dest=/etc/zabbix/zabbix_agentd.conf.d/za.conf | ||
+ | notify: | ||
+ | - restart za | ||
+ | |||
+ | handlers: | ||
+ | - name: restart za | ||
+ | service: name=zabbix-agent state=restarted | ||
+ | </code><code> | ||
+ | server# ansible-playbook za.yml | ||
+ | </code> | ||
===== Использование ролей ===== | ===== Использование ролей ===== | ||
Line 391: | Line 429: | ||
<code> | <code> | ||
- | # ###cd conf/ansible/roles/ | + | # ###cd /root/conf/ |
+ | # ###git pull origin master | ||
+ | # ###cd /root/conf/ansible/roles/ | ||
# cat nodes.yml | # cat nodes.yml | ||
Line 398: | Line 438: | ||
hosts: addnodes | hosts: addnodes | ||
# hosts: kubes | # hosts: kubes | ||
+ | # hosts: all | ||
roles: | roles: | ||
- node | - node | ||
Line 472: | Line 513: | ||
ИЛИ | ИЛИ | ||
- | # ansible-playbook -f 5 conf/ansible/roles/nodes.yml | + | # ansible-playbook -f 5 /root/conf/ansible/roles/nodes.yml |
+ | |||
+ | ИЛИ | ||
+ | |||
+ | (venv1) server# ansible-playbook -f 5 /root/conf/ansible/roles/nodes.yml -i /root/kubespray/inventory/mycluster/hosts.yaml #--limit=kube4 | ||
</code> | </code> | ||
Line 668: | Line 713: | ||
- role: proxy | - role: proxy | ||
- role: thunderbird | - role: thunderbird | ||
+ | </code><code> | ||
+ | client1:~/ansible-pull-gpo# ansible-playbook local.yml | ||
</code> | </code> | ||
Line 673: | Line 720: | ||
<code> | <code> | ||
- | client3:~# ansible-pull -U http://gate.corp13.un/user1/ansible-pull-gpo.git | + | client3:~# ###ansible-pull -U http://gate.corpX.un/user1/ansible-pull-gpo.git |
</code><code> | </code><code> | ||
client1:~/ansible-pull-gpo# cat start.sh | client1:~/ansible-pull-gpo# cat start.sh | ||
Line 683: | Line 730: | ||
echo -e "0 */2 * * * \ | echo -e "0 */2 * * * \ | ||
- | /usr/bin/ansible-pull -s 120 -U http://gate.corp13.un/user1/ansible-pull-gpo.git -C $BR 2>&1 | /usr/bin/logger -t ansible-pull\n\ | + | /usr/bin/ansible-pull -s 120 -U http://gate.corpX.un/user1/ansible-pull-gpo.git -C $BR 2>&1 | /usr/bin/logger -t ansible-pull\n\ |
- | @reboot sleep 1m; /usr/bin/ansible-pull -U http://gate.corp13.un/user1/ansible-pull-gpo.git -C $BR 2>&1 | /usr/bin/logger -t ansible-pull" | crontab - | + | @reboot sleep 1m; /usr/bin/ansible-pull -U http://gate.corpX.un/user1/ansible-pull-gpo.git -C $BR 2>&1 | /usr/bin/logger -t ansible-pull" | crontab - |
+ | |||
+ | init 6 | ||
</code> | </code> | ||
Line 743: | Line 792: | ||
====== Дополнительные материалы ====== | ====== Дополнительные материалы ====== | ||
+ | |||
+ | ===== Вместо ansible ===== | ||
<code> | <code> | ||
- | выполнение команд на цисках через ансибл | + | for i in 1 2 3; do ssh node$i "apt update && apt install apache2"; done |
+ | </code> | ||
+ | ===== выполнение команд на цисках через ансибл ===== | ||
+ | <code> | ||
1. добавить в /etc/ansible/group_vars/all.yml строки | 1. добавить в /etc/ansible/group_vars/all.yml строки | ||
ansible_connection: network_cli | ansible_connection: network_cli |