Unify the allocation of keepalived virtual_router_id values across clusters; the device-management HA cluster uses 54.

lijia
2020-10-23 15:54:04 +08:00
parent 506a231234
commit f7e02bae70
7 changed files with 60 additions and 145 deletions
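The point of pinning the VRID is that VRRP instances sharing one L2 segment must not reuse a virtual_router_id, otherwise their advertisements collide; giving the device-management HA pair its own value (54) avoids that. As a quick sanity check (not part of this commit, and assuming the eth0 interface and VIP from the group vars below), the advertised VRID and the VIP owner can be inspected with standard tools:

# VRRP advertisements are IP protocol 112; tcpdump's decoded output should show the vrid and priority being announced.
tcpdump -nn -i eth0 -c 5 'ip proto 112'
# Only the current MASTER should be holding the virtual IP.
ip -4 addr show dev eth0 | grep 192.168.44.58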

View File

@@ -38,11 +38,13 @@ ha_master:
   instance_state: MASTER
   ethname: eth0
   virtual_ip: "192.168.44.58"
+  oam_virtual_router_id: 54
 ha_backup:
   instance_state: BACKUP
   ethname: eth0
   virtual_ip: "192.168.44.58"
+  oam_virtual_router_id: 54
 java_version: jdk1.8.0_73
 bifang_api_ip: "192.168.43.5"

View File

@@ -0,0 +1,43 @@
- name: "copy keepalived rpm to destination server"
copy:
src: "{{ role_path }}/files/"
dest: /tmp
mode: 0755
- name: "install keepalived"
yum:
name:
- /tmp/keepalived-1.3.5-16.el7.x86_64.rpm
state: present
- name: "install ipvsadm"
yum:
name:
- /tmp/ipvsadm-1.27-8.el7.x86_64.rpm
state: present
- name: "Template check_service_health.sh.j2"
template:
src: "{{ role_path }}/templates/check_service_health.sh.j2"
dest: /etc/keepalived/check_service_health.sh
tags: template
- name: "Template keepalived_backup.conf.j2"
template:
src: "{{ role_path }}/templates/keepalived_backup.conf.j2"
dest: /etc/keepalived/keepalived.conf
tags: template
- name: Template the keepalived.service.j2
template:
src: "{{ role_path }}/files/keepalived.service.j2"
dest: "/usr/lib/systemd/system/keepalived.service"
tags: template
- name: "Start keepalived"
systemd:
name: keepalived.service
enabled: yes
state: restarted
enabled: yes
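As a follow-up check (not part of the role; a minimal verification sketch using only standard commands), keepalived's state on each node can be confirmed after these tasks run:

systemctl is-active keepalived               # should print "active" once the restart task has run
journalctl -u keepalived --no-pager -n 20    # recent log lines should show the MASTER/BACKUP state transitions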

View File

@@ -1,22 +0,0 @@
-#!/bin/bash
-# running: 0, stopped: 3
-STATE=$(systemctl status oam_cluster.service)
-if [ $STATE -ne 0 ]
-then
-    systemctl restart oam_cluster
-    sleep 10
-    STATE=$(systemctl status oam_cluster.service)
-    if [ $STATE -ne 0 ]
-    then
-        killall keepalived
-        exit 1
-    else
-        exit 0
-    fi
-else
-    exit 0
-fi
-# TODO: check consul_cluster, check influxdb

View File

@@ -1,50 +1,38 @@
 ! Configuration File for keepalived
 global_defs {
-    router_id LVSTEST2
+    router_id OAMHA
 }
 # health check for services such as httpd, mysql, etc.
-vrrp_script chk_http_service {
-    script "/etc/keepalived/chk_http_service.sh"
+vrrp_script check_service_health {
+    script "/etc/keepalived/check_service_health.sh"
     # health-check interval, in seconds
     interval 3
-    # subtract 20 from the priority on each failed check
+    # subtract 10 from the priority on each failed check
     weight -10
     fail 3
     # failure count: after the configured number of consecutive failures the node is treated as faulty and a failover is triggered
     rise 1
     # a single successful check immediately marks the node's resource as recovered
 }
 vrrp_instance VI_1 {
-    state BACKUP
-    interface eth0
-    virtual_router_id 51
+    state {{ ha_backup.instance_state }}
+    interface {{ ha_backup.ethname }}
+    virtual_router_id {{ ha_backup.oam_virtual_router_id }}
     priority 10
     advert_int 1
     authentication {
         auth_type PASS
-        auth_pass 1111
+        auth_pass oamha
     }
     virtual_ipaddress {
-        192.168.44.244
+        {{ ha_backup.virtual_ip }}
     }
     # scripts to track
     track_script {
-        chk_http_service  # the health-check script configured above
+        check_service_health  # the health-check script configured above
     }
 }
-virtual_server 192.168.44.244 80 {
-    delay_loop 6
-    lb_algo rr
-    lb_kind NAT
-    persistence_timeout 50
-    protocol TCP
-    #real_server 127.0.0.1 80 {
-    #    weight 1
-    #}
-}

View File

@@ -1,46 +0,0 @@
-! Configuration File for keepalived
-global_defs {
-    router_id LVSTEST1
-}
-# health check for services such as httpd, mysql, etc.
-vrrp_script chk_http_service {
-    script "/etc/keepalived/chk_http_service.sh"
-    # health-check interval, in seconds
-    interval 3
-    # subtract 20 from the priority on each failed check
-    weight -10
-    fail 3
-    # failure count: after the configured number of consecutive failures the node is treated as faulty and a failover is triggered
-    rise 1
-    # a single successful check immediately marks the node's resource as recovered
-}
-vrrp_instance VI_1 {
-    state MASTER
-    interface eth0
-    virtual_router_id 51
-    priority 100
-    advert_int 1
-    authentication {
-        auth_type PASS
-        auth_pass 1111
-    }
-    virtual_ipaddress {
-        192.168.44.244
-    }
-    # scripts to track
-    track_script {
-        chk_http_service  # the health-check script configured above
-    }
-}
-virtual_server 192.168.44.244 80 {
-    delay_loop 6
-    lb_algo rr
-    lb_kind NAT
-    persistence_timeout 50
-    protocol TCP
-}

View File

@@ -1,50 +0,0 @@
-! Configuration File for keepalived
-global_defs {
-    router_id LVSTEST2
-}
-# health check for services such as httpd, mysql, etc.
-vrrp_script chk_http_service {
-    script "/etc/keepalived/chk_http_service.sh"
-    # health-check interval, in seconds
-    interval 3
-    # subtract 20 from the priority on each failed check
-    weight -10
-    fail 3
-    # failure count: after the configured number of consecutive failures the node is treated as faulty and a failover is triggered
-    rise 1
-    # a single successful check immediately marks the node's resource as recovered
-}
-vrrp_instance VI_1 {
-    state BACKUP
-    interface eth0
-    virtual_router_id 51
-    priority 10
-    advert_int 1
-    authentication {
-        auth_type PASS
-        auth_pass 1111
-    }
-    virtual_ipaddress {
-        192.168.44.244
-    }
-    # scripts to track
-    track_script {
-        chk_http_service  # the health-check script configured above
-    }
-}
-virtual_server 192.168.44.244 80 {
-    delay_loop 6
-    lb_algo rr
-    lb_kind NAT
-    persistence_timeout 50
-    protocol TCP
-    #real_server 127.0.0.1 80 {
-    #    weight 1
-    #}
-}

View File

@@ -21,7 +21,7 @@ vrrp_script check_service_health {
 vrrp_instance VI_1 {
     state {{ ha_master.instance_state }}
     interface {{ ha_master.ethname }}
-    virtual_router_id 51
+    virtual_router_id {{ ha_master.oam_virtual_router_id }}
     priority 100
     advert_int 1
     authentication {