Compare commits

..

1 Commits

Author SHA1 Message Date
zhangzhihan
af11ad31a4 update dpi 20.08.1 2020-09-01 11:07:04 +08:00
258 changed files with 850 additions and 6196 deletions

View File

@@ -1,100 +0,0 @@
- hosts: adc_mxn
remote_user: root
roles:
- {role: adc_exporter, tags: adc_exporter}
- {role: adc_exporter_proxy, tags: adc_exporter_proxy}
# - {role: switch_rule, tags: switch_rule}
- hosts: adc_mcn0
remote_user: root
vars_files:
- install_config/group_vars/adc_global.yml
- install_config/group_vars/adc_mcn0.yml
roles:
- {role: framework, tags: framework}
- {role: kernel-ml, tags: kernel-ml}
- {role: mrzcpd, tags: mrzcpd}
- {role: sapp, tags: sapp}
- {role: tsg_master, tags: tsg_master}
- {role: kni, tags: kni}
- {role: firewall, tags: firewall}
# - tsg_app
- {role: http_healthcheck, tags: http_healthcheck}
- {role: redis, tags: redis}
- {role: cert-redis, tags: cert-redis}
- {role: maat-redis, tags: maat-redis, when: deploy_mode == "cluster"}
- {role: certstore, tags: certstore}
- {role: telegraf_statistic, tags: telegraf_statistic}
- {role: app_proto_identify, tags: app_proto_identify}
- {role: adc_exporter, tags: adc_exporter}
# - {role: switch_control, tags: switch_control}
- {role: tsg-env-patch, tags: tsg-env-patch}
- {role: docker-env, tags: docker-env}
- {role: tsg-diagnose, tags: tsg-diagnose}
- hosts: adc_mcn1
remote_user: root
vars_files:
- install_config/group_vars/adc_global.yml
- install_config/group_vars/adc_mcn1.yml
roles:
# - tsg-env-mcn1
- {role: framework, tags: framework}
- {role: kernel-ml, tags: kernel-ml}
- {role: mrzcpd, tags: mrzcpd}
- {role: tfe, tags: tfe}
- {role: adc_exporter, tags: adc_exporter}
# - {role: switch_control, tags: switch_control}
- {role: tsg-env-patch, tags: tsg-env-patch}
- {role: tsg-diagnose_sync_ca, tags: tsg-diagnose_sync_ca}
- hosts: adc_mcn2
remote_user: root
vars_files:
- install_config/group_vars/adc_global.yml
- install_config/group_vars/adc_mcn2.yml
roles:
# - tsg-env-mcn2
- {role: framework, tags: framework}
- {role: kernel-ml, tags: kernel-ml}
- {role: mrzcpd, tags: mrzcpd}
- {role: tfe, tags: tfe}
- {role: adc_exporter, tags: adc_exporter}
# - {role: switch_control, tags: switch_control}
- {role: tsg-env-patch, tags: tsg-env-patch}
- {role: tsg-diagnose_sync_ca, tags: tsg-diagnose_sync_ca}
- hosts: adc_mcn3
remote_user: root
vars_files:
- install_config/group_vars/adc_global.yml
- install_config/group_vars/adc_mcn3.yml
roles:
- {role: framework, tags: framework}
- {role: kernel-ml, tags: kernel-ml}
- {role: mrzcpd, tags: mrzcpd}
- {role: tfe, tags: tfe}
# - {role: adc_exporter, tags: adc_exporter}
- {role: switch_control, tags: switch_control}
- {role: tsg-env-patch, tags: tsg-env-patch}
- {role: tsg-diagnose_sync_ca, tags: tsg-diagnose_sync_ca}
- hosts: adc_mcn0
remote_user: root
roles:
- {role: tsg-diagnose_stop_sync, tags: tsg-diagnose_stop_sync}
- hosts: packet_dump_server
remote_user: root
vars_files:
- install_config/group_vars/adc_global.yml
roles:
- {role: framework, tags: framework}
- {role: packet_dump, tags: packet_dump}
- hosts: app_global
remote_user: root
vars_files:
- install_config/group_vars/app_global.yml
roles:
- {role: app_global, tags: app_global}

58
deploy.yml Normal file
View File

@@ -0,0 +1,58 @@
- hosts: Functional_Host
roles:
- framework
- kernel-ml
- hosts: blade-00
roles:
# - tsg-env-mcn0
- mrzcpd
- sapp
- tsg_master
- kni
- firewall
- http_healthcheck
- clotho
- certstore
- cert-redis
- telegraf_statistic
- hosts: blade-01
roles:
# - tsg-env-mcn1
- mrzcpd
- tfe
- hosts: blade-02
roles:
# - tsg-env-mcn2
- mrzcpd
- tfe
- hosts: blade-03
roles:
# - tsg-env-mcn3
- mrzcpd
- tfe
- hosts: blade-mxn
roles:
# - tsg-env-mxn
- hosts: pc-as-tun-mode
roles:
- kernel-ml
- framework
- mrzcpd
- tsg-env-tun-mode
- sapp
- tsg_master
- kni
- firewall
- http_healthcheck
- clotho
- certstore
- cert-redis
- tfe
- telegraf_statistic
- proxy_status

View File

@@ -1,124 +0,0 @@
#########################################
#####1: Inline_device; 2: Allot; 3: ADC_Tun_mode;
tsg_access_type: 2
#####2: ADC;
tsg_running_type: 2
#####deploy mode: cluster, single
deploy_mode: "cluster"
########################################
#Deploy_finished_reboot
Deploy_finished_reboot: 0
########################################
#IP Config
maat_redis_city_server:
address: "10.4.62.253"
port: 7002
maat_redis_server:
address: "192.168.100.1"
port: 7002
port_num: 1
db: 0
dynamic_maat_redis_server:
address: "192.168.100.1"
port: 7002
port_num: 1
db: 1
cert_store_server:
address: "192.168.100.1"
port: 9991
log_kafkabrokers:
address: ['1.1.1.1:9092','2.2.2.2:9092']
log_minio:
address: "10.4.62.253"
port: 9090
#########################################
#Log Level Config
#日志等级 10:DEBUG 20:INFO 30:FATAL
fw_ftp_log_level: 10
fw_mail_log_level: 10
fw_http_log_level: 10
fw_dns_log_level: 10
fw_quic_log_level: 10
app_control_log_level: 10
capture_packet_log_level: 10
tsg_log_level: 10
tsg_master_log_level: 10
kni_log_level: 10
#日志等级 DEBUG INFO FATAL
tfe_log_level: FATAL
tfe_http_log_level: FATAL
pangu_log_level: FATAL
doh_log_level: FATAL
certstore_log_level: FATAL
packet_dump_log_level: 10
#######################################
#Sapp Performance Config
#Sapp工作在ADC计算板0时建议使用如下30+8的配置以保证更高的处理性能
sapp:
worker_threads: 42
send_only_threads_max: 1
bind_mask: 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43
inbound_route_dir: 1
########################################
#Kni Config
kni:
global:
tfe_node_count: 3
watch_dog:
switch: 1
maat:
readconf_mode: 2
send_logger:
switch: 1
tfe_nodes:
tfe0_enabled: 1
tfe1_enabled: 1
tfe2_enabled: 1
########################################
#Tfe Config
tfe:
nr_threads: 32
mirror_enable: 1
########################################
#Marsio Config
#marsio工作在ADC计算板时建议使用如下配置以保证更高的处理性能
mcn0_mrzcpd:
iocore: 52,53,54,55
mcn123_mrzcpd:
iocore: 54,55
mrtunnat:
lcore_id: 48,49,50,51
#########################################
#Tsg_app
tsg_app_enable: 0
app_global_ip: "1.1.1.1"
applog_level: 10
app_master_log_level: 10
app_sketch_local_log_level: 10
app_control_plug_log_level: 10
breakpad_upload_url: http://10.4.63.4:9000/api/2/minidump/?sentry_key=3203b43fd5384a7dbe6a48ecb1f3c595
data_center: Kyzylorda
tsg_master_entrance_id: 9
nic_mgr:
name: em1
sapp_prometheus_enable: 1
sapp_prometheus_port: 9273
sapp_prometheus_url_path: "/metrics"

View File

@@ -1,41 +0,0 @@
#########################################
#Mcn0管理口网卡名
nic_mgr:
name: ens1f3
#########################################
#Mcn0流量接入网卡固定配置
nic_data_incoming:
name: ens1f4
#########################################
#Mcn0其他数据口网卡名配置固定配置
nic_inner_ctrl:
name: ens1.100
nic_to_tfe:
tfe0:
name: ens1f5
tfe1:
name: ens1f6
tfe2:
name: ens1f7
#########################################
#串联设备接入相关配置
inline_device_config:
keepalive_ip: 192.168.1.30
keepalive_mask: 255.255.255.252
#########################################
#Allot接入相关配置
AllotAccess:
#virturlInterface_1: ens1f2.103
#virturlInterface_2: ens1f2.104
virturlID_1: 1201
virturlID_2: 1202
virturlID_3: 1301
virturlID_4: 1302
#vvipv4_mask: 24
#vvipv6_mask: 64
bladename: mcn0

View File

@@ -1,19 +0,0 @@
#########################################
#Mcn1管理口网卡名
nic_mgr:
name: ens1f3
#########################################
#Mcn1流量接入网卡固定配置
nic_data_incoming:
name: ens1f1
#########################################
#Mcn1其他数据口网卡名配置固定配置
nic_inner_ctrl:
name: ens1.100
nic_traffic_mirror:
name: ens1f2
use_mrzcpd: 1
bladename: mcn1

View File

@@ -1,19 +0,0 @@
#########################################
#Mcn2管理口网卡名
nic_mgr:
name: ens8f3
#########################################
#Mcn2流量接入网卡固定配置
nic_data_incoming:
name: ens8f1
#########################################
#Mcn2其他数据口网卡名配置固定配置
nic_inner_ctrl:
name: ens8.100
nic_traffic_mirror:
name: ens8f2
use_mrzcpd: 1
bladename: mcn2

View File

@@ -1,19 +0,0 @@
#########################################
#Mcn3管理口网卡名
nic_mgr:
name: ens8f3
#########################################
#Mcn3流量接入网卡固定配置
nic_data_incoming:
name: ens8f1
#########################################
#Mcn3其他数据口网卡名配置固定配置
nic_inner_ctrl:
name: ens8.100
nic_traffic_mirror:
name: ens8f2
use_mrzcpd: 1
bladename: mcn3

View File

@@ -0,0 +1,101 @@
#########################################
#####0: Pcap; 1: Inline_device; 2: Allot; 3: ADC_Tun_mode; 4: ATCA;
tsg_access_type: 4
#####0: Tun_mode; 1: normal; 2: ADC;
tsg_running_type: 1
#Common combination mode:
#1:Server or PC tun mode: 0 + 0
#2:Server with Inline device: 1 + 1
#3:ADC with Inline device: 1 + 2
#4:ADC with Allot: 2 + 2
#5:ADC tun mode: 3 + 1
#6:ATCA: 4 + 1
########################################
maat_redis_server:
address: "192.168.40.168"
port: 7002
db: 0
dynamic_maat_redis_server:
address: "192.168.40.168"
port: 7002
db: 0
cert_store_server:
address: "192.168.100.1"
port: 9991
log_kafkabrokers:
address: "1.1.1.1:9092,2.2.2.2:9092"
log_minio:
address: "192.168.40.168"
port: 9090
fs_remote:
switch: 1
address: "192.168.100.1"
port: 58125
########################################
sapp:
worker_threads: 16
send_only_threads_max: 8
bind_mask: 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16
inbound_route_dir: 1
########################################
kni:
global:
log_level: 30
tfe_node_count: 3
watch_dog:
switch: 1
maat:
readconf_mode: 2
send_logger:
switch: 1
tfe_nodes:
tfe0_enabled: 1
tfe1_enabled: 1
tfe2_enabled: 1
########################################
tfe:
nr_threads: 32
mc_cache_eth: lo
keykeeper:
no_cache: 0
########################################
mrzcpd:
iocore: 39
mrtunnat:
lcore_id: 38
#############ATCA config################
nic_data_incoming:
ethname: enp1s0
vf0_name: enp1s2
vf1_name: enp1s2f1
vf2_name: enp1s2f2
VlanFlipping:
vlanID_1: 100
vlanID_2: 101
vlanID_3: 103
vlanID_4: 104
#############Server or PC tun mode######
server:
ethname: eth0
tun_name: eth0.100
internal_interface: "eth2"
external_interface: "eth3"

View File

@@ -1,10 +0,0 @@
#########################################
app_sketch_global_log_level: 10
maat_redis_server:
address: "192.168.40.168"
port: 7002
db: 0
file_stat_ip: "1.1.1.1"

View File

@@ -0,0 +1,23 @@
nic_mgr:
name: enp6s0
nic_data_incoming:
name: ens1f4
ip: 192.168.1.30
mask: 255.255.255.252
nic_inner_ctrl:
name: ens1.100
nic_to_tfe:
tfe0:
name: ens1f5
tfe1:
name: ens1f6
tfe2:
name: ens1f7
AllotAccess:
virturlInterface_1: ens1f2.103
virturlInterface_2: ens1f2.104
virturlID_1: 103
virturlID_2: 104
vvipv4_mask: 24
vvipv6_mask: 64

View File

@@ -0,0 +1,11 @@
nic_mgr:
name: enp6s0
nic_data_incoming:
name: ens1f1
mac: AA:BB:CC:DD:EE:FF
address: 127.0.0.1
nic_inner_ctrl:
name: ens1.100
nic_traffic_mirror:
name: ens1f2
use_mrzcpd: 1

View File

@@ -0,0 +1,10 @@
nic_mgr:
name: enp6s0
nic_data_incoming:
name: ens8f1
mac: AA:BB:CC:DD:EE:FF
nic_inner_ctrl:
name: ens8.100
nic_traffic_mirror:
name: ens8f2
use_mrzcpd: 1

View File

@@ -0,0 +1,10 @@
nic_mgr:
name: enp6s0
nic_data_incoming:
name: ens8f1
mac: AA:BB:CC:DD:EE:FF
nic_inner_ctrl:
name: ens8.100
nic_traffic_mirror:
name: ens8f2
use_mrzcpd: 1

View File

@@ -1,166 +0,0 @@
#########################################
#####0: Pcap; 1: Inline_device; 5:ATCA_VXLAN;
tsg_access_type: 0
#####0: Tun_mode; 1: normal;
tsg_running_type: 0
#####deploy mode: cluster, single
deploy_mode: "single"
########################################
#Deploy_finished_reboot
Deploy_finished_reboot: 0
########################################
#Server Basic Config
nic_mgr:
name: eth0
nic_inner_ctrl:
name: eth0.100
#########################################
#IP Config
#maat_redis_city_server相关配置只在部署集群模式时使用
maat_redis_city_server:
address: ""
port:
maat_redis_server:
address: "#Bifang IP#"
port: 7002
port_num: 1
db: 0
dynamic_maat_redis_server:
address: "#Bifang IP#"
port: 7002
port_num: 1
db: 1
cert_store_server:
address: "192.168.100.1"
port: 9991
log_kafkabrokers:
address: ['1.1.1.1:9092','2.2.2.2:9092']
log_minio:
address: "10.9.62.253"
port: 9090
#########################################
#Log Level Config
#日志等级 10:DEBUG 20:INFO 30:FATAL
fw_ftp_log_level: 10
fw_mail_log_level: 10
fw_http_log_level: 10
fw_dns_log_level: 10
fw_quic_log_level: 10
app_control_log_level: 10
capture_packet_log_level: 10
tsg_log_level: 10
tsg_master_log_level: 10
kni_log_level: 10
#日志等级 DEBUG INFO FATAL
tfe_log_level: FATAL
tfe_http_log_level: FATAL
pangu_log_level: FATAL
doh_log_level: FATAL
certstore_log_level: 10
packet_dump_log_level: 10
#########################################
#Sapp Performance Config
#如果tsg_access_type=0sapp跑在pcap模式则以下配置可忽略
sapp:
worker_threads: 23
send_only_threads_max: 1
bind_mask: 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24
inbound_route_dir: 1
#########################################
#Sapp Double-Arm Config
packet_io:
internal_interface: eth2
external_interface: eth3
#########################################
#Kni Config
kni:
global:
tfe_node_count: 1
watch_dog:
switch: 1
maat:
readconf_mode: 2
send_logger:
switch: 1
tfe_nodes:
tfe0_enabled: 1
tfe1_enabled: 0
tfe2_enabled: 0
#########################################
#Tfe Config
tfe:
nr_threads: 32
mirror_enable: 1
#########################################
#Marsio Config
mrzcpd:
iocore: 39
mrtunnat:
lcore_id: 38
#########################################
#Tsg_app
tsg_app_enable: 1
app_global_ip: "1.1.1.1"
applog_level: 10
app_master_log_level: 10
app_sketch_local_log_level: 10
app_control_plug_log_level: 10
#########################################
#ATCA Config
#下列配置只在tsg_access_type=4时生效
ATCA_data_incoming:
ethname: enp1s0
vf0_name: enp1s2
vf1_name: enp1s2f1
vf2_name: enp1s2f2
ATCA_VlanFlipping:
vlanID_1: 100
vlanID_2: 101
vlanID_3: 103
vlanID_4: 104
#下列配置只在tsg_access_type=5时生效
ATCA_VXLAN:
keepalive_ip: "10.254.19.1"
keepalive_mask: "255.255.255.252"
#########################################
#Inline Device Config
inline_device_config:
keepalive_ip: 192.168.1.30
keepalive_mask: 255.255.255.252
data_incoming: eth5
#########################################
#新增配置项,均为默认值不用改
breakpad_upload_url: http://127.0.0.1:9000/api/2/minidump/?sentry_key=3556bac347c74585a994eb6823faf5c6
data_center: Beijing
tsg_master_entrance_id: 0
sapp_prometheus_enable: 1
sapp_prometheus_port: 9273
sapp_prometheus_url_path: "/metrics"

View File

@@ -1,45 +1,26 @@
###################
# For example #
###################
#变量device_id根据设备序号设置即可
#变量vvipv4_1、vvipv4_2、vvipv6_1、vvipv6_2为Allot相关配置其他环境可不填或直接删除变量
#
#20.09版本新增APP部署
#[app_global]
#0.0.0.0
[all:vars]
ansible_user=root
package_source=local
#[server_as_tun_mode]
#1.1.1.1 device_id=device_1
#
#[adc_mxn]
#10.3.72.1
#10.3.72.2
#
#[adc_mcn0]
#10.3.73.1 device_id=device_1 vvipv4_1=10.3.61.1 vvipv4_2=10.3.62.1 vvipv6_1=fc00::61:1 vvipv6_2=fc00::62:1
#10.3.73.2 device_id=device_2 vvipv4_1=10.3.61.2 vvipv4_2=10.3.62.2 vvipv6_1=fc00::61:2 vvipv6_2=fc00::62:2
#
#[adc_mcn1]
#10.3.74.1 device_id=device_1
#10.3.74.2 device_id=device_2
#
#[adc_mcn2]
#10.3.75.1 device_id=device_1
#10.3.75.2 device_id=device_2
#
#[adc_mcn3]
#10.3.76.1 device_id=device_1
#10.3.76.2 device_id=device_2
[pc-as-tun-mode]
#[app_global]
#[server_as_tun_mode]
#broken warning:
#10.4.52.71
[adc_mcn0]
[adc_mcn1]
[adc_mcn2]
[adc_mcn3]
[app_global]
[server_as_tun_mode]
[blade-mxn]
1.1.1.1 device_id=1
[blade-00]
1.1.1.1 device_id=1 vvipv4_1= vvipv4_2= vvipv6_1= vvipv6_2=
[blade-01]
1.1.1.1 device_id=1
[blade-02]
1.1.1.1 device_id=1
[blade-03]
1.1.1.1 device_id=1
[Functional_Host:children]
blade-00
blade-01
blade-02
blade-03

View File

@@ -1,72 +0,0 @@
- name: "copy freeipmi tools"
copy:
src: '{{ role_path }}/files/freeipmi-1.5.7-3.el7.x86_64.rpm'
dest: /tmp/ansible_deploy/
- name: "Install freeipmi rpm package"
yum:
name:
- "/tmp/ansible_deploy/freeipmi-1.5.7-3.el7.x86_64.rpm"
state: present
- name: "mkdir /opt/adc-exporter/"
file:
path: /opt/adc-exporter/
state: directory
- name: "copy node_exporter"
copy:
src: '{{ role_path }}/files/node_exporter'
dest: /opt/adc-exporter/node_exporter
mode: 0755
- name: "copy systemd_exporter"
copy:
src: '{{ role_path }}/files/systemd_exporter'
dest: /opt/adc-exporter/systemd_exporter
mode: 0755
- name: "copy ipmi_exporter"
copy:
src: '{{ role_path }}/files/ipmi_exporter'
dest: /opt/adc-exporter/ipmi_exporter
mode: 0755
- name: "templates adc-exporter-node.service"
template:
src: "{{role_path}}/templates/adc-exporter-node.service.j2"
dest: /usr/lib/systemd/system/adc-exporter-node.service
tags: template
- name: "templates adc-exporter-systemd.service"
template:
src: "{{role_path}}/templates/adc-exporter-systemd.service.j2"
dest: /usr/lib/systemd/system/adc-exporter-systemd.service
tags: template
- name: "templates adc-exporter-ipmi.service"
template:
src: "{{role_path}}/templates/adc-exporter-ipmi.service.j2"
dest: /usr/lib/systemd/system/adc-exporter-ipmi.service
tags: template
- name: 'adc-exporter-node service start'
systemd:
name: adc-exporter-node
enabled: yes
daemon_reload: yes
state: started
- name: 'adc-exporter-systemd service start'
systemd:
name: adc-exporter-systemd
enabled: yes
daemon_reload: yes
state: restarted
- name: 'adc-exporter-ipmi service start'
systemd:
name: adc-exporter-ipmi
enabled: yes
daemon_reload: yes
state: restarted

View File

@@ -1,11 +0,0 @@
[Unit]
Description=IPMI Exporter
After=network.target
[Service]
Type=simple
ExecStart=/opt/adc-exporter/ipmi_exporter
Restart=always
[Install]
WantedBy=multi-user.target

View File

@@ -1,11 +0,0 @@
[Unit]
Description=Node Exporter
After=network.target
[Service]
Type=simple
ExecStart=/opt/adc-exporter/node_exporter
Restart=always
[Install]
WantedBy=multi-user.target

View File

@@ -1,11 +0,0 @@
[Unit]
Description=Systemd Exporter
After=network.target
[Service]
Type=simple
ExecStart=/opt/adc-exporter/systemd_exporter --web.disable-exporter-metrics
Restart=always
[Install]
WantedBy=multi-user.target

View File

@@ -1,23 +0,0 @@
- name: "mkdir /opt/adc-exporter/"
file:
path: /opt/adc-exporter/
state: directory
- name: "copy ping_exporter"
copy:
src: '{{ role_path }}/files/ping_exporter'
dest: /opt/adc-exporter/ping_exporter
mode: 0755
- name: "templates ping_exporter.service"
template:
src: "{{role_path}}/templates/adc-exporter-ping.service.j2"
dest: /usr/lib/systemd/system/adc-exporter-ping.service
tags: template
- name: 'adc-exporter-ping service start'
systemd:
name: adc-exporter-ping
enabled: yes
daemon_reload: yes
state: restarted

View File

@@ -1,11 +0,0 @@
[Unit]
Description=Ping Exporter
After=network.target
[Service]
Type=simple
ExecStart=/opt/adc-exporter/ping_exporter {{ ping_test.target|join(" ")}} --ping.size=512 --ping.interval=0.5s
Restart=always
[Install]
WantedBy=multi-user.target

View File

@@ -1,34 +0,0 @@
- name: "mkdir /opt/adc-exporter-proxy/"
file:
path: /opt/adc-exporter-proxy/
state: directory
- name: "copy file to device"
copy:
src: '{{ role_path }}/files/'
dest: /tmp/ansible_deploy/
- name: "unarchive adc-exporter-proxy(NGINX)"
unarchive:
src: /tmp/ansible_deploy/adc_exporter_proxy.tar.gz
dest: /opt/adc-exporter-proxy
remote_src: yes
- name: "templates adc-exporter-proxy.service"
template:
src: "{{role_path}}/templates/adc-exporter-proxy.service.j2"
dest: /usr/lib/systemd/system/adc-exporter-proxy.service
tags: template
- name: "template nginx.conf"
template:
src: "{{role_path}}/templates/nginx.conf.j2"
dest: /opt/adc-exporter-proxy/adc-exporter-proxy/conf/nginx.conf
tags: template
- name: 'adc-exporter-proxy service start'
systemd:
name: adc-exporter-proxy
enabled: yes
daemon_reload: yes
state: restarted

View File

@@ -1,12 +0,0 @@
[Unit]
Description=ADC Exporter Proxy (NGINX) for NEZHA
After=network.target remote-fs.target nss-lookup.target
[Service]
Type=simple
ExecStart=/opt/adc-exporter-proxy/adc-exporter-proxy/sbin/nginx -p /opt/adc-exporter-proxy/adc-exporter-proxy
ExecReload=/opt/adc-exporter-proxy/adc-exporter-proxy/sbin/nginx -p /opt/adc-exporter-proxy/adc-exporter-proxy -s reload
ExecStop=/opt/adc-exporter-proxy/adc-exporter-proxy/sbin/nginx -p /opt/adc-exporter-proxy/adc-exporter-proxy -s stop
[Install]
WantedBy=multi-user.target

View File

@@ -1,152 +0,0 @@
user nobody;
worker_processes 1;
daemon off;
error_log logs/error.log;
error_log logs/error.log notice;
error_log logs/error.log info;
pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
sendfile on;
tcp_nopush on;
keepalive_timeout 65;
gzip on;
server {
listen 9000;
server_name localhost;
location /metrics/blade/mcn0/node_exporter {
proxy_pass http://192.168.100.1:9100/metrics;
}
location /metrics/blade/mcn1/node_exporter {
proxy_pass http://192.168.100.2:9100/metrics;
}
location /metrics/blade/mcn2/node_exporter {
proxy_pass http://192.168.100.3:9100/metrics;
}
location /metrics/blade/mcn3/node_exporter {
proxy_pass http://192.168.100.4:9100/metrics;
}
location /metrics/blade/mxn/node_exporter {
proxy_pass http://192.168.100.5:9100/metrics;
}
location /metrics/blade/mcn0/systemd_exporter {
proxy_pass http://192.168.100.1:9558/metrics;
}
location /metrics/blade/mcn1/systemd_exporter {
proxy_pass http://192.168.100.2:9558/metrics;
}
location /metrics/blade/mcn2/systemd_exporter {
proxy_pass http://192.168.100.3:9558/metrics;
}
location /metrics/blade/mcn3/systemd_exporter {
proxy_pass http://192.168.100.4:9558/metrics;
}
location /metrics/blade/mcn0/ipmi_exporter {
proxy_pass http://192.168.100.1:9290/metrics;
}
location /metrics/blade/mcn1/ipmi_exporter {
proxy_pass http://192.168.100.2:9290/metrics;
}
location /metrics/blade/mcn2/ipmi_exporter {
proxy_pass http://192.168.100.3:9290/metrics;
}
location /metrics/blade/mcn3/ipmi_exporter {
proxy_pass http://192.168.100.4:9290/metrics;
}
location /metrics/blade/mxn/ipmi_exporter {
proxy_pass http://192.168.100.5:9290/metrics;
}
location /metrics/blade/mcn0/certstore {
proxy_pass http://192.168.100.1:9002/metrics;
}
location /metrics/blade/mcn1/tfe {
proxy_pass http://192.168.100.2:9001/metrics;
}
location /metrics/blade/mcn2/tfe {
proxy_pass http://192.168.100.3:9001/metrics;
}
location /metrics/blade/mcn3/tfe {
proxy_pass http://192.168.100.4:9001/metrics;
}
location /metrics/blade/mcn0/sapp {
proxy_pass http://192.168.100.1:9273/metrics;
}
location /metrics/blade/mcn0/mrapm_device {
proxy_pass http://192.168.100.1:8901/metrics;
}
location /metrics/blade/mcn0/mrapm_stream {
proxy_pass http://192.168.100.1:8902/metrics;
}
location /metrics/blade/mcn1/mrapm_device {
proxy_pass http://192.168.100.2:8901/metrics;
}
location /metrics/blade/mcn1/mrapm_stream {
proxy_pass http://192.168.100.2:8902/metrics;
}
location /metrics/blade/mcn2/mrapm_device {
proxy_pass http://192.168.100.3:8901/metrics;
}
location /metrics/blade/mcn2/mrapm_stream {
proxy_pass http://192.168.100.3:8902/metrics;
}
location /metrics/blade/mcn3/mrapm_device {
proxy_pass http://192.168.100.4:8901/metrics;
}
location /metrics/blade/mcn3/mrapm_stream {
proxy_pass http://192.168.100.4:8902/metrics;
}
location /metrics/blade/mcn0/maat_redis {
proxy_pass http://192.168.100.1:9121/metrics;
}
location /metrics/blade/mcn0/ping_exporter {
proxy_pass http://192.168.100.1:9427/metrics;
}
}
}

View File

@@ -1,36 +0,0 @@
- name: "copy app_global rpm to destination server"
copy:
src: "{{ role_path }}/files/"
dest: /tmp/ansible_deploy/
- name: "install app rpms from localhost"
yum:
name:
- /tmp/ansible_deploy/emqx-centos7-v4.1.2.x86_64.rpm
- /tmp/ansible_deploy/app-sketch-global-1.0.3.202010.a7b2e40-1.el7.x86_64.rpm
state: present
- name: "template the app_sketch_global.conf"
template:
src: "{{ role_path }}/templates/app_sketch_global.conf.j2"
dest: /opt/tsg/app-sketch-global/conf/app_sketch_global.conf
- name: "template the zlog.conf"
template:
src: "{{ role_path }}/templates/zlog.conf.j2"
dest: /opt/tsg/app-sketch-global/conf/zlog.conf
- name: "Start emqx"
systemd:
name: emqx.service
state: started
enabled: yes
daemon_reload: yes
- name: "Start app-sketch-global"
systemd:
name: app-sketch-global.service
state: started
enabled: yes
daemon_reload: yes

View File

@@ -1,41 +0,0 @@
[SYSTEM]
#1:print on screen, 0:don't
DEBUG_SWITCH = 1
RUN_LOG_PATH = "conf/zlog.conf"
[breakpad]
disable_coredump=0
enable_breakpad=1
breakpad_minidump_dir=/tmp/app-sketch-global/crashreport
enable_breakpad_upload=0
breakpad_upload_url={{ breakpad_upload_url }}
[CONFIG]
#Number of running threads
thread-nu = 1
timeout = 3600
address="tcp://127.0.0.1:1883"
topic_name="APP_SIGNATURE_ID"
client_name="ExampleClientSub"
[maat]
# 0:json 1: redis 2: iris
maat_input_mode=1
table_info=./resource/table_info.conf
json_cfg_file=./resource/gtest.json
stat_file=logs/verify-policy.status
full_cfg_dir=verify-policy/
inc_cfg_dir=verify-policy/
maat_redis_server={{ maat_redis_server.address }}
maat_redis_port_range={{ maat_redis_server.port }}
maat_redis_db_index={{ maat_redis_server.db }}
effect_interval_s=1
accept_tags={"tags":[{"tag":"location","value":"Astana"}]}
[stat]
statsd_server={{ file_stat_ip }}
statsd_port=8100
statsd_cycle=5
# FS_OUTPUT_STATSD=1, FS_OUTPUT_INFLUX_LINE=2
statsd_format=2

View File

@@ -1,12 +0,0 @@
[global]
default format = "%d(%c), %V, %F, %U, %m%n"
[levels]
DEBUG=10
INFO=20
FATAL=30
[rules]
*.fatal "./logs/error.log.%d(%F)";
*.{{ app_sketch_global_log_level }} "./logs/app_sketch_global.log.%d(%F)"

View File

@@ -1,14 +0,0 @@
---
- name: "copy app_proto_identify rpm package destination server"
copy:
src: "{{ role_path }}/files/"
dest: /tmp/ansible_deploy/
- name: "install app_proto_identify"
yum:
name: "{{ app_packages }}"
state: present
skip_broken: yes
vars:
app_packages:
- /tmp/ansible_deploy/app_proto_identify-1.0.7.a5113ba-2.el7.x86_64.rpm

View File

@@ -1,12 +0,0 @@
[Unit]
Description=Redis persistent key-value database
After=network.target
[Service]
ExecStart=/usr/bin/redis-server /etc/cert-redis.conf --supervised systemd
ExecStop=/usr/libexec/redis-shutdown cert-redis
Type=notify
[Install]
WantedBy=multi-user.target

View File

@@ -160,7 +160,7 @@ loglevel notice
# Specify the log file name. Also the empty string can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
#logfile "/opt/tsg/cert-redis/6379/6379.log"
logfile "/home/tsg/cert-redis/6379/6379.log"
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
@@ -244,7 +244,7 @@ dbfilename dump.rdb
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
#dir /opt/tsg/cert-redis/6379/
dir /home/tsg/cert-redis/6379/
################################# REPLICATION #################################

Binary file not shown.

View File

@@ -0,0 +1,16 @@
[Unit]
Description=Redis persistent key-value database
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
ExecStart=/usr/local/bin/start-cert-redis
ExecStop=killall redis-server
Type=forking
RuntimeDirectory=redis
RuntimeDirectoryMode=0755
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,6 @@
#!/bin/bash
#
cp -rf redis-server /usr/local/bin/
cp -rf redis-cli /usr/local/bin
cp -rf cert-redis.service /usr/lib/systemd/system/
cp -rf start-cert-redis /usr/local/bin

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,4 @@
#!/bin/bash
#
/usr/local/bin/redis-server /home/tsg/cert-redis/6379/6379.conf

View File

@@ -1,11 +1,11 @@
- name: "copy cert-redis file to dest"
- name: "copy cert-redis to destination server"
copy:
src: "{{ role_path }}/files/"
dest: "{{ item.dest }}"
mode: "{{ item.mode }}"
with_items:
- { src: "cert-redis.conf" , dest: "/etc" , mode: "0644" }
- { src: "cert-redis.service" , dest: "/usr/lib/systemd/system" , mode: "0644" }
dest: /home/tsg
mode: 0755
- name: "install cert-redis"
shell: cd /home/tsg/cert-redis;sh install.sh
- name: "start cert-redis"
systemd:

View File

@@ -1,3 +0,0 @@
[Service]
MemoryLimit=16G
ExecStartPost=/bin/bash -c "echo 16G > /sys/fs/cgroup/memory/system.slice/certstore.service/memory.memsw.limit_in_bytes"

View File

@@ -3,31 +3,20 @@
src: "{{ role_path }}/files/"
dest: "/tmp/ansible_deploy/"
- name: Ensures /opt/tsg exists
file: path=/opt/tsg state=directory
- name: Ensures /home/tsg exists
file: path=/home/tsg state=directory
tags: mkdir
- name: install certstore
yum:
name:
- /tmp/ansible_deploy/certstore-2.1.6.20201215.f2e9ba7-1.el7.x86_64.rpm
- /tmp/ansible_deploy/certstore-2.1.2.20200828.f507b3e-1.el7.x86_64.rpm
state: present
- name: template certstore configure file
template:
src: "{{ role_path }}/templates/cert_store.ini.j2"
dest: /opt/tsg/certstore/conf/cert_store.ini
- name: template certstore zlog file
template:
src: "{{ role_path }}/templates/zlog.conf.j2"
dest: /opt/tsg/certstore/conf/zlog.conf
- name: "copy memory limit file to certstore.service.d"
copy:
src: "{{ role_path }}/files/memory.conf"
dest: /etc/systemd/system/certstore.service.d/
mode: 0644
dest: /home/tsg/certstore/conf/cert_store.ini
- name: "start certstore"
systemd:

View File

@@ -1,15 +1,9 @@
[SYSTEM]
#1:print on screen, 0:don't
DEBUG_SWITCH = 1
RUN_LOG_PATH = "conf/zlog.conf"
[breakpad]
disable_coredump=0
enable_breakpad=1
breakpad_minidump_dir=/tmp/certstore/crashreport
enable_breakpad_upload=1
breakpad_upload_url= {{ breakpad_upload_url }}
#10:DEBUG, 20:INFO, 30:FATAL
RUN_LOG_LEVEL = 10
RUN_LOG_PATH = ./logs
[CONFIG]
#Number of running threads
thread-nu = 4
@@ -20,8 +14,7 @@ expire_after = 30
#Local default root certificate path
local_debug = 1
ca_path = ./cert/tango-ca-v3-trust-ca.pem
untrusted_ca_path = ./cert/tango-ca-v3-untrust-ca.pem
untrusted_ca_path = ./cert/mesalab-ca-untrust.pem
[MAAT]
#Configure the load mode,
#0: using the configuration distribution network
@@ -38,23 +31,18 @@ inc_cfg_dir=./rule/inc/index
full_cfg_dir=./rule/full/index
#Json file path when json schema is used
pxy_obj_keyring=./conf/pxy_obj_keyring.json
[LIBEVENT]
#Local monitor port number, default is 9991
port = 9991
[CERTSTORE_REDIS]
#The Redis server IP address and port number where the certificate is stored locally
ip = 127.0.0.1
port = 6379
[MAAT_REDIS]
#Maat monitors the Redis server IP address and port number
ip = {{ maat_redis_server.address }}
port = {{ maat_redis_server.port }}
dbindex = {{ maat_redis_server.db }}
[stat]
statsd_server=127.0.0.1
statsd_port=8100
statsd_set_prometheus_port=9002
statsd_set_prometheus_url_path=/metrics
statsd_server=192.168.100.1
statsd_port=8126

View File

@@ -1,10 +0,0 @@
[global]
default format = "%d(%c), %V, %F, %U, %m%n"
[levels]
DEBUG=10
INFO=20
FATAL=30
[rules]
*.fatal "./logs/error.log.%d(%F)";
*.{{ certstore_log_level }} "./logs/certstore.log.%d(%F)"

View File

@@ -0,0 +1,13 @@
[Unit]
Description=clotho
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
ExecStart=/home/mesasoft/clotho/clotho
ExecStop=killall clotho
Type=forking
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,29 @@
- name: "copy clotho rpm to destination server"
copy:
src: "{{ role_path }}/files/clotho-debug-1.0.0.-1.el7.x86_64.rpm"
dest: /tmp/ansible_deploy/
- name: "copy clotho.service to destination server"
copy:
src: "{{ role_path }}/files/clotho.service"
dest: /usr/lib/systemd/system
mode: 0755
- name: "install clotho rpm from localhost"
yum:
name:
- /tmp/ansible_deploy/clotho-debug-1.0.0.-1.el7.x86_64.rpm
state: present
- name: "Template the clotho.conf"
template:
src: "{{ role_path }}/templates/clotho.conf.j2"
dest: /home/mesasoft/clotho/conf/clotho.conf
tags: template
- name: "start clotho"
systemd:
name: clotho.service
enabled: yes
daemon_reload: yes

View File

@@ -0,0 +1,11 @@
[KAFKA]
BROKER_LIST={{ log_kafkabrokers.address }}
[SYSTEM]
{% if tsg_running_type == 0 or tsg_running_type == 1 %}
NIC_NAME={{ server.ethname }}
{% else %}
NIC_NAME={{ nic_mgr.name }}
{% endif %}
LOG_LEVEL=10
LOG_PATH=log/clotho

Binary file not shown.

Binary file not shown.

View File

@@ -1,38 +0,0 @@
---
- name: "docker-ce: copy docker-ce.zip to dest device"
copy:
src: '{{ role_path }}/files/docker-ce.zip'
dest: /tmp/ansible_deploy/
- name: "docker-ce: unarchive docker-ce.zip"
unarchive:
src: /tmp/ansible_deploy/docker-ce.zip
dest: /tmp/ansible_deploy/
remote_src: yes
- name: "docker-ce: install docker-ce rpm package and dependencies"
yum:
name:
- /tmp/ansible_deploy/docker-ce/container-selinux-2.119.2-1.911c772.el7_8.noarch.rpm
- /tmp/ansible_deploy/docker-ce/docker-ce-19.03.13-3.el7.x86_64.rpm
- /tmp/ansible_deploy/docker-ce/docker-ce-cli-19.03.13-3.el7.x86_64.rpm
- /tmp/ansible_deploy/docker-ce/containerd.io-1.3.7-3.1.el7.x86_64.rpm
- /tmp/ansible_deploy/docker-ce/selinux-policy-targeted-3.13.1-266.el7_8.1.noarch.rpm
- /tmp/ansible_deploy/docker-ce/selinux-policy-3.13.1-266.el7_8.1.noarch.rpm
- /tmp/ansible_deploy/docker-ce/policycoreutils-python-2.5-34.el7.x86_64.rpm
- /tmp/ansible_deploy/docker-ce/policycoreutils-2.5-34.el7.x86_64.rpm
- /tmp/ansible_deploy/docker-ce/libselinux-utils-2.5-15.el7.x86_64.rpm
- /tmp/ansible_deploy/docker-ce/libselinux-python-2.5-15.el7.x86_64.rpm
- /tmp/ansible_deploy/docker-ce/libselinux-2.5-15.el7.x86_64.rpm
- /tmp/ansible_deploy/docker-ce/setools-libs-3.3.8-4.el7.x86_64.rpm
- /tmp/ansible_deploy/docker-ce/libsepol-2.5-10.el7.x86_64.rpm
- /tmp/ansible_deploy/docker-ce/libsemanage-python-2.5-14.el7.x86_64.rpm
- /tmp/ansible_deploy/docker-ce/libsemanage-2.5-14.el7.x86_64.rpm
state: present
- name: "docker-ce: systemctl start docker and enabled docker"
systemd:
name: docker
enabled: yes
daemon_reload: yes
state: started

View File

@@ -1,18 +0,0 @@
---
- name: "docker-compose: copy docker-compose.zip to dest device"
copy:
src: '{{ role_path }}/files/docker-compose.zip'
dest: /tmp/ansible_deploy/
- name: "docker-compose: unarchive docker-compose.zip"
unarchive:
src: /tmp/ansible_deploy/docker-compose.zip
dest: /tmp/ansible_deploy/
remote_src: yes
- name: "docker-compose: install docker-compose using pip3"
pip:
requirements: /tmp/ansible_deploy/docker-compose/requirements.txt
extra_args: "--no-index --find-links=file:///tmp/ansible_deploy/docker-compose"
state: forcereinstall
executable: pip3

View File

@@ -1,4 +0,0 @@
---
- include: docker-ce.yml
- include: python3.yml
- include: docker-compose.yml

View File

@@ -1,21 +0,0 @@
---
- name: "python3: copy python3.zip to dest device"
copy:
src: '{{ role_path }}/files/python3.zip'
dest: /tmp/ansible_deploy/
- name: "python3: unarchive python3.zip"
unarchive:
src: /tmp/ansible_deploy/python3.zip
dest: /tmp/ansible_deploy/
remote_src: yes
- name: "python3: install python3 rpm package and dependencies"
yum:
name:
- /tmp/ansible_deploy/python3/python3-libs-3.6.8-13.el7.x86_64.rpm
- /tmp/ansible_deploy/python3/python3-3.6.8-13.el7.x86_64.rpm
- /tmp/ansible_deploy/python3/python3-pip-9.0.3-7.el7_7.noarch.rpm
- /tmp/ansible_deploy/python3/python3-setuptools-39.2.0-10.el7.noarch.rpm
- /tmp/ansible_deploy/python3/libtirpc-0.2.4-0.16.el7.x86_64.rpm
state: present

View File

@@ -11,22 +11,22 @@
skip_broken: yes
vars:
fw_packages:
- /tmp/ansible_deploy/capture_packet_plug-3.0.6.a2db4a4-2.el7.x86_64.rpm
- /tmp/ansible_deploy/conn_telemetry-1.0.2.8d6da43-2.el7.x86_64.rpm
- /tmp/ansible_deploy/dns-2.0.9.b639626-2.el7.x86_64.rpm
- /tmp/ansible_deploy/ftp-1.0.8.13d5fda-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_dns_plug-3.0.2.dab58fa-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_ftp_plug-3.0.1.0a78573-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_http_plug-3.0.4.484b54d-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_mail_plug-3.0.2.7401550-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_quic_plug-3.0.4.947ef77-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_ssl_plug-3.0.6.a121701-2.el7.x86_64.rpm
- /tmp/ansible_deploy/http-2.0.5.c61ad9a-2.el7.x86_64.rpm
- /tmp/ansible_deploy/mail-1.0.9.c1d3bde-2.el7.x86_64.rpm
- /tmp/ansible_deploy/quic-1.1.17.8c22b4d-2.el7.x86_64.rpm
- /tmp/ansible_deploy/ssl-1.0.12.16b8fb5-2.el7.x86_64.rpm
- /tmp/ansible_deploy/tsg_conn_sketch-2.0.12.0ad5a3b-2.el7.x86_64.rpm
- /tmp/ansible_deploy/app_control_plug-1.0.9.97846eb-2.el7.x86_64.rpm
- /tmp/ansible_deploy/capture_packet_plug-3.0.2.09f193c-2.el7.x86_64.rpm
- /tmp/ansible_deploy/clotho-debug-1.0.0.-1.el7.x86_64.rpm
- /tmp/ansible_deploy/dns-2.0.6.d8317e9-2.el7.x86_64.rpm
- /tmp/ansible_deploy/ftp-1.0.6.2710506-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_dns_plug-3.0.0.0a5d574-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_ftp_plug-3.0.0.7a867ea-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_http_plug-3.0.0.1ca1c65-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_mail_plug-3.0.0.3b4e481-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_quic_plug-3.0.0.b06d39c-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_ssl_plug-3.0.1.7ea9976-2.el7.x86_64.rpm
- /tmp/ansible_deploy/http-2.0.3.9218b4b-2.el7.x86_64.rpm
- /tmp/ansible_deploy/mail-1.0.7.9e3be05-2.el7.x86_64.rpm
- /tmp/ansible_deploy/quic-1.1.6.d6755d8-2.el7.x86_64.rpm
- /tmp/ansible_deploy/ssl-1.0.3.e8482a4-2.el7.x86_64.rpm
- /tmp/ansible_deploy/tsg_conn_record-1.0.2.2afb19a-2.el7.x86_64.rpm
- /tmp/ansible_deploy/tsg_conn_sketch-2.0.v2.0_alpha.af621ca-2.el7.x86_64.rpm
- name: "Template the tsgconf/main.conf"
template:
@@ -46,15 +46,3 @@
src: "{{ role_path }}/templates/capture_packet_plug.conf.j2"
dest: /home/mesasoft/sapp_run/conf/capture_packet_plug.conf
tags: template
- name: "Template the tsgconf/app_l7_proto_id.conf"
template:
src: "{{ role_path }}/templates/app_l7_proto_id.conf.j2"
dest: /home/mesasoft/sapp_run/tsgconf/app_l7_proto_id.conf
- name: "Template the /home/mesasoft/sapp_run/plug/business/tsg_conn_sketch/tsg_conn_sketch.inf"
template:
src: "{{ role_path }}/templates/tsg_conn_sketch.inf.j2"
dest: /home/mesasoft/sapp_run/plug/business/tsg_conn_sketch/tsg_conn_sketch.inf
tags: template

View File

@@ -1,51 +0,0 @@
#TYPE1:UCHAR,2:USHORT,3:USTRING,4:ULOG,5:USTRING,6:FILE,7:UBASE64,8:PACKET
#TYPE FIELD VALUE
STRING UNCATEGORIZED 100
STRING UNCATEGORIZED 101
STRING UNKNOWN_OTHER 102
STRING DNS 103
STRING FTP 104
STRING FTPS 105
STRING HTTP 106
STRING HTTPS 107
STRING ICMP 108
STRING IKE 109
STRING MAIL 110
STRING IMAPS 111
STRING IPSEC 112
STRING XMPP 113
STRING L2TP 114
STRING NTP 115
STRING POP3S 117
STRING PPTP 118
STRING QUIC 119
STRING SIP 120
STRING SMB 121
STRING SMTPS 123
STRING SPDY 124
STRING SSH 125
STRING SSL 126
STRING SOCKS 127
STRING TELNET 128
STRING DHCP 129
STRING RADIUS 130
STRING OPENVPN 131
STRING STUN 132
STRING TEREDO 133
STRING DTLS 134
STRING DoH 135
STRING ISAKMP 136
STRING MDNS 137
STRING NETBIOS 138
STRING NETFLOW 139
STRING RDP 140
STRING RTCP 141
STRING RTP 142
STRING SLP 143
STRING SNMP 144
STRING SSDP 145
STRING TFTP 146
STRING BJNP 147
STRING LDAP 148
STRING RTMP 149
STRING RTSP 150

View File

@@ -1,28 +1,29 @@
[MAAT]
MAAT_MODE=2
#EFFECTIVE_FLAG=
STAT_SWITCH=1
PERF_SWITCH=1
TABLE_INFO=conf/capture_packet_tableinfo.conf
STAT_FILE=capture_packet_maat.status
EFFECT_INTERVAL_S=1
REDIS_IP={{ maat_redis_server.address }}
REDIS_PORT_NUM={{ maat_redis_server.port_num }}
REDIS_PORT={{ maat_redis_server.port }}
REDIS_INDEX={{ maat_redis_server.db }}
JSON_CFG_FILE=conf/capture_packet_maat.json
INC_CFG_DIR=capture_packet_rule/inc/index/
FULL_CFG_DIR=capture_packet_rule/full/index/
EFFECTIVE_RANGE_FILE=/opt/tsg/etc/tsg_device_tag.json
ACCEPT_TAGS={"tags":[{"tag":"data_center","value":"{{ data_center }}"}]}
[LOG]
NIC_NAME={{ nic_mgr.name }}
BROKER_LIST={{ log_kafkabrokers.address | join(",") }}
FIELD_FILE=conf/capture_packet_log_field.conf
[SYSTEM]
LOG_LEVEL={{ capture_packet_log_level }}
LOG_PATH=./tsglog/capture_packet_plug/capture_packet
[MAAT]
MAAT_MODE=2
#EFFECTIVE_FLAG=
STAT_SWITCH=1
PERF_SWITCH=1
TABLE_INFO=conf/capture_packet_tableinfo.conf
STAT_FILE=capture_packet_maat.status
EFFECT_INTERVAL_S=1
REDIS_IP={{ maat_redis_server.address }}
REDIS_PORT_NUM=1
REDIS_PORT={{ maat_redis_server.port }}
REDIS_INDEX=0
JSON_CFG_FILE=conf/capture_packet_maat.json
INC_CFG_DIR=capture_packet_rule/inc/index/
FULL_CFG_DIR=capture_packet_rule/full/index/
[LOG]
{# Bug fix: the original condition was "tsg_running_type == 0 or 1", which is
   always true because the bare literal 1 is truthy on its own, so the
   {% else %} branch (nic_mgr.name) was unreachable. #}
{% if tsg_running_type in (0, 1) %}
NIC_NAME={{ server.ethname }}
{% else %}
NIC_NAME={{ nic_mgr.name }}
{% endif %}
{# NOTE(review): the previous revision rendered this with
   `log_kafkabrokers.address | join(",")` — if address is still a list, this
   now renders Python list syntax instead of a broker CSV; confirm. #}
BROKER_LIST={{ log_kafkabrokers.address }}
FIELD_FILE=conf/capture_packet_log_field.conf
[SYSTEM]
LOG_LEVEL=10
LOG_PATH=./tsglog/capture_packet_plug/capture_packet

Some files were not shown because too many files have changed in this diff Show More