Compare commits


1 Commit

Author       SHA1        Message   Date
zhangzhihan  6444e57629  update    2020-09-15 12:20:23 +08:00
221 changed files with 532 additions and 5284 deletions

View File

@@ -1,90 +0,0 @@
- hosts: adc_mxn
remote_user: root
roles:
- {role: adc_exporter, tags: adc_exporter}
- {role: adc_exporter_proxy, tags: adc_exporter_proxy}
# - {role: switch_rule, tags: switch_rule}
- hosts: adc_mcn0
remote_user: root
vars_files:
- install_config/group_vars/adc_global.yml
- install_config/group_vars/adc_mcn0.yml
roles:
- {role: framework, tags: framework}
- {role: kernel-ml, tags: kernel-ml}
- {role: mrzcpd, tags: mrzcpd}
- {role: sapp, tags: sapp}
- {role: tsg_master, tags: tsg_master}
- {role: kni, tags: kni}
- {role: firewall, tags: firewall}
# - tsg_app
- {role: http_healthcheck, tags: http_healthcheck}
- {role: redis, tags: redis}
- {role: cert-redis, tags: cert-redis}
- {role: maat-redis, tags: maat-redis, when: deploy_mode == "cluster"}
- {role: certstore, tags: certstore}
- {role: telegraf_statistic, tags: telegraf_statistic}
- {role: app_proto_identify, tags: app_proto_identify}
- {role: adc_exporter, tags: adc_exporter}
# - {role: switch_control, tags: switch_control}
- {role: tsg-env-patch, tags: tsg-env-patch}
- hosts: adc_mcn1
remote_user: root
vars_files:
- install_config/group_vars/adc_global.yml
- install_config/group_vars/adc_mcn1.yml
roles:
# - tsg-env-mcn1
- {role: framework, tags: framework}
- {role: kernel-ml, tags: kernel-ml}
- {role: mrzcpd, tags: mrzcpd}
- {role: tfe, tags: tfe}
- {role: adc_exporter, tags: adc_exporter}
# - {role: switch_control, tags: switch_control}
- {role: tsg-env-patch, tags: tsg-env-patch}
- hosts: adc_mcn2
remote_user: root
vars_files:
- install_config/group_vars/adc_global.yml
- install_config/group_vars/adc_mcn2.yml
roles:
# - tsg-env-mcn2
- {role: framework, tags: framework}
- {role: kernel-ml, tags: kernel-ml}
- {role: mrzcpd, tags: mrzcpd}
- {role: tfe, tags: tfe}
- {role: adc_exporter, tags: adc_exporter}
# - {role: switch_control, tags: switch_control}
- {role: tsg-env-patch, tags: tsg-env-patch}
- hosts: adc_mcn3
remote_user: root
vars_files:
- install_config/group_vars/adc_global.yml
- install_config/group_vars/adc_mcn3.yml
roles:
- {role: framework, tags: framework}
- {role: kernel-ml, tags: kernel-ml}
- {role: mrzcpd, tags: mrzcpd}
- {role: tfe, tags: tfe}
# - {role: adc_exporter, tags: adc_exporter}
- {role: switch_control, tags: switch_control}
- {role: tsg-env-patch, tags: tsg-env-patch}
- hosts: packet_dump_server
remote_user: root
vars_files:
- install_config/group_vars/adc_global.yml
roles:
- {role: framework, tags: framework}
- {role: packet_dump, tags: packet_dump}
- hosts: app_global
remote_user: root
vars_files:
- install_config/group_vars/app_global.yml
roles:
- {role: app_global, tags: app_global}
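
Every role in the playbook above carries a matching tag, so individual roles can be re-run in isolation. A minimal sketch, assuming the playbook file is deploy.yml and an inventory at install_config/hosts (both paths are assumptions, not shown in this diff):

    # Re-run only the adc_exporter role on the hosts that carry it (hypothetical paths)
    ansible-playbook -i install_config/hosts deploy.yml --tags adc_exporter
    # Preview the framework role on the adc_mcn0 group without changing anything
    ansible-playbook -i install_config/hosts deploy.yml --tags framework --limit adc_mcn0 --check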

deploy.yml  (new file, 83 lines)
View File

@@ -0,0 +1,83 @@
- hosts: adc_mxn
remote_user: root
roles:
# - tsg-env-mxn
- hosts: adc_mcn0
remote_user: root
vars_files:
- install_config/group_vars/adc_global.yml
- install_config/group_vars/adc_mcn0.yml
roles:
# - tsg-env-mcn0
- framework
- kernel-ml
- mrzcpd
- sapp
- tsg_master
- kni
- firewall
- http_healthcheck
- clotho
- certstore
- cert-redis
- telegraf_statistic
- tsg_device_tag
- hosts: adc_mcn1
remote_user: root
vars_files:
- install_config/group_vars/adc_global.yml
- install_config/group_vars/adc_mcn1.yml
roles:
# - tsg-env-mcn1
- framework
- kernel-ml
- mrzcpd
- tfe
- hosts: adc_mcn2
remote_user: root
vars_files:
- install_config/group_vars/adc_global.yml
- install_config/group_vars/adc_mcn2.yml
roles:
# - tsg-env-mcn2
- framework
- kernel-ml
- mrzcpd
- tfe
- hosts: adc_mcn3
remote_user: root
vars_files:
- install_config/group_vars/adc_global.yml
- install_config/group_vars/adc_mcn3.yml
roles:
# - tsg-env-mcn3
- framework
- kernel-ml
- mrzcpd
- tfe
- hosts: server-as-tun-mode
remote_user: root
vars_files:
- install_config/group_vars/server_as_tun_mode.yml
roles:
- kernel-ml
- framework
- mrzcpd
- tsg-env-tun-mode
- sapp
- tsg_master
- kni
- firewall
- http_healthcheck
- clotho
- certstore
- cert-redis
- tfe
- telegraf_statistic
- proxy_status
- tsg_device_tag
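
The new deploy.yml drops the per-role tags and adds a server-as-tun-mode play. A quick pre-flight check, again assuming an inventory at install_config/hosts (an assumed path):

    # Validate the playbook and list what each play would touch (hypothetical inventory path)
    ansible-playbook -i install_config/hosts deploy.yml --syntax-check
    ansible-playbook -i install_config/hosts deploy.yml --list-hosts --list-tasks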

View File

@@ -1,73 +1,58 @@
#########################################
#####1: Inline_device; 2: Allot; 3: ADC_Tun_mode;
tsg_access_type: 2
tsg_access_type: 3
#####2: ADC;
tsg_running_type: 2
#####deploy mode: cluster, single
deploy_mode: "cluster"
########################################
#Deploy_finished_reboot
Deploy_finished_reboot: 0
########################################
#IP Config
maat_redis_city_server:
address: "10.4.62.253"
port: 7002
maat_redis_server:
address: "192.168.100.1"
address: "192.168.40.168"
port: 7002
port_num: 1
db: 0
dynamic_maat_redis_server:
address: "192.168.100.1"
address: "192.168.40.168"
port: 7002
port_num: 1
db: 1
db: 0
cert_store_server:
address: "192.168.100.1"
port: 9991
log_kafkabrokers:
address: ['1.1.1.1:9092','2.2.2.2:9092']
address: "1.1.1.1:9092,2.2.2.2:9092"
log_minio:
address: "10.4.62.253"
address: "192.168.40.168;"
port: 9090
#########################################
#Log Level Config
#Log level: 10:DEBUG 20:INFO 30:FATAL
fw_ftp_log_level: 10
fw_mail_log_level: 10
fw_http_log_level: 10
fw_dns_log_level: 10
fw_quic_log_level: 10
app_control_log_level: 10
capture_packet_log_level: 10
tsg_log_level: 10
tsg_master_log_level: 10
kni_log_level: 10
#Log level: DEBUG INFO FATAL
tfe_log_level: FATAL
tfe_http_log_level: FATAL
pangu_log_level: FATAL
doh_log_level: FATAL
certstore_log_level: FATAL
packet_dump_log_level: 10
fw_ftp_log_level: 30
fw_mail_log_level: 30
fw_http_log_level: 30
fw_dns_log_level: 30
fw_quic_log_level: 30
capture_packet_log_level: 30
tsg_log_level: 30
tsg_master_log_level: 30
kni_log_level: 30
tfe_log_level: 30
tfe_http_log_level: 30
pangu_log_level: 30
doh_log_level: 30
certstore_log_level: 30
clotho_log_level: 10
#######################################
#Sapp Performance Config
#When Sapp runs on ADC compute blade 0, the 30+8 configuration below is recommended for higher processing performance
sapp:
worker_threads: 42
send_only_threads_max: 1
bind_mask: 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43
worker_threads: 30
send_only_threads_max: 8
bind_mask: 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37
inbound_route_dir: 1
########################################
@@ -90,35 +75,15 @@ kni:
#Tfe Config
tfe:
nr_threads: 32
mirror_enable: 1
mc_cache_eth: lo
keykeeper:
no_cache: 0
########################################
#Marsio Config
#When Marsio runs on an ADC compute blade, the configuration below is recommended for higher processing performance
mcn0_mrzcpd:
iocore: 52,53,54,55
mcn123_mrzcpd:
iocore: 54,55
mrzcpd:
iocore: 44,45,46,47
mrtunnat:
lcore_id: 48,49,50,51
#########################################
#Tsg_app
tsg_app_enable: 0
app_global_ip: "1.1.1.1"
applog_level: 10
app_master_log_level: 10
app_sketch_local_log_level: 10
app_control_plug_log_level: 10
breakpad_upload_url: http://10.4.63.4:9000/api/2/minidump/?sentry_key=3203b43fd5384a7dbe6a48ecb1f3c595
data_center: Kyzylorda
tsg_master_entrance_id: 9
nic_mgr:
name: em1
sapp_prometheus_enable: 1
sapp_prometheus_port: 9273
sapp_prometheus_url_path: "/metrics"
lcore_id: 40,41,42,43
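
The new Sapp settings keep worker_threads + send_only_threads_max (30 + 8) equal to the 38 cores listed in bind_mask (0-37), and both Maat Redis endpoints now point at 192.168.40.168:7002, db 0. A quick reachability check from a blade before deploying, as a sketch only:

    # Confirm the Maat Redis endpoint configured above answers
    redis-cli -h 192.168.40.168 -p 7002 -n 0 ping   # expected reply: PONG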

View File

@@ -1,7 +1,7 @@
#########################################
#Mcn0 management-port NIC name
nic_mgr:
name: ens1f3
name: enp6s0
#########################################
#Fixed configuration for the Mcn0 traffic-access NICs
@@ -29,13 +29,9 @@ inline_device_config:
#########################################
#Allot access related settings
AllotAccess:
#virturlInterface_1: ens1f2.103
#virturlInterface_2: ens1f2.104
virturlID_1: 1201
virturlID_2: 1202
virturlID_3: 1301
virturlID_4: 1302
#vvipv4_mask: 24
#vvipv6_mask: 64
bladename: mcn0
virturlInterface_1: ens1f2.103
virturlInterface_2: ens1f2.104
virturlID_1: 103
virturlID_2: 104
vvipv4_mask: 24
vvipv6_mask: 64

View File

@@ -1,7 +1,7 @@
#########################################
#Mcn1 management-port NIC name
nic_mgr:
name: ens1f3
name: enp6s0
#########################################
#Fixed configuration for the Mcn1 traffic-access NICs
@@ -15,5 +15,3 @@ nic_inner_ctrl:
nic_traffic_mirror:
name: ens1f2
use_mrzcpd: 1
bladename: mcn1

View File

@@ -1,7 +1,7 @@
#########################################
#Mcn2 management-port NIC name
nic_mgr:
name: ens8f3
name: enp6s0
#########################################
#Fixed configuration for the Mcn2 traffic-access NICs
@@ -15,5 +15,3 @@ nic_inner_ctrl:
nic_traffic_mirror:
name: ens8f2
use_mrzcpd: 1
bladename: mcn2

View File

@@ -1,7 +1,7 @@
#########################################
#Mcn3 management-port NIC name
nic_mgr:
name: ens8f3
name: enp6s0
#########################################
#Fixed configuration for the Mcn3 traffic-access NICs
@@ -15,5 +15,3 @@ nic_inner_ctrl:
nic_traffic_mirror:
name: ens8f2
use_mrzcpd: 1
bladename: mcn3

View File

@@ -1,10 +0,0 @@
#########################################
app_sketch_global_log_level: 10
maat_redis_server:
address: "192.168.40.168"
port: 7002
db: 0
file_stat_ip: "1.1.1.1"

View File

@@ -1,15 +1,8 @@
#########################################
#####0: Pcap; 1: Inline_device; 5:ATCA_VXLAN;
tsg_access_type: 0
#####0: Pcap; 1: Inline_device; 4: ATCA_Vlan_Flipping; 5:ATCA_VXLAN;
tsg_access_type: 1
#####0: Tun_mode; 1: normal;
tsg_running_type: 0
#####deploy mode: cluster, single
deploy_mode: "single"
########################################
#Deploy_finished_reboot
Deploy_finished_reboot: 0
tsg_running_type: 1
########################################
#Server Basic Config
@@ -21,32 +14,25 @@ nic_inner_ctrl:
#########################################
#IP Config
#The maat_redis_city_server settings are only used when deploying in cluster mode
maat_redis_city_server:
address: ""
port:
maat_redis_server:
address: "#Bifang IP#"
address: "192.168.40.168"
port: 7002
port_num: 1
db: 0
dynamic_maat_redis_server:
address: "#Bifang IP#"
address: "192.168.40.168"
port: 7002
port_num: 1
db: 1
db: 0
cert_store_server:
address: "192.168.100.1"
port: 9991
log_kafkabrokers:
address: ['1.1.1.1:9092','2.2.2.2:9092']
address: "1.1.1.1:9092,2.2.2.2:9092"
log_minio:
address: "10.9.62.253"
address: "192.168.40.168;"
port: 9090
#########################################
@@ -57,28 +43,24 @@ fw_mail_log_level: 10
fw_http_log_level: 10
fw_dns_log_level: 10
fw_quic_log_level: 10
app_control_log_level: 10
capture_packet_log_level: 10
tsg_log_level: 10
tsg_master_log_level: 10
kni_log_level: 10
#Log level: DEBUG INFO FATAL
tfe_log_level: FATAL
tfe_http_log_level: FATAL
pangu_log_level: FATAL
doh_log_level: FATAL
tfe_log_level: 10
tfe_http_log_level: 10
pangu_log_level: 10
doh_log_level: 10
certstore_log_level: 10
packet_dump_log_level: 10
clotho_log_level: 10
#########################################
#Sapp Performance Config
#If tsg_access_type=0 (sapp runs in Pcap mode), the settings below can be ignored
sapp:
worker_threads: 23
send_only_threads_max: 1
bind_mask: 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24
worker_threads: 16
send_only_threads_max: 8
bind_mask: 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23
inbound_route_dir: 1
#########################################
@@ -108,7 +90,9 @@ kni:
#Tfe Config
tfe:
nr_threads: 32
mirror_enable: 1
mc_cache_eth: lo
keykeeper:
no_cache: 0
#########################################
#Marsio Config
@@ -118,15 +102,6 @@ mrzcpd:
mrtunnat:
lcore_id: 38
#########################################
#Tsg_app
tsg_app_enable: 1
app_global_ip: "1.1.1.1"
applog_level: 10
app_master_log_level: 10
app_sketch_local_log_level: 10
app_control_plug_log_level: 10
#########################################
#ATCA Config
#The settings below take effect only when tsg_access_type=4
@@ -153,14 +128,3 @@ inline_device_config:
keepalive_ip: 192.168.1.30
keepalive_mask: 255.255.255.252
data_incoming: eth5
#########################################
#Newly added settings; all are defaults and need no changes
breakpad_upload_url: http://127.0.0.1:9000/api/2/minidump/?sentry_key=3556bac347c74585a994eb6823faf5c6
data_center: Beijing
tsg_master_entrance_id: 0
sapp_prometheus_enable: 1
sapp_prometheus_port: 9273
sapp_prometheus_url_path: "/metrics"

View File

@@ -4,11 +4,7 @@
#Set the device_id variable according to the device number
#The variables vvipv4_1, vvipv4_2, vvipv6_1 and vvipv6_2 are Allot-specific; in other environments leave them empty or delete them
#
#APP deployment added in release 20.09
#[app_global]
#0.0.0.0
#[server_as_tun_mode]
#[server-as-tun-mode]
#1.1.1.1 device_id=device_1
#
#[adc_mxn]
@@ -31,15 +27,10 @@
#10.3.76.1 device_id=device_1
#10.3.76.2 device_id=device_2
#[app_global]
#[server_as_tun_mode]
#broken warning:
#10.4.52.71
[server-as-tun-mode]
[adc_mxn]
[adc_mcn0]
[adc_mcn1]
[adc_mcn2]
[adc_mcn3]
[app_global]
[server_as_tun_mode]
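
The inventory template above renames the tun-mode group to server-as-tun-mode, matching the new deploy.yml play. Assuming it is saved as install_config/hosts (an assumed path), group layout and host variables can be inspected with:

    # Show group nesting and resolved hosts (hypothetical inventory path)
    ansible-inventory -i install_config/hosts --graph
    ansible all -i install_config/hosts --list-hosts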

View File

@@ -1,72 +0,0 @@
- name: "copy freeipmi tools"
copy:
src: '{{ role_path }}/files/freeipmi-1.5.7-3.el7.x86_64.rpm'
dest: /tmp/ansible_deploy/
- name: "Install freeipmi rpm package"
yum:
name:
- "/tmp/ansible_deploy/freeipmi-1.5.7-3.el7.x86_64.rpm"
state: present
- name: "mkdir /opt/adc-exporter/"
file:
path: /opt/adc-exporter/
state: directory
- name: "copy node_exporter"
copy:
src: '{{ role_path }}/files/node_exporter'
dest: /opt/adc-exporter/node_exporter
mode: 0755
- name: "copy systemd_exporter"
copy:
src: '{{ role_path }}/files/systemd_exporter'
dest: /opt/adc-exporter/systemd_exporter
mode: 0755
- name: "copy ipmi_exporter"
copy:
src: '{{ role_path }}/files/ipmi_exporter'
dest: /opt/adc-exporter/ipmi_exporter
mode: 0755
- name: "templates adc-exporter-node.service"
template:
src: "{{role_path}}/templates/adc-exporter-node.service.j2"
dest: /usr/lib/systemd/system/adc-exporter-node.service
tags: template
- name: "templates adc-exporter-systemd.service"
template:
src: "{{role_path}}/templates/adc-exporter-systemd.service.j2"
dest: /usr/lib/systemd/system/adc-exporter-systemd.service
tags: template
- name: "templates adc-exporter-ipmi.service"
template:
src: "{{role_path}}/templates/adc-exporter-ipmi.service.j2"
dest: /usr/lib/systemd/system/adc-exporter-ipmi.service
tags: template
- name: 'adc-exporter-node service start'
systemd:
name: adc-exporter-node
enabled: yes
daemon_reload: yes
state: started
- name: 'adc-exporter-systemd service start'
systemd:
name: adc-exporter-systemd
enabled: yes
daemon_reload: yes
state: restarted
- name: 'adc-exporter-ipmi service start'
systemd:
name: adc-exporter-ipmi
enabled: yes
daemon_reload: yes
state: restarted
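
The role above installs the node, systemd and IPMI exporters and wires them up as systemd units. A spot check on a blade after the play has run might look like this (the ports are taken from the nginx proxy config further down in this diff):

    # Verify the exporter units and scrape them locally
    systemctl is-active adc-exporter-node adc-exporter-systemd adc-exporter-ipmi
    curl -s http://127.0.0.1:9100/metrics | head   # node_exporter
    curl -s http://127.0.0.1:9558/metrics | head   # systemd_exporter
    curl -s http://127.0.0.1:9290/metrics | head   # ipmi_exporter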

View File

@@ -1,11 +0,0 @@
[Unit]
Description=IPMI Exporter
After=network.target
[Service]
Type=simple
ExecStart=/opt/adc-exporter/ipmi_exporter
Restart=always
[Install]
WantedBy=multi-user.target

View File

@@ -1,11 +0,0 @@
[Unit]
Description=Node Exporter
After=network.target
[Service]
Type=simple
ExecStart=/opt/adc-exporter/node_exporter
Restart=always
[Install]
WantedBy=multi-user.target

View File

@@ -1,11 +0,0 @@
[Unit]
Description=Systemd Exporter
After=network.target
[Service]
Type=simple
ExecStart=/opt/adc-exporter/systemd_exporter --web.disable-exporter-metrics
Restart=always
[Install]
WantedBy=multi-user.target

View File

@@ -1,23 +0,0 @@
- name: "mkdir /opt/adc-exporter/"
file:
path: /opt/adc-exporter/
state: directory
- name: "copy ping_exporter"
copy:
src: '{{ role_path }}/files/ping_exporter'
dest: /opt/adc-exporter/ping_exporter
mode: 0755
- name: "templates ping_exporter.service"
template:
src: "{{role_path}}/templates/adc-exporter-ping.service.j2"
dest: /usr/lib/systemd/system/adc-exporter-ping.service
tags: template
- name: 'adc-exporter-ping service start'
systemd:
name: adc-exporter-ping
enabled: yes
daemon_reload: yes
state: restarted

View File

@@ -1,11 +0,0 @@
[Unit]
Description=Ping Exporter
After=network.target
[Service]
Type=simple
ExecStart=/opt/adc-exporter/ping_exporter {{ ping_test.target|join(" ")}} --ping.size=512 --ping.interval=0.5s
Restart=always
[Install]
WantedBy=multi-user.target
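
The unit above passes the ping targets straight on the command line via ping_test.target. Once running, the exporter can be scraped like the others (port 9427 is inferred from the proxy config below, so treat it as an assumption):

    # Scrape the ping exporter locally
    curl -s http://127.0.0.1:9427/metrics | head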

View File

@@ -1,34 +0,0 @@
- name: "mkdir /opt/adc-exporter-proxy/"
file:
path: /opt/adc-exporter-proxy/
state: directory
- name: "copy file to device"
copy:
src: '{{ role_path }}/files/'
dest: /tmp/ansible_deploy/
- name: "unarchive adc-exporter-proxy(NGINX)"
unarchive:
src: /tmp/ansible_deploy/adc_exporter_proxy.tar.gz
dest: /opt/adc-exporter-proxy
remote_src: yes
- name: "templates adc-exporter-proxy.service"
template:
src: "{{role_path}}/templates/adc-exporter-proxy.service.j2"
dest: /usr/lib/systemd/system/adc-exporter-proxy.service
tags: template
- name: "template nginx.conf"
template:
src: "{{role_path}}/templates/nginx.conf.j2"
dest: /opt/adc-exporter-proxy/adc-exporter-proxy/conf/nginx.conf
tags: template
- name: 'adc-exporter-proxy service start'
systemd:
name: adc-exporter-proxy
enabled: yes
daemon_reload: yes
state: restarted

View File

@@ -1,12 +0,0 @@
[Unit]
Description=ADC Exporter Proxy (NGINX) for NEZHA
After=network.target remote-fs.target nss-lookup.target
[Service]
Type=simple
ExecStart=/opt/adc-exporter-proxy/adc-exporter-proxy/sbin/nginx -p /opt/adc-exporter-proxy/adc-exporter-proxy
ExecReload=/opt/adc-exporter-proxy/adc-exporter-proxy/sbin/nginx -p /opt/adc-exporter-proxy/adc-exporter-proxy -s reload
ExecStop=/opt/adc-exporter-proxy/adc-exporter-proxy/sbin/nginx -p /opt/adc-exporter-proxy/adc-exporter-proxy -s stop
[Install]
WantedBy=multi-user.target

View File

@@ -1,152 +0,0 @@
user nobody;
worker_processes 1;
daemon off;
error_log logs/error.log;
error_log logs/error.log notice;
error_log logs/error.log info;
pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
sendfile on;
tcp_nopush on;
keepalive_timeout 65;
gzip on;
server {
listen 9000;
server_name localhost;
location /metrics/blade/mcn0/node_exporter {
proxy_pass http://192.168.100.1:9100/metrics;
}
location /metrics/blade/mcn1/node_exporter {
proxy_pass http://192.168.100.2:9100/metrics;
}
location /metrics/blade/mcn2/node_exporter {
proxy_pass http://192.168.100.3:9100/metrics;
}
location /metrics/blade/mcn3/node_exporter {
proxy_pass http://192.168.100.4:9100/metrics;
}
location /metrics/blade/mxn/node_exporter {
proxy_pass http://192.168.100.5:9100/metrics;
}
location /metrics/blade/mcn0/systemd_exporter {
proxy_pass http://192.168.100.1:9558/metrics;
}
location /metrics/blade/mcn1/systemd_exporter {
proxy_pass http://192.168.100.2:9558/metrics;
}
location /metrics/blade/mcn2/systemd_exporter {
proxy_pass http://192.168.100.3:9558/metrics;
}
location /metrics/blade/mcn3/systemd_exporter {
proxy_pass http://192.168.100.4:9558/metrics;
}
location /metrics/blade/mcn0/ipmi_exporter {
proxy_pass http://192.168.100.1:9290/metrics;
}
location /metrics/blade/mcn1/ipmi_exporter {
proxy_pass http://192.168.100.2:9290/metrics;
}
location /metrics/blade/mcn2/ipmi_exporter {
proxy_pass http://192.168.100.3:9290/metrics;
}
location /metrics/blade/mcn3/ipmi_exporter {
proxy_pass http://192.168.100.4:9290/metrics;
}
location /metrics/blade/mxn/ipmi_exporter {
proxy_pass http://192.168.100.5:9290/metrics;
}
location /metrics/blade/mcn0/certstore {
proxy_pass http://192.168.100.1:9002/metrics;
}
location /metrics/blade/mcn1/tfe {
proxy_pass http://192.168.100.2:9001/metrics;
}
location /metrics/blade/mcn2/tfe {
proxy_pass http://192.168.100.3:9001/metrics;
}
location /metrics/blade/mcn3/tfe {
proxy_pass http://192.168.100.4:9001/metrics;
}
location /metrics/blade/mcn0/sapp {
proxy_pass http://192.168.100.1:9273/metrics;
}
location /metrics/blade/mcn0/mrapm_device {
proxy_pass http://192.168.100.1:8901/metrics;
}
location /metrics/blade/mcn0/mrapm_stream {
proxy_pass http://192.168.100.1:8902/metrics;
}
location /metrics/blade/mcn1/mrapm_device {
proxy_pass http://192.168.100.2:8901/metrics;
}
location /metrics/blade/mcn1/mrapm_stream {
proxy_pass http://192.168.100.2:8902/metrics;
}
location /metrics/blade/mcn2/mrapm_device {
proxy_pass http://192.168.100.3:8901/metrics;
}
location /metrics/blade/mcn2/mrapm_stream {
proxy_pass http://192.168.100.3:8902/metrics;
}
location /metrics/blade/mcn3/mrapm_device {
proxy_pass http://192.168.100.4:8901/metrics;
}
location /metrics/blade/mcn3/mrapm_stream {
proxy_pass http://192.168.100.4:8902/metrics;
}
location /metrics/blade/mcn0/maat_redis {
proxy_pass http://192.168.100.1:9121/metrics;
}
location /metrics/blade/mcn0/ping_exporter {
proxy_pass http://192.168.100.1:9427/metrics;
}
}
}
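
The proxy above fans a single listener on port 9000 out to the per-blade exporters over the 192.168.100.0/24 backplane. From any host that can reach the proxy, a scrape through it would look like this (the proxy host address is a placeholder):

    # Pull mcn0 node metrics and mcn1 tfe metrics through the aggregating proxy
    curl -s http://<proxy-host>:9000/metrics/blade/mcn0/node_exporter | head
    curl -s http://<proxy-host>:9000/metrics/blade/mcn1/tfe | head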

View File

@@ -1,36 +0,0 @@
- name: "copy app_global rpm to destination server"
copy:
src: "{{ role_path }}/files/"
dest: /tmp/ansible_deploy/
- name: "install app rpms from localhost"
yum:
name:
- /tmp/ansible_deploy/emqx-centos7-v4.1.2.x86_64.rpm
- /tmp/ansible_deploy/app-sketch-global-1.0.3.202010.a7b2e40-1.el7.x86_64.rpm
state: present
- name: "template the app_sketch_global.conf"
template:
src: "{{ role_path }}/templates/app_sketch_global.conf.j2"
dest: /opt/tsg/app-sketch-global/conf/app_sketch_global.conf
- name: "template the zlog.conf"
template:
src: "{{ role_path }}/templates/zlog.conf.j2"
dest: /opt/tsg/app-sketch-global/conf/zlog.conf
- name: "Start emqx"
systemd:
name: emqx.service
state: started
enabled: yes
daemon_reload: yes
- name: "Start app-sketch-global"
systemd:
name: app-sketch-global.service
state: started
enabled: yes
daemon_reload: yes

View File

@@ -1,41 +0,0 @@
[SYSTEM]
#1:print on screen, 0:don't
DEBUG_SWITCH = 1
RUN_LOG_PATH = "conf/zlog.conf"
[breakpad]
disable_coredump=0
enable_breakpad=1
breakpad_minidump_dir=/tmp/app-sketch-global/crashreport
enable_breakpad_upload=0
breakpad_upload_url={{ breakpad_upload_url }}
[CONFIG]
#Number of running threads
thread-nu = 1
timeout = 3600
address="tcp://127.0.0.1:1883"
topic_name="APP_SIGNATURE_ID"
client_name="ExampleClientSub"
[maat]
# 0:json 1: redis 2: iris
maat_input_mode=1
table_info=./resource/table_info.conf
json_cfg_file=./resource/gtest.json
stat_file=logs/verify-policy.status
full_cfg_dir=verify-policy/
inc_cfg_dir=verify-policy/
maat_redis_server={{ maat_redis_server.address }}
maat_redis_port_range={{ maat_redis_server.port }}
maat_redis_db_index={{ maat_redis_server.db }}
effect_interval_s=1
accept_tags={"tags":[{"tag":"location","value":"Astana"}]}
[stat]
statsd_server={{ file_stat_ip }}
statsd_port=8100
statsd_cycle=5
# FS_OUTPUT_STATSD=1, FS_OUTPUT_INFLUX_LINE=2
statsd_format=2

View File

@@ -1,12 +0,0 @@
[global]
default format = "%d(%c), %V, %F, %U, %m%n"
[levels]
DEBUG=10
INFO=20
FATAL=30
[rules]
*.fatal "./logs/error.log.%d(%F)";
*.{{ app_sketch_global_log_level }} "./logs/app_sketch_global.log.%d(%F)"

View File

@@ -1,14 +0,0 @@
---
- name: "copy app_proto_identify rpm package destination server"
copy:
src: "{{ role_path }}/files/"
dest: /tmp/ansible_deploy/
- name: "install app_proto_identify"
yum:
name: "{{ app_packages }}"
state: present
skip_broken: yes
vars:
app_packages:
- /tmp/ansible_deploy/app_proto_identify-1.0.7.a5113ba-2.el7.x86_64.rpm

View File

@@ -1,12 +0,0 @@
[Unit]
Description=Redis persistent key-value database
After=network.target
[Service]
ExecStart=/usr/bin/redis-server /etc/cert-redis.conf --supervised systemd
ExecStop=/usr/libexec/redis-shutdown cert-redis
Type=notify
[Install]
WantedBy=multi-user.target

View File

@@ -160,7 +160,7 @@ loglevel notice
# Specify the log file name. Also the empty string can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
#logfile "/opt/tsg/cert-redis/6379/6379.log"
logfile "/home/tsg/cert-redis/6379/6379.log"
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
# and optionally update the other syslog parameters to suit your needs.
@@ -244,7 +244,7 @@ dbfilename dump.rdb
# The Append Only File will also be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
#dir /opt/tsg/cert-redis/6379/
dir /home/tsg/cert-redis/6379/
################################# REPLICATION #################################
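
With the log file and working directory moved from /opt/tsg to /home/tsg, a deployed blade can be checked against the new paths:

    # Confirm cert-redis writes to the relocated locations
    ls /home/tsg/cert-redis/6379/
    tail -n 5 /home/tsg/cert-redis/6379/6379.log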

Binary file not shown.

View File

@@ -0,0 +1,16 @@
[Unit]
Description=Redis persistent key-value database
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
ExecStart=/usr/local/bin/start-cert-redis
ExecStop=killall redis-server
Type=forking
RuntimeDirectory=redis
RuntimeDirectoryMode=0755
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,6 @@
#!/bin/bash
#
cp -rf redis-server /usr/local/bin/
cp -rf redis-cli /usr/local/bin
cp -rf cert-redis.service /usr/lib/systemd/system/
cp -rf start-cert-redis /usr/local/bin

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,4 @@
#!/bin/bash
#
/usr/local/bin/redis-server /home/tsg/cert-redis/6379/6379.conf
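
start-cert-redis simply launches redis-server against the 6379 instance config under /home/tsg. Assuming the instance keeps the default port implied by that directory name (an inference, not confirmed by this diff), liveness can be checked with:

    # Expect PONG once cert-redis is up
    redis-cli -p 6379 ping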

View File

@@ -1,11 +1,11 @@
- name: "copy cert-redis file to dest"
- name: "copy cert-redis to destination server"
copy:
src: "{{ role_path }}/files/"
dest: "{{ item.dest }}"
mode: "{{ item.mode }}"
with_items:
- { src: "cert-redis.conf" , dest: "/etc" , mode: "0644" }
- { src: "cert-redis.service" , dest: "/usr/lib/systemd/system" , mode: "0644" }
dest: /home/tsg
mode: 0755
- name: "install cert-redis"
shell: cd /home/tsg/cert-redis;sh install.sh
- name: "start cert-redis"
systemd:

View File

@@ -1,3 +0,0 @@
[Service]
MemoryLimit=16G
ExecStartPost=/bin/bash -c "echo 16G > /sys/fs/cgroup/memory/system.slice/certstore.service/memory.memsw.limit_in_bytes"

View File

@@ -3,31 +3,20 @@
src: "{{ role_path }}/files/"
dest: "/tmp/ansible_deploy/"
- name: Ensures /opt/tsg exists
file: path=/opt/tsg state=directory
- name: Ensures /home/tsg exists
file: path=/home/tsg state=directory
tags: mkdir
- name: install certstore
yum:
name:
- /tmp/ansible_deploy/certstore-2.1.6.20201215.f2e9ba7-1.el7.x86_64.rpm
- /tmp/ansible_deploy/certstore-2.1.2.20200828.f507b3e-1.el7.x86_64.rpm
state: present
- name: template certstore configure file
template:
src: "{{ role_path }}/templates/cert_store.ini.j2"
dest: /opt/tsg/certstore/conf/cert_store.ini
- name: template certstore zlog file
template:
src: "{{ role_path }}/templates/zlog.conf.j2"
dest: /opt/tsg/certstore/conf/zlog.conf
- name: "copy memory limit file to certstore.service.d"
copy:
src: "{{ role_path }}/files/memory.conf"
dest: /etc/systemd/system/certstore.service.d/
mode: 0644
dest: /home/tsg/certstore/conf/cert_store.ini
- name: "start certstore"
systemd:

View File

@@ -1,15 +1,9 @@
[SYSTEM]
#1:print on screen, 0:don't
DEBUG_SWITCH = 1
RUN_LOG_PATH = "conf/zlog.conf"
[breakpad]
disable_coredump=0
enable_breakpad=1
breakpad_minidump_dir=/tmp/certstore/crashreport
enable_breakpad_upload=1
breakpad_upload_url= {{ breakpad_upload_url }}
#10:DEBUG, 20:INFO, 30:FATAL
RUN_LOG_LEVEL = {{ certstore_log_level }}
RUN_LOG_PATH = ./logs
[CONFIG]
#Number of running threads
thread-nu = 4
@@ -20,8 +14,7 @@ expire_after = 30
#Local default root certificate path
local_debug = 1
ca_path = ./cert/tango-ca-v3-trust-ca.pem
untrusted_ca_path = ./cert/tango-ca-v3-untrust-ca.pem
untrusted_ca_path = ./cert/mesalab-ca-untrust.pem
[MAAT]
#Configure the load mode,
#0: using the configuration distribution network
@@ -38,23 +31,18 @@ inc_cfg_dir=./rule/inc/index
full_cfg_dir=./rule/full/index
#Json file path when json schema is used
pxy_obj_keyring=./conf/pxy_obj_keyring.json
[LIBEVENT]
#Local monitor port number, default is 9991
port = 9991
[CERTSTORE_REDIS]
#The Redis server IP address and port number where the certificate is stored locally
ip = 127.0.0.1
port = 6379
[MAAT_REDIS]
#The Redis server IP address and port number that Maat monitors
ip = {{ maat_redis_server.address }}
port = {{ maat_redis_server.port }}
dbindex = {{ maat_redis_server.db }}
[stat]
statsd_server=127.0.0.1
statsd_port=8100
statsd_set_prometheus_port=9002
statsd_set_prometheus_url_path=/metrics
statsd_server=192.168.100.1
statsd_port=8126

View File

@@ -1,10 +0,0 @@
[global]
default format = "%d(%c), %V, %F, %U, %m%n"
[levels]
DEBUG=10
INFO=20
FATAL=30
[rules]
*.fatal "./logs/error.log.%d(%F)";
*.{{ certstore_log_level }} "./logs/certstore.log.%d(%F)"

View File

@@ -0,0 +1,13 @@
[Unit]
Description=clotho
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
ExecStart=/home/mesasoft/clotho/clotho
ExecStop=killall clotho
Type=forking
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,29 @@
- name: "copy clotho rpm to destination server"
copy:
src: "{{ role_path }}/files/clotho-debug-1.0.0.-1.el7.x86_64.rpm"
dest: /tmp/ansible_deploy/
- name: "copy clotho.service to destination server"
copy:
src: "{{ role_path }}/files/clotho.service"
dest: /usr/lib/systemd/system
mode: 0755
- name: "install clotho rpm from localhost"
yum:
name:
- /tmp/ansible_deploy/clotho-debug-1.0.0.-1.el7.x86_64.rpm
state: present
- name: "Template the clotho.conf"
template:
src: "{{ role_path }}/templates/clotho.conf.j2"
dest: /home/mesasoft/clotho/conf/clotho.conf
tags: template
- name: "start clotho"
systemd:
name: clotho.service
enabled: yes
daemon_reload: yes
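
Note that the final task enables clotho and reloads systemd but passes no state, so the service is not necessarily started by the play itself. After a run, it is worth verifying with standard systemd tooling:

    # Check whether clotho is actually running and inspect recent output
    systemctl status clotho.service
    journalctl -u clotho.service -n 50 --no-pager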

View File

@@ -0,0 +1,7 @@
[KAFKA]
BROKER_LIST={{ log_kafkabrokers.address }}
[SYSTEM]
NIC_NAME={{ nic_mgr.name }}
LOG_LEVEL={{ clotho_log_level }}
LOG_PATH=log/clotho

View File

@@ -11,22 +11,22 @@
skip_broken: yes
vars:
fw_packages:
- /tmp/ansible_deploy/capture_packet_plug-3.0.6.a2db4a4-2.el7.x86_64.rpm
- /tmp/ansible_deploy/conn_telemetry-1.0.2.8d6da43-2.el7.x86_64.rpm
- /tmp/ansible_deploy/dns-2.0.9.b639626-2.el7.x86_64.rpm
- /tmp/ansible_deploy/ftp-1.0.8.13d5fda-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_dns_plug-3.0.2.dab58fa-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_ftp_plug-3.0.1.0a78573-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_http_plug-3.0.4.484b54d-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_mail_plug-3.0.2.7401550-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_quic_plug-3.0.4.947ef77-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_ssl_plug-3.0.6.a121701-2.el7.x86_64.rpm
- /tmp/ansible_deploy/http-2.0.5.c61ad9a-2.el7.x86_64.rpm
- /tmp/ansible_deploy/mail-1.0.9.c1d3bde-2.el7.x86_64.rpm
- /tmp/ansible_deploy/quic-1.1.17.8c22b4d-2.el7.x86_64.rpm
- /tmp/ansible_deploy/ssl-1.0.12.16b8fb5-2.el7.x86_64.rpm
- /tmp/ansible_deploy/tsg_conn_sketch-2.0.12.0ad5a3b-2.el7.x86_64.rpm
- /tmp/ansible_deploy/app_control_plug-1.0.9.97846eb-2.el7.x86_64.rpm
- /tmp/ansible_deploy/capture_packet_plug-3.0.2.09f193c-2.el7.x86_64.rpm
- /tmp/ansible_deploy/clotho-debug-1.0.0.-1.el7.x86_64.rpm
- /tmp/ansible_deploy/dns-2.0.6.d8317e9-2.el7.x86_64.rpm
- /tmp/ansible_deploy/ftp-1.0.6.2710506-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_dns_plug-3.0.0.0a5d574-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_ftp_plug-3.0.0.7a867ea-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_http_plug-3.0.0.1ca1c65-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_mail_plug-3.0.0.3b4e481-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_quic_plug-3.0.0.b06d39c-2.el7.x86_64.rpm
- /tmp/ansible_deploy/fw_ssl_plug-3.0.1.7ea9976-2.el7.x86_64.rpm
- /tmp/ansible_deploy/http-2.0.3.9218b4b-2.el7.x86_64.rpm
- /tmp/ansible_deploy/mail-1.0.7.9e3be05-2.el7.x86_64.rpm
- /tmp/ansible_deploy/quic-1.1.6.d6755d8-2.el7.x86_64.rpm
- /tmp/ansible_deploy/ssl-1.0.3.e8482a4-2.el7.x86_64.rpm
- /tmp/ansible_deploy/tsg_conn_record-1.0.2.2afb19a-2.el7.x86_64.rpm
- /tmp/ansible_deploy/tsg_conn_sketch-2.0.v2.0_alpha.af621ca-2.el7.x86_64.rpm
- name: "Template the tsgconf/main.conf"
template:
@@ -46,15 +46,3 @@
src: "{{ role_path }}/templates/capture_packet_plug.conf.j2"
dest: /home/mesasoft/sapp_run/conf/capture_packet_plug.conf
tags: template
- name: "Template the tsgconf/app_l7_proto_id.conf"
template:
src: "{{ role_path }}/templates/app_l7_proto_id.conf.j2"
dest: /home/mesasoft/sapp_run/tsgconf/app_l7_proto_id.conf
- name: "Template the /home/mesasoft/sapp_run/plug/business/tsg_conn_sketch/tsg_conn_sketch.inf"
template:
src: "{{ role_path }}/templates/tsg_conn_sketch.inf.j2"
dest: /home/mesasoft/sapp_run/plug/business/tsg_conn_sketch/tsg_conn_sketch.inf
tags: template

View File

@@ -1,51 +0,0 @@
#TYPE1:UCHAR,2:USHORT,3:USTRING,4:ULOG,5:USTRING,6:FILE,7:UBASE64,8:PACKET
#TYPE FIELD VALUE
STRING UNCATEGORIZED 100
STRING UNCATEGORIZED 101
STRING UNKNOWN_OTHER 102
STRING DNS 103
STRING FTP 104
STRING FTPS 105
STRING HTTP 106
STRING HTTPS 107
STRING ICMP 108
STRING IKE 109
STRING MAIL 110
STRING IMAPS 111
STRING IPSEC 112
STRING XMPP 113
STRING L2TP 114
STRING NTP 115
STRING POP3S 117
STRING PPTP 118
STRING QUIC 119
STRING SIP 120
STRING SMB 121
STRING SMTPS 123
STRING SPDY 124
STRING SSH 125
STRING SSL 126
STRING SOCKS 127
STRING TELNET 128
STRING DHCP 129
STRING RADIUS 130
STRING OPENVPN 131
STRING STUN 132
STRING TEREDO 133
STRING DTLS 134
STRING DoH 135
STRING ISAKMP 136
STRING MDNS 137
STRING NETBIOS 138
STRING NETFLOW 139
STRING RDP 140
STRING RTCP 141
STRING RTP 142
STRING SLP 143
STRING SNMP 144
STRING SSDP 145
STRING TFTP 146
STRING BJNP 147
STRING LDAP 148
STRING RTMP 149
STRING RTSP 150

View File

@@ -7,19 +7,16 @@ TABLE_INFO=conf/capture_packet_tableinfo.conf
STAT_FILE=capture_packet_maat.status
EFFECT_INTERVAL_S=1
REDIS_IP={{ maat_redis_server.address }}
REDIS_PORT_NUM={{ maat_redis_server.port_num }}
REDIS_PORT_NUM=1
REDIS_PORT={{ maat_redis_server.port }}
REDIS_INDEX={{ maat_redis_server.db }}
REDIS_INDEX=0
JSON_CFG_FILE=conf/capture_packet_maat.json
INC_CFG_DIR=capture_packet_rule/inc/index/
FULL_CFG_DIR=capture_packet_rule/full/index/
EFFECTIVE_RANGE_FILE=/opt/tsg/etc/tsg_device_tag.json
ACCEPT_TAGS={"tags":[{"tag":"data_center","value":"{{ data_center }}"}]}
[LOG]
NIC_NAME={{ nic_mgr.name }}
BROKER_LIST={{ log_kafkabrokers.address | join(",") }}
BROKER_LIST={{ log_kafkabrokers.address }}
FIELD_FILE=conf/capture_packet_log_field.conf
[SYSTEM]

View File

@@ -7,13 +7,12 @@ TABLE_INFO=tsgconf/tsg_static_tableinfo.conf
STAT_FILE=tsg_static_maat.status
EFFECT_INTERVAL_S=1
REDIS_IP={{ maat_redis_server.address }}
REDIS_PORT_NUM={{ maat_redis_server.port_num }}
REDIS_PORT={{ maat_redis_server.port }}
REDIS_INDEX={{ maat_redis_server.db }}
REDIS_PORT_NUM=1
REDIS_PORT=7002
REDIS_INDEX=0
JSON_CFG_FILE=tsgconf/tsg_maat.json
INC_CFG_DIR=tsgrule/inc/index/
FULL_CFG_DIR=tsgrule/full/index/
EFFECTIVE_RANGE_FILE=/opt/tsg/etc/tsg_device_tag.json
[DYNAMIC]
###0:location 1:json 2:redis
@@ -24,13 +23,10 @@ TABLE_INFO=tsgconf/tsg_dynamic_tableinfo.conf
STAT_FILE=tsg_dynamic_maat.status
EFFECT_INTERVAL_S=1
REDIS_IP={{ dynamic_maat_redis_server.address }}
REDIS_PORT_NUM={{ dynamic_maat_redis_server.port_num }}
REDIS_PORT={{ dynamic_maat_redis_server.port }}
REDIS_INDEX={{ dynamic_maat_redis_server.db }}
REDIS_PORT_NUM=1
REDIS_PORT=7002
REDIS_INDEX=1
JSON_CFG_FILE=tsgconf/tsg_maat.json
INC_CFG_DIR=tsgrule/inc/index/
FULL_CFG_DIR=tsgrule/full/index/
EFFECTIVE_RANGE_FILE=/opt/tsg/etc/tsg_device_tag.json
[MAAT]
ACCEPT_TAGS={"tags":[{"tag":"data_center","value":"{{ data_center }}"}]}

View File

@@ -1,64 +1,55 @@
[FTP_PLUG]
LOG_PATH="./tsglog/fw_ftp_plug/fw_ftp_plug"
LOG_PATH=./tsglog/fw_ftp_plug/fw_ftp_plug
LOG_LEVEL={{ fw_ftp_log_level }}
TIMEOUT=600
[MAIL_PLUG]
LOG_PATH="./tsglog/fw_mail_plug/fw_mail_plug"
LOG_PATH=./tsglog/fw_mail_plug/fw_mail_plug
LOG_LEVEL={{ fw_mail_log_level }}
TIMEOUT=600
[HTTP_PLUG]
LOG_PATH="./tsglog/fw_http_plug/fw_http_plug"
LOG_PATH=./tsglog/fw_http_plug/fw_http_plug
LOG_LEVEL={{ fw_http_log_level }}
[DNS_PLUG]
LOG_PATH="./tsglog/fw_dns_plug/fw_dns_plug"
LOG_PATH=./tsglog/fw_dns_plug/fw_dns_plug
LOG_LEVEL={{ fw_dns_log_level }}
[QUIC_PLUG]
LOG_PATH="./tsglog/fw_quic_plug/fw_quic_plug"
LOG_PATH=./tsglog/fw_quic_plug/fw_quic_plug
LOG_LEVEL={{ fw_quic_log_level }}
[CONTROL_PLUG]
LOG_PATH="./tsglog/app_control_plug/app_control_plug"
LOG_LEVEL={{ app_control_log_level }}
[MAAT]
PROFILE="./tsgconf/maat.conf"
SUBSCRIBER_ID_TABLE="TSG_OBJ_SUBSCRIBER_ID"
CB_SUBSCRIBER_IP_TABLE="TSG_DYN_SUBSCRIBER_IP"
IP_ADDR_TABLE="TSG_SECURITY_ADDR"
PROFILE=./tsgconf/maat.conf
SUBSCRIBER_ID_TABLE=TSG_OBJ_SUBSCRIBER_ID
CB_SUBSCRIBER_IP_TABLE=TSG_DYN_SUBSCRIBER_IP
IP_ADDR_TABLE=TSG_SECURITY_ADDR
[TSG_LOG]
MODE=1
NIC_NAME="{{ nic_mgr.name }}"
NIC_NAME={{ nic_mgr.name }}
MAX_SERVICE=1
LOG_LEVEL={{ tsg_log_level }}
LOG_PATH="./tsglog/tsglog"
BROKER_LIST="{{ log_kafkabrokers.address | join(",") }}"
COMMON_FIELD_FILE="tsgconf/tsg_log_field.conf"
LOG_PATH=./tsglog/tsglog
BROKER_LIST={{ log_kafkabrokers.address }}
COMMON_FIELD_FILE=tsgconf/tsg_log_field.conf
[STATISTIC]
CYCLE=5
TELEGRAF_PORT=8100
TELEGRAF_IP="127.0.0.1"
OUTPUT_PATH="./tsg_statistic.log"
APP_NAME="statistic"
TELEGRAF_IP=127.0.0.1
OUTPUT_PATH=./tsg_statistic.log
APP_NAME=statistic
[FIELD_STAT]
CYCLE=5
TELEGRAF_PORT=8100
TELEGRAF_IP="127.0.0.1"
OUTPUT_PATH="./tsg_stat.log"
APP_NAME="tsg_master"
TELEGRAF_IP=127.0.0.1
OUTPUT_PATH=./tsg_stat.log
APP_NAME=tsg_master
[SYSTEM]
ENTRANCE_ID={{ tsg_master_entrance_id }}
LOG_LEVEL={{ tsg_master_log_level }}
LOG_PATH="./tsglog/tsg_master"
POLICY_PRIORITY_LABEL="POLICY_PRIORITY"
DEVICE_ID_COMMAND="hostname | awk -F'-' '{print $3}'| awk -F'adc' '{print $2}'"
[TSG_CONN_SKETCH]
log_service=2
LOG_PATH=./tsglog/tsg_master
POLICY_PRIORITY_LABEL=POLICY_PRIORITY

View File

@@ -1,35 +0,0 @@
[PLUGINFO]
PLUGNAME=TSG_CONN_SKETCH
SO_PATH=./plug/business/tsg_conn_sketch/tsg_conn_sketch.so
INIT_FUNC=tsg_conn_record_init
DESTROY_FUNC=tsg_conn_record_destroy
[TCP]
FUNC_FLAG=ALL
FUNC_NAME=tsg_record_tcp_entry
[TCP_ALL]
FUNC_FLAG=ALL
FUNC_NAME=tsg_record_tcpall_entry
[UDP]
FUNC_FLAG=ALL
FUNC_NAME=tsg_record_udp_entry
[HTTP]
FUNC_FLAG=ALL
FUNC_NAME=tsg_record_http_entry
[SSL]
FUNC_FLAG=SSL_CLIENT_HELLO,SSL_SERVER_HELLO,SSL_APPLICATION_DATA,SSL_CERTIFICATE_DETAIL
FUNC_NAME=tsg_record_ssl_entry
#[DNS]
#FUNC_FLAG=ALL
#FUNC_NAME=tsg_record_dns_entry
[MAIL]
FUNC_FLAG=ALL
FUNC_NAME=tsg_record_mail_entry

Some files were not shown because too many files have changed in this diff.