diff --git a/containers/Makefile b/containers/Makefile new file mode 100644 index 00000000..eab361e2 --- /dev/null +++ b/containers/Makefile @@ -0,0 +1,71 @@ +export YUM_REPO_FILE +export IMAGE_TAG + +BUILD_DIR := build + +IMAGE_NAMES := $(shell find . -maxdepth 1 -type d ! -name "build" ! -name "." | sed 's|^\./||') +IMAGE_REGISTRY := registry.gdnt-cloud.website/tsg/os +IMAGE_TAR_DIR := $(BUILD_DIR)/images + + +DEP_ENV_FILES := $(BUILD_DIR)/IMAGE_TAG_$(subst /,_,$(IMAGE_TAG)).env + +ARCH := $(shell uname -m) + +ifeq ($(ARCH),x86_64) +IMAGE_ARCH := amd64 +else ifeq ($(ARCH),aarch64) +IMAGE_ARCH := arm64 +else +IMAGE_ARCH := unknown +endif + +define write_env_files +$(1): + mkdir -p $(BUILD_DIR); echo $(1) > $(1) +endef + +DOCKERFILE_MACROS := dockerfile-macros.j2 + +BUILD_DONE_FILE := build.done + +define build_rule +$(1): $(BUILD_DIR)/$(1)/$(BUILD_DONE_FILE) + +$(BUILD_DIR)/$(1)/$(BUILD_DONE_FILE): $(shell find $(1) -type f) $(2) + @mkdir -p $(BUILD_DIR)/$(1) + @mkdir -p $(IMAGE_TAR_DIR) + $(3) + @echo done > $(BUILD_DIR)/$(1)/$(BUILD_DONE_FILE) +endef + +define build_image_from_dockerfile + /usr/local/bin/j2 -f yaml $(1)/Dockerfile.j2 $(1)/manifest.yaml -o $(BUILD_DIR)/$(1)/Dockerfile + buildah build \ --volume /etc/hosts:/etc/hosts:ro \ --volume $(YUM_REPO_FILE):/etc/yum.conf:ro \ --volume $(YUM_REPO_FILE):/etc/dnf/dnf.conf:ro \ -f $(BUILD_DIR)/$(1)/Dockerfile \ --build-arg BASE_IMAGE=$(2) \ -t $(3) \ $(1) +endef + +define download_image_tar_from_url + curl -f -u "${PULP_REPO_USERNAME}:${PULP_REPO_PASSWORD}" \ -o $(IMAGE_TAR_DIR)/$(1) \ https://repo.geedge.net/filerepo/install/release/tsg-container-images/$(1) +endef + +.PHONY: all clean $(IMAGE_NAMES) + +all: $(IMAGE_NAMES) + +$(foreach name,$(IMAGE_NAMES),\ $(eval include $(name)/build.mk)\ $(eval $(call build_rule,$(rule_target),$(rule_prerequisites),$(rule_recipes)))) + +$(foreach file,$(DEP_ENV_FILES),$(eval $(call write_env_files,$(file)))) + +clean: + rm -rf $(BUILD_DIR) \ No 
newline at end of file diff --git a/containers/base/Dockerfile.j2 b/containers/base/Dockerfile.j2 new file mode 100644 index 00000000..03f75638 --- /dev/null +++ b/containers/base/Dockerfile.j2 @@ -0,0 +1,46 @@ +{% import 'dockerfile-macros.j2' as macros -%} +ARG BASE_IMAGE +FROM ${BASE_IMAGE} + +#RUN dnf -y upgrade --refresh rpm glibc && rm /var/lib/rpm/.rpm.lock && dnf -y upgrade dnf + +RUN dnf -y update && \ + dnf -y install tcpdump \ + numactl-libs \ + iproute \ + iptables \ + procps \ + net-tools \ + ethtool \ + gdb \ + ipmitool \ + liburing \ + vim \ + lrzsz \ + libnsl \ + perf \ + jq \ + perl \ + perl-open \ + valgrind \ + python2 \ + js-d3-flame-graph \ + python3 \ + dnsutils \ + wireshark \ + crudini \ + inotify-tools \ + jemalloc \ + pcre2 \ + epel-release + +RUN python2 -m pip install -i https://pypi.tuna.tsinghua.edu.cn/simple PrettyTable==0.7.2 && \ + python3 -m pip install -i https://pypi.tuna.tsinghua.edu.cn/simple jinja2 prettytable tomlq toml sdnotify j2cli j2cli[yaml] + +RUN {{ macros.install_packages(packages) }} && \ + {{ macros.clean_after_install_packages() }} + +COPY files/framework.conf /etc/ld.so.conf.d/ +COPY files/framework.sh /etc/profile.d/ +COPY --chmod=755 files/entrypoint_public.sh /usr/local/bin/ +COPY templates/* /templates/ \ No newline at end of file diff --git a/containers/base/build.mk b/containers/base/build.mk new file mode 100644 index 00000000..7235b0f6 --- /dev/null +++ b/containers/base/build.mk @@ -0,0 +1,3 @@ +rule_target := base +rule_prerequisites := $(DEP_ENV_FILES) $(YUM_REPO_FILE) $(DOCKERFILE_MACROS) +rule_recipes := $(call build_image_from_dockerfile,$(rule_target),rockylinux:8,$(IMAGE_REGISTRY)/$(rule_target):$(IMAGE_TAG)) \ No newline at end of file diff --git a/containers/base/files/entrypoint_public.sh b/containers/base/files/entrypoint_public.sh new file mode 100644 index 00000000..85b6bde5 --- /dev/null +++ b/containers/base/files/entrypoint_public.sh @@ -0,0 +1,119 @@ +#!/bin/sh + 
+PRESTART_FILE="/opt/tsg/scripts/prestart.sh" +TEMPLATES_DIR="/templates" +VALUES_FILE="${TEMPLATES_DIR}/values.yaml" + +IS_ENABLE_PRESTART="false" +IS_ENABLE_INTERACTIVE_STARTUP="false" + +parse_args() +{ + if [ $# -eq 0 ]; then + echo "No arguments provided, using default configs. Skipping..." + return + fi + + PARSED_OPTIONS=$(getopt -o "" -l enable_prestart,enable_interactive_startup -- "$@") + + if [ $? -ne 0 ]; then + echo "Failed to parse arguments." + exit 1 + fi + + eval set -- "$PARSED_OPTIONS" + + while true; do + case "$1" in + --enable_prestart) + IS_ENABLE_PRESTART="true" + shift ;; + --enable_interactive_startup) + IS_ENABLE_INTERACTIVE_STARTUP="true" + shift ;; + --) + shift + break ;; + *) + echo "Unknown option $1" + break ;; + esac + done +} + +enable_prestart() +{ + if test -e ${PRESTART_FILE}; then + echo WARNING: PRESTART.sh is enable, the commands in PRESTART.sh is: + cat ${PRESTART_FILE} + chmod 0755 ${PRESTART_FILE}; source ${PRESTART_FILE} + echo PRESTART.sh has been exec...... + fi +} + +enable_interactive_startup() +{ + while true; do sleep 10; done +} + +read_device_sn_from_k8s_node_info() { + local APISERVER=https://kubernetes.default.svc + local SERVICEACCOUNT=/var/run/secrets/kubernetes.io/serviceaccount + local NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) + local TOKEN=$(cat ${SERVICEACCOUNT}/token) + local CACERT=${SERVICEACCOUNT}/ca.crt + + if [[ -z "$NODE_NAME" ]]; then + >&2 echo "env NODE_NAME is not set or empty!" + return 1 + fi + local OUTPUT_FILE="/tmp/node-${NODE_NAME}.json" + + curl --silent --fail --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" \ + -X GET ${APISERVER}/api/v1/nodes/${NODE_NAME} \ + -o ${OUTPUT_FILE} + + if [[ $? -ne 0 ]]; then + >&2 echo "Failed to retrieve node information!" + return 1 + fi + + local DEVICE_SN=$(cat ${OUTPUT_FILE} | jq -r '.metadata.annotations."tsg-os/device-sn"') + if [[ -z "$DEVICE_SN" || "$DEVICE_SN" == "null" ]]; then + >&2 echo "Device SN not found!" 
+ return 1 + fi + + echo "$DEVICE_SN" +} + + +render_template() { + local template_file=$1 + local output_file=$2 + /usr/local/bin/j2 -f yaml ${TEMPLATES_DIR}/${template_file} ${VALUES_FILE} -o ${output_file} +} + +read_nodeport_from_service() { + local service_name=$1 + local service_namespace=$2 + local service_domain=${service_name}.${service_namespace}.svc + + until nslookup ${service_domain} >&2; do + >&2 echo "waiting for service: ${service_domain}." + sleep 2 + done + + local APISERVER=https://kubernetes.default.svc + local SERVICEACCOUNT=/var/run/secrets/kubernetes.io/serviceaccount + local NAMESPACE=$(cat ${SERVICEACCOUNT}/namespace) + local TOKEN=$(cat ${SERVICEACCOUNT}/token) + local CACERT=${SERVICEACCOUNT}/ca.crt + + curl --silent --fail --cacert ${CACERT} --header "Authorization: Bearer ${TOKEN}" \ + -X GET ${APISERVER}/api/v1/namespaces/${NAMESPACE}/services/${service_name} \ + -o /tmp/service.txt + + local nodeport=$(cat /tmp/service.txt | jq '.spec.ports[] | .nodePort') + echo ${nodeport} +} \ No newline at end of file diff --git a/containers/base/files/framework.conf b/containers/base/files/framework.conf new file mode 100644 index 00000000..ed8ac775 --- /dev/null +++ b/containers/base/files/framework.conf @@ -0,0 +1 @@ +/opt/tsg/framework/lib/ diff --git a/containers/base/files/framework.sh b/containers/base/files/framework.sh new file mode 100644 index 00000000..017ce81f --- /dev/null +++ b/containers/base/files/framework.sh @@ -0,0 +1 @@ +export PATH=/opt/tsg/framework/bin:$PATH diff --git a/containers/base/manifest.yaml b/containers/base/manifest.yaml new file mode 100644 index 00000000..e3b70824 --- /dev/null +++ b/containers/base/manifest.yaml @@ -0,0 +1,130 @@ +packages: + - name: libcjson + version: 1.7.12.6c09dcf + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libdocumentanalyze + version: 2.0.11.719a8ff + download_command: dnf + install_command: rpm + 
install_command_options: "--prefix /opt/tsg/framework" + - name: libMESA_field_stat + version: 1.0.3.0de785d + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libMESA_field_stat2 + version: 2.10.11.b2095aa + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libMESA_handle_logger + version: 2.0.12.1dd9e1e + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libMESA_htable + version: 3.10.13.bd6fc34 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libMESA_prof_load + version: 1.0.9.16148e7 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: librulescan + version: 3.0.1.6145620 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libelua + version: 2.0.1.7760c27 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libwiredcfg + version: 2.0.8.cafaf49 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libWiredLB + version: 2.0.6.54a039d + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libbreakpad_mini + version: 1.0.9.9d98968 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libMV_Sketch + version: 2.1.3.20231215.19725c6 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: librdkafka + version: 1.2.2.1218b3c + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libMESA_jump_layer + version: 1.0.10.6fb4738 + download_command: dnf + 
install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libswarmkv + version: 4.4.4.5c89f35 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libfieldstat3 + version: 3.1.1.03491ea + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libMESA_sts + version: 1.0.3.d515a96 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libfieldstat4 + version: 4.6.6.2d9b9cd + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libipfix_exporter + version: 1.0.6.0e73c24 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libosfp + version: 1.3.11.d8c406f + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: utable + version: 1.0.11.f3db4a4 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libmaatframe + version: 4.2.1.4fddb2b + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libcbd + version: 3.1.1.c3767f2 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: libdos_protector + version: 3.2.3.07c2e54 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: hasp-tools + version: 1.0.12.66db85d + install_command: dnf + dnf_command_options: "--nogpgcheck" \ No newline at end of file diff --git a/containers/base/templates/macros.j2 b/containers/base/templates/macros.j2 new file mode 100644 index 00000000..f1249c27 --- /dev/null +++ b/containers/base/templates/macros.j2 @@ -0,0 +1,93 @@ +{# macros.j2 #} +{% macro cm_address(cm) -%} + {%- if 
cm.connectivity == "direct" -%} + {{ cm.direct.address }} + {%- elif cm.connectivity == "builtin" -%} + tsg-cm.tsg-os-system.svc + {%- else -%} + {{ cm.local_cache.cache_name ~ "-redis-master.tsg-os-system.svc" }} + {%- endif %} +{%- endmacro %} + +{% macro cm_port(cm) -%} + {%- if cm.connectivity == "direct" -%} + {{ cm.direct.port }} + {%- elif cm.connectivity == "builtin" -%} + 7002 + {%- else -%} + 6379 + {%- endif %} +{%- endmacro %} + +{% macro address_port_pairs_render(source, separator) -%} + {%- set addresses = [] -%} + {%- if source -%} + {%- for item in source -%} + {%- set address = item.address ~ ":" ~ item.port -%} + {%- do addresses.append(address) -%} + {%- endfor -%} + {{ addresses | join(separator) }} + {%- endif %} +{%- endmacro %} + +{% macro sd_address(sd) -%} + {%- if sd.enable is defined and sd.enable == True -%} + {%- if sd.connectivity == "direct" -%} + {{ sd.direct.address }} + {%- else -%} + {{ sd.local_cache.cache_name ~ "-redis-master.tsg-os-system.svc" }} + {%- endif %} + {%- endif %} +{%- endmacro %} + +{% macro sd_port(sd) -%} + {%- if sd.enable is defined and sd.enable == True -%} + {%- if sd.connectivity == "direct" -%} + {{ sd.direct.port }} + {%- else -%} + 6379 + {%- endif %} + {%- endif %} +{%- endmacro %} + +{% macro device_tag_list(device) -%} + {%- set tags_list = [] -%} + {%- if device.tag is defined and device.tag %} + {%- for tag in device.tag %} + {%- for key, val in tag.items() %} + {%- set tag_json = '{"tag":"' ~ key ~ '","value":"' ~ val ~ '"}' -%} + {%- do tags_list.append(tag_json) -%} + {%- endfor %} + {%- endfor %} + {%- endif %} + {{ tags_list | join(',') }} +{%- endmacro %} + +{% macro safe_read(data, path) -%} + {%- set keys = path.split('.') %} + {%- set ns = namespace(value=data) %} + {%- for key in keys %} + {%- if ns.value is mapping and key in ns.value %} + {%- set ns.value = ns.value[key] %} + {%- else %} + {%- set ns.value = None %} + {%- break %} + {%- endif %} + {%- endfor %} + {{- ns.value if ns.value 
is not none else '' }} +{%- endmacro %} + +{# ref a.b.c.d using safe_read(a, "b.c.d") #} + +{% macro read_device_tag_value(device, key) -%} + {%- set ns = namespace(value='') %} + {%- if device.tag is defined and device.tag %} + {%- for tag in device.tag %} + {%- if tag is mapping and key in tag %} + {%- set ns.value = tag[key] %} + {%- break %} + {%- endif %} + {%- endfor %} + {%- endif %} + {{- ns.value if ns.value is not none else '' }} +{%- endmacro %} diff --git a/containers/base/templates/tsg_device_tag.json.j2 b/containers/base/templates/tsg_device_tag.json.j2 new file mode 100644 index 00000000..f2d5f3fc --- /dev/null +++ b/containers/base/templates/tsg_device_tag.json.j2 @@ -0,0 +1,3 @@ +{% import '/templates/macros.j2' as macros -%} +[MAAT] +ACCEPT_TAGS={"tags":[{{ macros.device_tag_list(device) }}]} diff --git a/containers/dockerfile-macros.j2 b/containers/dockerfile-macros.j2 new file mode 100644 index 00000000..6fefaffe --- /dev/null +++ b/containers/dockerfile-macros.j2 @@ -0,0 +1,51 @@ +{# +packages: + - name: example + version: 1.1.1 + # url: https://www.example.com/download/test.rpm + download_command: dnf/curl + download_command_options: "--downloadonly --downloaddir /tmp/rpms_download" + download_command_override: "override the download command." 
+ install_command: dnf/rpm + install_command_options: "--prefix /opt/tsg/framework" + install_command_override: "override the install command" +#} + +{% macro install_packages(packages) -%} + {%- set generated_commands = [] -%} + {%- for item in packages if item.name and item.version -%} + {%- set rpm_version = item.name ~ "-" ~ item.version -%} + {%- if item.download_command_override is defined and item.download_command_override -%} + {%- do generated_commands.append(item.download_command_override) -%} + {%- else %} + {%- set command = '' %} + {%- if item.download_command is defined and item.download_command == "curl" %} + {%- set command = item.download_command ~ " " ~ item.url ~ " " ~ (item.download_command_options | default('')) -%} + {%- endif %} + {%- if item.download_command is defined and item.download_command == "dnf" %} + {%- set command = item.download_command ~ " install -y --downloadonly --downloaddir /tmp/download " ~ rpm_version ~ " " ~ (item.download_command_options | default('')) -%} + {%- endif %} + {%- do generated_commands.append(command) if command -%} + {%- endif %} + {%- if item.install_command_override is defined and item.install_command_override -%} + {%- do generated_commands.append(item.install_command_override) -%} + {%- else %} + {%- set command = '' %} + {%- if item.install_command is defined and item.install_command == "dnf" %} + {%- set command = item.install_command ~ " install -y " ~ rpm_version ~ " " ~ (item.install_command_options | default('')) -%} + {%- endif %} + {%- if item.install_command is defined and item.install_command == "rpm" %} + {%- set command = item.install_command ~ " -ivh " ~ "/tmp/download/" ~ rpm_version ~ "* " ~ (item.install_command_options | default('')) -%} + {%- endif %} + {%- do generated_commands.append(command) if command -%} + {%- endif %} + {%- endfor -%} + {{ generated_commands | join(' && \\\n ') }} +{%- endmacro %} + +{% macro clean_after_install_packages() -%} + {%- set generated_commands = []
-%} + {%- do generated_commands.append("rm -rf /tmp/download") -%} + {%- do generated_commands.append("dnf clean all") -%} + {{ generated_commands | join(' && \\\n ') }} +{%- endmacro %} \ No newline at end of file diff --git a/containers/firewall/Dockerfile.j2 b/containers/firewall/Dockerfile.j2 new file mode 100644 index 00000000..ad6a1159 --- /dev/null +++ b/containers/firewall/Dockerfile.j2 @@ -0,0 +1,27 @@ +{% import 'dockerfile-macros.j2' as macros -%} +ARG BASE_IMAGE +FROM ${BASE_IMAGE} + +RUN {{ macros.install_packages(packages) }} && \ + {{ macros.clean_after_install_packages() }} && \ + rm -rf /opt/tsg/sapp/r2 && \ + rm -rf /opt/tsg/sapp/r3 +# files +COPY files/quic/main.conf /opt/tsg/sapp/conf/quic/main.conf +COPY files/asymmetric_addr_layer.conf /opt/tsg/sapp/etc/ +COPY files/entrylist.conf /opt/tsg/sapp/etc/ +COPY files/firewall_l7_protocol.conf /opt/tsg/sapp/tsgconf/ +COPY files/http.conf /opt/tsg/sapp/conf/http/ +COPY files/necessary_plug_list.conf /opt/tsg/sapp/etc/ +COPY files/project_list.conf /opt/tsg/sapp/etc/ +COPY files/sapp_log.conf /opt/tsg/sapp/etc/ +COPY files/vlan_flipping_map.conf /opt/tsg/sapp/etc/ +COPY files/well_known_port.conf /opt/tsg/sapp/etc/ +# templates +COPY templates/* /templates/ +# scripts +COPY --chmod=755 entrypoint.sh /usr/local/bin/ + +WORKDIR /opt/tsg/sapp + +CMD ["/bin/bash"] diff --git a/containers/firewall/build.mk b/containers/firewall/build.mk new file mode 100644 index 00000000..e9c63240 --- /dev/null +++ b/containers/firewall/build.mk @@ -0,0 +1,3 @@ +rule_target := firewall +rule_prerequisites := $(DEP_ENV_FILES) $(YUM_REPO_FILE) $(DOCKERFILE_MACROS) $(BUILD_DIR)/base/$(BUILD_DONE_FILE) +rule_recipes := $(call build_image_from_dockerfile,$(rule_target),$(IMAGE_REGISTRY)/base:$(IMAGE_TAG),$(IMAGE_REGISTRY)/$(rule_target):$(IMAGE_TAG)) \ No newline at end of file diff --git a/containers/firewall/entrypoint.sh b/containers/firewall/entrypoint.sh new file mode 100644 index 00000000..6d318b12 --- /dev/null +++ 
b/containers/firewall/entrypoint.sh @@ -0,0 +1,88 @@ +#!/bin/sh -ex +source /usr/local/bin/entrypoint_public.sh + +IS_ENABLE_DOS_PROTECTOR="false" + +parse_args() +{ + if [ $# -eq 0 ]; then + echo "No arguments provided, using default configs. Skipping..." + return + fi + PARSED_OPTIONS=$(getopt -o "" -l enable_prestart,enable_interactive_startup,enable_dos_protector -- "$@") + + if [ $? -ne 0 ]; then + echo "Failed to parse arguments." + exit 1 + fi + eval set -- "$PARSED_OPTIONS" + + while true; do + case "$1" in + --enable_prestart) + IS_ENABLE_PRESTART="true" + shift ;; + --enable_interactive_startup) + IS_ENABLE_INTERACTIVE_STARTUP="true" + shift ;; + --enable_dos_protector) + IS_ENABLE_DOS_PROTECTOR="true" + shift ;; + --) + shift + break ;; + *) + echo "Unknown option: $1" + break ;; + esac + done +} + +enable_dos_protector() +{ + local podname=${HOSTNAME} + local CLUSTER_ANNOUNCE_PORT=$(read_nodeport_from_service ${podname}-8551 default) + local HEALTH_CHECK_ANNOUNCE_PORT=$(read_nodeport_from_service ${podname}-8552 default) + + sed -Ei -c "s|NODE_IP_LOCATION|${NODE_IP?}|g" /opt/tsg/sapp/tsgconf/main.conf + sed -Ei -c "s|CLUSTER_ANNOUNCE_PORT_LOCATION|${CLUSTER_ANNOUNCE_PORT?}|g" /opt/tsg/sapp/tsgconf/main.conf + sed -Ei -c "s|HEALTH_CHECK_ANNOUNCE_PORT_LOCATION|${HEALTH_CHECK_ANNOUNCE_PORT?}|g" /opt/tsg/sapp/tsgconf/main.conf +} + +# start +ldconfig + +parse_args "$@" + +mkdir -p /opt/tsg/etc/ + +render_template conflist.inf.j2 /opt/tsg/sapp/plug/conflist.inf +render_template firewall_logger_transmitter_schema.json.j2 /opt/tsg/sapp/tsgconf/firewall_logger_transmitter_schema.json +render_template firewall.inf.j2 /opt/tsg/sapp/plug/business/firewall/firewall.inf +render_template gdev.conf.j2 /opt/tsg/sapp/etc/gdev.conf +render_template http_main.conf.j2 /opt/tsg/sapp/conf/http/http_main.conf +render_template maat.conf.j2 /opt/tsg/sapp/tsgconf/maat.conf +render_template mail.conf.j2 /opt/tsg/sapp/conf/mail/mail.conf +render_template main.conf.j2 
/opt/tsg/sapp/tsgconf/main.conf +render_template sapp.toml.j2 /opt/tsg/sapp/etc/sapp.toml +render_template send_raw_pkt.conf.j2 /opt/tsg/sapp/etc/send_raw_pkt.conf +render_template spec.toml.j2 /opt/tsg/sapp/stellar_plugin/spec.toml +render_template ssl_main.conf.j2 /opt/tsg/sapp/conf/ssl/ssl_main.conf +render_template tsg_device_tag.json.j2 /opt/tsg/etc/tsg_device_tag.json + +DEVICE_SN=$(read_device_sn_from_k8s_node_info) +echo "{\"sn\": \"$DEVICE_SN\"}" > /opt/tsg/etc/tsg_sn.json + +if [ ${IS_ENABLE_DOS_PROTECTOR} == "true" ]; then + enable_dos_protector +fi + +if [ ${IS_ENABLE_PRESTART} == "true" ]; then + enable_prestart +fi + +if [ ${IS_ENABLE_INTERACTIVE_STARTUP} == "true" ]; then + enable_interactive_startup +fi + +exec /opt/tsg/sapp/sapp \ No newline at end of file diff --git a/containers/firewall/files/asymmetric_addr_layer.conf b/containers/firewall/files/asymmetric_addr_layer.conf new file mode 100644 index 00000000..f4dcd827 --- /dev/null +++ b/containers/firewall/files/asymmetric_addr_layer.conf @@ -0,0 +1,9 @@ +#layer name definition: ipv4, ipv6, ethernet,vlan, arp, gre, mpls, pppoe, tcp, udp, l2tp, ppp, pptp, gtp +#pattern: asymmetric_layer_name[layer index] +#The symbol "*" represents any layer +ethernet[*] +vlan[*] +vxlan[*] +mpls[*] +gre[*] +gtp[*] diff --git a/containers/firewall/files/entrylist.conf b/containers/firewall/files/entrylist.conf new file mode 100644 index 00000000..bb9b411a --- /dev/null +++ b/containers/firewall/files/entrylist.conf @@ -0,0 +1,24 @@ +IP +IPV6 +IPV6_RAW +TCP_ALL +TCP +UDP +PHONY +POLLING +IPSEC +L2TP +PPTP +DNS +QUIC +HTTP +MAIL +FTP +SSL +RTP +SIP +SSH +SOCKS +STRATUM +RDP +DTLS \ No newline at end of file diff --git a/containers/firewall/files/firewall_l7_protocol.conf b/containers/firewall/files/firewall_l7_protocol.conf new file mode 100644 index 00000000..87914366 --- /dev/null +++ b/containers/firewall/files/firewall_l7_protocol.conf @@ -0,0 +1,61 @@ 
+#TYPE:1:UCHAR,2:USHORT,3:USTRING,4:ULOG,5:USTRING,6:FILE,7:UBASE64,8:PACKET +#TYPE FIELD VALUE +#STRING UNCATEGORIZED 8000 +#STRING UNCATEGORIZED 8001 +#STRING UNKNOWN_OTHER 8002 +STRING DNS 32 +STRING FTP 45 +STRING FTPS 751 +STRING HTTP 67 +STRING HTTPS 68 +STRING ICMP 70 +STRING IKE 8003 +STRING MAIL 8004 +STRING IMAP 75 +STRING IMAPS 76 +STRING IPSEC 85 +STRING XMPP 94 +STRING L2TP 98 +STRING NTP 137 +STRING POP3 147 +STRING POP3S 148 +STRING PPTP 153 +STRING QUIC 2521 +STRING SIP 182 +STRING SMB 185 +STRING SMTP 186 +STRING SMTPS 187 +STRING SPDY 1469 +STRING SSH 198 +STRING SSL 199 +STRING SOCKS 8005 +STRING TELNET 209 +STRING DHCP 29 +STRING RADIUS 158 +STRING OPENVPN 336 +STRING STUN 201 +STRING TEREDO 555 +STRING DTLS 1291 +STRING DoH 8006 +STRING ISAKMP 92 +STRING MDNS 3835 +STRING NETBIOS 129 +STRING NETFLOW 130 +STRING RDP 159 +STRING RTCP 174 +STRING RTP 175 +STRING SLP 8007 +STRING SNMP 190 +STRING SSDP 197 +STRING TFTP 211 +STRING BJNP 2481 +STRING LDAP 100 +STRING RTMP 337 +STRING RTSP 176 +STRING ESNI 8008 +STRING Stratum 8169 +STRING QQ 156 +STRING WeChat 1296 +STRING WIREGUARD 3700 +STRING MMS 115 +STRING RSYNC 173 \ No newline at end of file diff --git a/containers/firewall/files/http.conf b/containers/firewall/files/http.conf new file mode 100644 index 00000000..7310e49c --- /dev/null +++ b/containers/firewall/files/http.conf @@ -0,0 +1,43 @@ +#http_special +#all regions +1 HTTP_ALL +2 HTTP_OTHER_REGIONS +#http state +3 HTTP_STATE +4 HTTP_REQ_LINE +5 HTTP_RES_LINE +6 HTTP_CONTENT +7 HTTP_UNGZIP_CONTENT +8 HTTP_MESSAGE_URL +9 HTTP_URI +#http_request +10 HTTP_HOST +11 HTTP_REFERER +12 HTTP_USER_AGENT +13 HTTP_COOKIE +14 HTTP_PROXY_AUTHORIZATION +15 HTTP_AUTHORIZATION +#http_response +16 HTTP_LOCATION +17 HTTP_SERVER +18 HTTP_ETAG +#http_general +19 HTTP_DATE +20 HTTP_TRAILER +21 HTTP_TRANSFER_ENCODING +22 HTTP_VIA +23 HTTP_PRAGMA +24 HTTP_CONNECTION +#http_content +25 HTTP_CONT_ENCODING +26 HTTP_CONT_LANGUAGE +27 HTTP_CONT_LOCATION +28 
HTTP_CONT_DISPOSITION +29 HTTP_CONT_RANGE +30 HTTP_CONT_LENGTH +31 HTTP_CONT_TYPE +32 HTTP_CHARSET +33 HTTP_EXPIRES +34 HTTP_X_FLASH_VERSION +35 HTTP_TRANSFER_LENGTH +36 Set-Cookie diff --git a/containers/firewall/files/necessary_plug_list.conf b/containers/firewall/files/necessary_plug_list.conf new file mode 100644 index 00000000..106f0dc0 --- /dev/null +++ b/containers/firewall/files/necessary_plug_list.conf @@ -0,0 +1,22 @@ +#以下插件如果加载,初始化失败, sapp平台会退出; +#插件的路径来自配置文件 ./plug/conflist.inf, 不需要加段落标识[platform],[protocol],[business]等. +#If the following plugins fail to initialize, the sapp platform will exit. +#The name of the plugin comes from the configuration ./plug/conflist.inf, section identification is not required. +./plug/protocol/sip/sip.inf +./plug/protocol/rtp/rtp.inf +./plug/protocol/ssl/ssl.inf +./plug/protocol/ssh/ssh.inf +./plug/protocol/http/http.inf +./plug/protocol/dns/dns.inf +./plug/protocol/mail/mail.inf +./plug/protocol/ftp/ftp.inf +./plug/protocol/quic/quic.inf +./plug/protocol/rdp/rdp.inf +./plug/protocol/l2tp_protocol_plug/l2tp_protocol_plug.inf +./plug/business/kni/kni.inf +./plug/business/conn_telemetry/conn_telemetry.inf +./plug/business/http_healthcheck/http_healthcheck.inf +./plug/platform/tsg_ddos_sketch/tsg_ddos_sketch.inf 1 +./plug/business/firewall/firewall.inf +./plug/stellar_on_sapp/start_loader.inf +./plug/stellar_on_sapp/defer_loader.inf \ No newline at end of file diff --git a/containers/firewall/files/project_list.conf b/containers/firewall/files/project_list.conf new file mode 100644 index 00000000..ce5e9a35 --- /dev/null +++ b/containers/firewall/files/project_list.conf @@ -0,0 +1,20 @@ +tcp_flow_stat struct +udp_flow_stat struct +tcp_deduce_flow_stat struct +POLICY_PRIORITY struct +ESTABLISH_LATENCY long +MAIL_IDENTIFY int +TSG_MASTER_INTERNAL_LABEL struct +APP_ID_LABEL struct +BASIC_PROTO_LABEL struct +USER_DEFINED_ATTRIBUTE struct +SKETCH_TRANS_LAYER_CTX_LABEL struct +SKETCH_PROTO_CTX_LABEL struct +common_link_info_c2s 
struct +common_link_info_s2c struct +common_link_info struct +JA3_FINGERPRINT_LABEL struct +DKPT_PRO_V2 struct +DPKT_PROJECT_V2 struct +PPROJECT_PRO_V2 struct +DPKT_BHSTAT_PROJECT struct diff --git a/containers/firewall/files/quic/main.conf b/containers/firewall/files/quic/main.conf new file mode 100644 index 00000000..c66168ec --- /dev/null +++ b/containers/firewall/files/quic/main.conf @@ -0,0 +1,2 @@ +[QUIC] +QUIC_PORT_LIST=443;8443;4433; diff --git a/containers/firewall/files/sapp_log.conf b/containers/firewall/files/sapp_log.conf new file mode 100644 index 00000000..0c9ad6a3 --- /dev/null +++ b/containers/firewall/files/sapp_log.conf @@ -0,0 +1,18 @@ +[global] +default format = "%d(%c), %V, %U, %m%n" +rotate lock file = /tmp/sapp_zlog.lock +file perms = 644 +[levels] +DEBUG=10 +INFO=20 +FATAL=30 +STOP=40 +[formats] +other = "%d(%c), %V, %F, %U, %m%n" +plugin = "%d(%c), %m%n" +[rules] +sapp_log.fatal "./log/runtimelog.%d(%F)", 500M ~ "./log/runtimelog.%d(%F).#2s" +sapp_plugin_log.fatal >stdout; plugin +sapp_plugin_log.info "./log/plugin.log.%d(%F)", 500M ~ "./log/plugin.log.%d(%F).#2s"; plugin +sapp_process_latency_log.fatal "./log/sapp_process_latency.log.%d(%F)", 500M ~ "./log/sapp_process_latency.log.%d(%F).#2s" +!.fatal "./log/%c.%d(%F)", 500M ~ "./log/%c.%d(%F).#2s"; other diff --git a/containers/firewall/files/vlan_flipping_map.conf b/containers/firewall/files/vlan_flipping_map.conf new file mode 100644 index 00000000..d4a7248d --- /dev/null +++ b/containers/firewall/files/vlan_flipping_map.conf @@ -0,0 +1,104 @@ +#for inline a device vlan flipping +#数据包来自C路由器端, 即C2I(I2E)方向, +#数据包来自I路由器端, 即I2C(E2I)方向, +#平台会根据vlan_id,设置当前包route_dir的值, 以便上层业务插件做两个方向的流量统计, +#如果一对vlan_id写反了, 网络是通的, 但是I2E,E2I的流量统计就颠倒了. 
+#配置文件格式, pattern: +#来自C路由器vlan_id 来自I路由器vlan_id 是否开启mac地址翻转 +#C_rout r_vlan_id I_router_vlan_id mac_flipping_enable +1000 1001 0 +1002 1003 0 +1004 1005 0 +1006 1007 0 +1008 1009 0 +1010 1011 0 +1012 1013 0 +1014 1015 0 +1016 1017 0 +1018 1019 0 +1020 1021 0 +1022 1023 0 +1024 1025 0 +1026 1027 0 +1028 1029 0 +1030 1031 0 +1032 1033 0 +1034 1035 0 +1036 1037 0 +1038 1039 0 +1040 1041 0 +1042 1043 0 +1044 1045 0 +1046 1047 0 +1048 1049 0 +1050 1051 0 +1052 1053 0 +1054 1055 0 +1056 1057 0 +1058 1059 0 +1060 1061 0 +1062 1063 0 +1064 1065 0 +1066 1067 0 +1068 1069 0 +1070 1071 0 +1072 1073 0 +1074 1075 0 +1076 1077 0 +1078 1079 0 +1080 1081 0 +1082 1083 0 +1084 1085 0 +1086 1087 0 +1088 1089 0 +1090 1091 0 +1092 1093 0 +1094 1095 0 +1096 1097 0 +1098 1099 0 +1100 1101 0 +1102 1103 0 +1104 1105 0 +1106 1107 0 +1108 1109 0 +1110 1111 0 +1112 1113 0 +1114 1115 0 +1116 1117 0 +1118 1119 0 +1120 1121 0 +1122 1123 0 +1124 1125 0 +1126 1127 0 +4000 4001 0 +4002 4003 0 +4004 4005 0 +4006 4007 0 +4008 4009 0 +4010 4011 0 +4012 4013 0 +4014 4015 0 +4016 4017 0 +4018 4019 0 +4020 4021 0 +4022 4023 0 +4024 4025 0 +4026 4027 0 +4028 4029 0 +4030 4031 0 +4032 4033 0 +4034 4035 0 +4036 4037 0 +4038 4039 0 +4040 4041 0 +4042 4043 0 +4044 4045 0 +4046 4047 0 +4048 4049 0 +4050 4051 0 +4052 4053 0 +4054 4055 0 +4056 4057 0 +4058 4059 0 +4060 4061 0 +4062 4063 0 \ No newline at end of file diff --git a/containers/firewall/files/well_known_port.conf b/containers/firewall/files/well_known_port.conf new file mode 100644 index 00000000..e4bf2c1a --- /dev/null +++ b/containers/firewall/files/well_known_port.conf @@ -0,0 +1,9 @@ +# The following ports are considered as server, when creating a new UDP stream or TCP stream without SYN(SYN/ACK) packet. +# You can add other ports according to your needs. 
+[TCP] +#http +8080 + +[UDP] +#OICQ +8000 diff --git a/containers/firewall/manifest.yaml b/containers/firewall/manifest.yaml new file mode 100644 index 00000000..573bdc42 --- /dev/null +++ b/containers/firewall/manifest.yaml @@ -0,0 +1,131 @@ +packages: + - name: sapp-pr + version: 4.3.67.07feab9 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: tcpdump_mesa + version: 1.0.13.6ec67f5 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/framework" + - name: conn_telemetry + version: 1.0.3.4ef6df6 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: firewall + version: 3.5.1.d5e256a + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg" + - name: glimpse_detector + version: 3.2.0.0069e3b + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: qdpi_detector + version: 5.0.2.90682ec + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: dns + version: 2.1.7.1da8dfa + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: ftp + version: 1.0.16.d996236 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: http + version: 2.0.20.0571d0b + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: mail + version: 1.0.22.431a81f + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: quic + version: 2.0.11.1ab2559 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: ssl + version: 3.2.0.93d17f6 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: rtp + version: 
1.0.7.530ac76 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: ssh + version: 2.1.7.b053e65 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: dtls + version: 2.0.5.a559144 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: mesa_sip + version: 2.1.1.6504027 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: deal_socks + version: 1.0.4.329bba3 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: stratum + version: 1.1.3.82ba152 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: rdp + version: 1.0.3.f392ffd + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: session_flags + version: 2.4.0.579bcde + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: stat_policy_enforcer + version: 3.5.1.3a39801 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: sf_classifier + version: 2.2.0.1f91efa + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: stellar-on-sapp + version: 2.1.7.4e4f933 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg" + - name: policy_sketch + version: 1.2.0.43bd6ec + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: traffic_sketch + version: 1.1.5.a38497d + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/sapp" + - name: http_healthcheck + version: 2.0.2.969442a + download_command: dnf + install_command: rpm + install_command_options: "--prefix 
/opt/tsg/sapp" \ No newline at end of file diff --git a/containers/firewall/templates/conflist.inf.j2 b/containers/firewall/templates/conflist.inf.j2 new file mode 100644 index 00000000..97daba1a --- /dev/null +++ b/containers/firewall/templates/conflist.inf.j2 @@ -0,0 +1,54 @@ +[platform] +./plug/stellar_on_sapp/start_loader.inf + +[protocol] +{%- if decoders.SOCKS == True %} +./plug/protocol/deal_socks/deal_socks.inf +{%- endif %} +{%- if decoders.SIP == True %} +./plug/protocol/sip/sip.inf +{%- endif %} +{%- if decoders.RTP == True %} +./plug/protocol/rtp/rtp.inf +{%- endif %} +{%- if decoders.SSL == True %} +./plug/protocol/ssl/ssl.inf +{%- endif %} +{%- if decoders.HTTP == True %} +./plug/protocol/http/http.inf +{%- endif %} +{%- if decoders.DNS == True %} +./plug/protocol/dns/dns.inf +{%- endif %} +{%- if decoders.MAIL == True %} +./plug/protocol/mail/mail.inf +{%- endif %} +{%- if decoders.FTP == True %} +./plug/protocol/ftp/ftp.inf +{%- endif %} +{%- if decoders.QUIC == True %} +./plug/protocol/quic/quic.inf +{%- endif %} +./plug/protocol/l2tp_protocol_plug/l2tp_protocol_plug.inf +{%- if decoders.SSH == True %} +./plug/protocol/ssh/ssh.inf +{%- endif %} +{%- if decoders.STRATUM == True %} +./plug/protocol/stratum/stratum.inf +{%- endif %} +{%- if decoders.RDP == True %} +./plug/protocol/rdp/rdp.inf +{%- endif %} +{%- if decoders.DTLS == True %} +./plug/protocol/dtls/dtls.inf +{%- endif %} + +[business] +{%- if firewall.enable == True %} +./plug/business/firewall/firewall.inf +{%- endif %} +./plug/stellar_on_sapp/defer_loader.inf +./plug/business/http_healthcheck/http_healthcheck.inf +{%- if decoders.SSL == True %} +./plug/protocol/ssl/ssl_defer.inf +{%- endif %} \ No newline at end of file diff --git a/containers/firewall/templates/firewall.inf.j2 b/containers/firewall/templates/firewall.inf.j2 new file mode 100644 index 00000000..e070c31c --- /dev/null +++ b/containers/firewall/templates/firewall.inf.j2 @@ -0,0 +1,77 @@ +[PLUGINFO] +PLUGNAME=FIREWEALL 
+SO_PATH=./plug/business/firewall/firewall.so +INIT_FUNC=firewall_init +DESTROY_FUNC=firewall_destory + +{%- if decoders.HTTP == True %} +[HTTP] +FUNC_FLAG=ALL +FUNC_NAME=firewall_http_plug_entry +{%- endif %} + +{%- if decoders.SSL == True %} +[SSL] +FUNC_FLAG=SSL_CLIENT_HELLO,SSL_SERVER_HELLO,SSL_APPLICATION_DATA,SSL_CERTIFICATE_DETAIL +FUNC_NAME=firewall_ssl_plug_entry +{%- endif %} + +{%- if decoders.DNS == True %} +[DNS] +FUNC_FLAG=ALL +FUNC_NAME=firewall_dns_plug_entry +{%- endif %} + +{%- if decoders.MAIL == True %} +[MAIL] +FUNC_FLAG=ALL +FUNC_NAME=firewall_mail_plug_entry +{%- endif %} + +{%- if decoders.RTP == True %} +[RTP] +FUNC_FLAG=ALL +FUNC_NAME=firewall_rtp_plug_entry +{%- endif %} + +{%- if decoders.SIP == True %} +[SIP] +FUNC_FLAG=ALL +FUNC_NAME=firewall_sip_plug_entry +{%- endif %} + +{%- if decoders.FTP == True %} +[FTP] +FUNC_FLAG=ALL +FUNC_NAME=firewall_ftp_plug_entry +{%- endif %} + +{%- if decoders.QUIC == True %} +[QUIC] +FUNC_FLAG=QUIC_CLIENT_HELLO,QUIC_SERVER_HELLO,QUIC_CACHED_CERT,QUIC_COMM_CERT,QUIC_CERT_CHAIN,QUIC_VERSION,QUIC_APPLICATION_DATA +FUNC_NAME=firewall_quic_plug_entry +{%- endif %} + +{%- if decoders.DTLS == True %} +[DTLS] +FUNC_FLAG=ALL +FUNC_NAME=firewall_dtls_plug_entry +{%- endif %} + +{%- if decoders.STRATUM == True %} +[STRATUM] +FUNC_FLAG=ALL +FUNC_NAME=firewall_stratum_plug_entry +{%- endif %} + +{%- if decoders.RDP == True %} +[RDP] +FUNC_FLAG=ALL +FUNC_NAME=firewall_rdp_plug_entry +{%- endif %} + +{%- if decoders.SSH == True %} +[SSH] +FUNC_FLAG=ALL +FUNC_NAME=firewall_ssh_plug_entry +{%- endif %} diff --git a/containers/firewall/templates/firewall_logger_transmitter_schema.json.j2 b/containers/firewall/templates/firewall_logger_transmitter_schema.json.j2 new file mode 100644 index 00000000..971e528c --- /dev/null +++ b/containers/firewall/templates/firewall_logger_transmitter_schema.json.j2 @@ -0,0 +1,379 @@ +{% import '/templates/macros.j2' as macros -%} +{ + {%- if firewall.logs.enable == True %} + "switch": 
"on", + {%- else %} + "switch": "off", + {%- endif %} + "channel_list": [ + {%- if external_resources.olap.udp_collectors.enable == True %} + { + "channel": "udpsock", + "collector": "{{ macros.address_port_pairs_render(external_resources.olap.udp_collectors.addresses,",") }}" + }, + {%- endif %} + { + "channel": "kafka", + "broker_list": "{{ macros.address_port_pairs_render(external_resources.olap.kafka_brokers.addresses,",") }}", + "sasl_username": "{{ macros.safe_read(external_resources,"olap.kafka_brokers.sasl_username") }}", + "sasl_password": "{{ macros.safe_read(external_resources,"olap.kafka_brokers.sasl_password") }}", + "compression": "snappy", + "refresh_interval_ms": "600000", + "send_queue_max_msg": "1000000", + "required_acks": "1" + } + ], + "format_list": [ + "json", + "ipfix", + "mpack" + ], + "ringbuff": { + "size": {{ firewall.logs.ringbuf.size }}, + "num": 2 + }, + "transmitter_list": [ + {%- if external_resources.olap.udp_collectors.enable == True %} + { + "switch": "on", + "async": "off", + "name": "IPFIX-TEMPLATE", + "topic": "IPFIX-TEMPLATE", + "mode": [ + { + "channel": "udpsock", + "format": [ + "ipfix" + ] + } + ] + }, + {%- endif %} + { + {%- if session_record.enable == True %} + "switch": "on", + {%- else %} + "switch": "off", + {%- endif %} + "async": "on", + "name": "SESSION-RECORD", + "topic": "SESSION-RECORD", + "client_id": "SESSION-RECORD", + "mode": [ + {%- if external_resources.olap.udp_collectors.enable == True %} + { + "channel": "udpsock", + "format": [ + "ipfix" + ] + }, + {%- endif %} + { + "channel": "kafka", + "format": [ + "json" + ] + } + ] + }, + { + {%- if transaction_record.enable_http == True %} + "switch": "on", + {%- else %} + "switch": "off", + {%- endif %} + "async": "on", + "name": "HTTP-TRANSACTION-RECORD", + "topic": "TRANSACTION-RECORD", + "client_id": "TRANSACTION-RECORD", + "mode": [ + { + "channel": "kafka", + "format": [ + "json" + ] + } + ] + }, + { + {%- if transaction_record.enable_mail == True %} + 
"switch": "on", + {%- else %} + "switch": "off", + {%- endif %} + "async": "on", + "name": "MAIL-TRANSACTION-RECORD", + "topic": "TRANSACTION-RECORD", + "client_id": "TRANSACTION-RECORD", + "mode": [ + { + "channel": "kafka", + "format": [ + "json" + ] + } + ] + }, + { + {%- if transaction_record.enable_dns == True %} + "switch": "on", + {%- else %} + "switch": "off", + {%- endif %} + "async": "on", + "name": "DNS-TRANSACTION-RECORD", + "topic": "TRANSACTION-RECORD", + "client_id": "TRANSACTION-RECORD", + "mode": [ + { + "channel": "kafka", + "format": [ + "json" + ] + } + ] + }, + { + {%- if voip_record.enable_sip == True %} + "switch": "on", + {%- else %} + "switch": "off", + {%- endif %} + "async": "on", + "name": "SIP-VOIP-RECORD", + "topic": "VOIP-RECORD", + "client_id": "VOIP-RECORD", + "mode": [ + { + "channel": "kafka", + "format": [ + "json" + ] + } + ] + }, + { + {%- if voip_record.enable_rtp == True %} + "switch": "on", + {%- else %} + "switch": "off", + {%- endif %} + "async": "on", + "name": "RTP-VOIP-RECORD", + "topic": "VOIP-RECORD", + "client_id": "VOIP-RECORD", + "mode": [ + { + "channel": "kafka", + "format": [ + "json" + ] + } + ] + }, + { + {%- if file_stream_record.enable == True %} + "switch": "on", + {%- else %} + "switch": "off", + {%- endif %} + "async": "on", + "name": "POLICY-PACKET-TRAFFIC-FILE-STREAM-RECORD", + "topic": "TRAFFIC-POLICY-CAPTURE-FILE-STREAM-RECORD", + "client_id": "TRAFFIC-POLICY-CAPTURE-FILE-STREAM-RECORD", + "mode": [ + { + "channel": "kafka", + "format": [ + "mpack" + ] + } + ] + }, + { + {%- if file_stream_record.enable == True %} + "switch": "on", + {%- else %} + "switch": "off", + {%- endif %} + "async": "on", + "name": "HTTP-REQ-BODY-TRAFFIC-FILE-STREAM-RECORD", + "topic": "TRAFFIC-HTTP-FILE-STREAM-RECORD", + "client_id": "TRAFFIC-HTTP-FILE-STREAM-RECORD", + "mode": [ + { + "channel": "kafka", + "format": [ + "mpack" + ] + } + ] + }, + { + {%- if file_stream_record.enable == True %} + "switch": "on", + {%- else %} 
+ "switch": "off", + {%- endif %} + "async": "on", + "name": "HTTP-RES-BODY-TRAFFIC-FILE-STREAM-RECORD", + "topic": "TRAFFIC-HTTP-FILE-STREAM-RECORD", + "client_id": "TRAFFIC-HTTP-FILE-STREAM-RECORD", + "mode": [ + { + "channel": "kafka", + "format": [ + "mpack" + ] + } + ] + }, + { + {%- if file_stream_record.enable == True %} + "switch": "on", + {%- else %} + "switch": "off", + {%- endif %} + "async": "on", + "name": "MAIL-EML-TRAFFIC-FILE-STREAM-RECORD", + "topic": "TRAFFIC-EML-FILE-STREAM-RECORD", + "client_id": "TRAFFIC-EML-FILE-STREAM-RECORD", + "mode": [ + { + "channel": "kafka", + "format": [ + "mpack" + ] + } + ] + }, + { + {%- if file_stream_record.enable == True %} + "switch": "on", + {%- else %} + "switch": "off", + {%- endif %} + "async": "on", + "name": "RTP-PACKET-TRAFFIC-FILE-STREAM-RECORD", + "topic": "TRAFFIC-RTP-FILE-STREAM-RECORD", + "client_id": "TRAFFIC-RTP-FILE-STREAM-RECORD", + "mode": [ + { + "channel": "kafka", + "format": [ + "mpack" + ] + } + ] + }, + { + {%- if packet_capture.enable == True %} + "switch": "on", + {%- else %} + "switch": "off", + {%- endif %} + "async": "on", + "name": "TROUBLESHOOTING-FILE-STREAM-RECORD", + "topic": "TROUBLESHOOTING-FILE-STREAM-RECORD", + "client_id": "TROUBLESHOOTING-FILE-STREAM-RECORD", + "mode": [ + { + "channel": "kafka", + "format": [ + "mpack" + ] + } + ] + }, + { + "switch": "on", + "async": "off", + "name": "DOS-SKETCH-RECORD", + "topic": "DOS-SKETCH-RECORD", + "client_id": "DOS-SKETCH-RECORD", + "mode": [ + { + "channel": "kafka", + "format": [ + "json" + ] + } + ] + }, + { + "switch": "on", + "async": "off", + "name": "POLICY-RULE-METRIC", + "topic": "POLICY-RULE-METRIC", + "client_id": "POLICY-RULE-METRIC", + "mode": [ + { + "channel": "kafka", + "format": [ + "json" + ] + } + ] + }, + { + "switch": "on", + "async": "off", + "name": "NETWORK-TRAFFIC-METRIC", + "topic": "NETWORK-TRAFFIC-METRIC", + "client_id": "NETWORK-TRAFFIC-METRIC", + "mode": [ + { + "channel": "kafka", + "format": [ + 
"json" + ] + } + ] + }, + { + "switch": "on", + "async": "off", + "name": "TRAFFIC-TOP-METRIC", + "topic": "TRAFFIC-TOP-METRIC", + "client_id": "TRAFFIC-TOP-METRIC", + "mode": [ + { + "channel": "kafka", + "format": [ + "json" + ] + } + ] + }, + { + "switch": "on", + "async": "off", + "name": "STATISTICS-RULE-METRIC", + "topic": "STATISTICS-RULE-METRIC", + "client_id": "STATISTICS-RULE-METRIC", + "mode": [ + { + "channel": "kafka", + "format": [ + "json" + ] + } + ] + }, + { + "switch": "on", + "async": "off", + "name": "OBJECT-STATISTICS-METRIC", + "topic": "OBJECT-STATISTICS-METRIC", + "client_id": "OBJECT-STATISTICS-METRIC", + "mode": [ + { + "channel": "kafka", + "format": [ + "json" + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/containers/firewall/templates/gdev.conf.j2 b/containers/firewall/templates/gdev.conf.j2 new file mode 100644 index 00000000..5d350791 --- /dev/null +++ b/containers/firewall/templates/gdev.conf.j2 @@ -0,0 +1,6 @@ +{% import '/templates/macros.j2' as macros -%} +[Module] +pcapdevice={{ nic_raw_name }} +sendto_gdev_card={{ nic_raw_name }} +sendto_gdev_ip={{ macros.safe_read(etherfabric_settings,"keepalive.ip") }} +gdev_status_switch=1 diff --git a/containers/firewall/templates/http_main.conf.j2 b/containers/firewall/templates/http_main.conf.j2 new file mode 100644 index 00000000..af16a52e --- /dev/null +++ b/containers/firewall/templates/http_main.conf.j2 @@ -0,0 +1,37 @@ +[FUNCTION] +switch_no_biz=1 + +#0 means close stat +stat_cycle=0 +#stat output screen 0: screen 1: file +stat_screen_print=0 +stat_file=./log/http/http_stat.log + +#ungzip +{%- if decoders.HTTP_GZIP == True %} +ungzip_switch=1 +{%- else %} +ungzip_switch=0 +{%- endif %} + +#support proxy +proxy_switch=1 + +#single-way traffic need http session num, 0 means no this function +singleway_maxseq=2 + +#0: field callback mode(default) 1:batch callback mode +callback_mode=0 + +#batch field maxnum when http_all or http_other +batch_field_maxnum=32 + +#check 
HEAD when s2c one-way +s2c_head_check_switch=1 + +[LOG] +#FATAL:wrong info +#INFO: lostlen; special proc ;proxy info +#DEBUG: pending and close info; all url; +log_level=30 +log_path=./log/http/runtime diff --git a/containers/firewall/templates/maat.conf.j2 b/containers/firewall/templates/maat.conf.j2 new file mode 100644 index 00000000..64596a93 --- /dev/null +++ b/containers/firewall/templates/maat.conf.j2 @@ -0,0 +1,42 @@ +{% import '/templates/macros.j2' as macros -%} +[CM_STATIC_MAAT] +###file, json, redis +MAAT_MODE=redis +STAT_SWITCH=1 +PERF_SWITCH=0 +HIT_GROUP_SWITCH=1 +TABLE_INFO=tsgconf/firewall_cm_maat_tableinfo.json +STAT_FILE=metrics/firewall_cm_maat_stat.json +EFFECT_INTERVAL_MS=1000 +GARBAGE_COLLECT_MS=60000 +RULE_UPDATE_CHECK_INTERVAL_MS=1000 +REDIS_IP={{ macros.cm_address(external_resources.cm) }} +REDIS_PORT={{ macros.cm_port(external_resources.cm) }} +REDIS_INDEX={{ vsys_id }} +JSON_CFG_FILE=tsgconf/firewall_cm_maat_rule.json +INC_CFG_DIR=tsgrule/inc/index/ +FULL_CFG_DIR=tsgrule/full/index/ +EFFECTIVE_RANGE_FILE=/opt/tsg/etc/tsg_device_tag.json +LOG_PATH="log/firewall.cm.maat" + +[SD_DYNAMIC_MAAT] +MAAT_MODE=redis +STAT_SWITCH=1 +PERF_SWITCH=1 +TABLE_INFO=tsgconf/firewall_sd_maat_tableinfo.json +STAT_FILE=metrics/firewall_sd_maat_stat.json +EFFECT_INTERVAL_MS={{ external_resources.sd.policy_effect_interval_ms }} +GARBAGE_COLLECT_MS={{ external_resources.sd.policy_garbage_collection_interval_ms }} +RULE_UPDATE_CHECK_INTERVAL_MS={{ external_resources.sd.policy_update_check_interval_ms }} +REDIS_IP={{ macros.sd_address(external_resources.sd) }} +REDIS_PORT_NUM=1 +REDIS_PORT={{ macros.sd_port(external_resources.sd) }} +REDIS_INDEX={{ external_resources.sd.db_index }} +JSON_CFG_FILE=tsgconf/firewall_sd_maat_rule.json +INC_CFG_DIR=tsgrule/inc/index/ +FULL_CFG_DIR=tsgrule/full/index/ +EFFECTIVE_RANGE_FILE=/opt/tsg/etc/tsg_device_tag.json +LOG_PATH="log/firewall.sd.maat" + +[MAAT] +ACCEPT_TAGS={"tags":[{{ macros.device_tag_list(device) }}]} diff --git 
a/containers/firewall/templates/mail.conf.j2 b/containers/firewall/templates/mail.conf.j2 new file mode 100644 index 00000000..5eb20899 --- /dev/null +++ b/containers/firewall/templates/mail.conf.j2 @@ -0,0 +1,24 @@ +[MODULE] +LOG_PATH=./log/mail +LOG_LEVEL=20 + +#USER_DEFINE_REGION=X-mailer,Message-ID + +#IMAP BODY/BODYSTRUCTURE information +HTABLE_SIZE=65536 +HTABLE_EXPIRE_TIME=1800 + +#whether to decode BASE64/QP, 0:OFF, 1:ON(default) +{%- if decoders.MAIL_BASE64 == True %} +TRANS_DECODE_SWITCH=1 +{%- else %} +TRANS_DECODE_SWITCH=0 +{%- endif %} +#0: callback biz per packet; 1: callback biz per line(default) +CALLBACK_BIZ_LINE=1 + +STAT_FIELD_CYCLE=10 +STAT_FIELD_TRIG=0 +STAT_FIELD_APPNAME=MAIL_PRO +STAT_FIELD_DST_IP=10.10.10.68 +STAT_FIELD_DST_PORT=8125 diff --git a/containers/firewall/templates/main.conf.j2 b/containers/firewall/templates/main.conf.j2 new file mode 100644 index 00000000..9453650e --- /dev/null +++ b/containers/firewall/templates/main.conf.j2 @@ -0,0 +1,144 @@ +{% import '/templates/macros.j2' as macros -%} +[MAAT] +PROFILE="./tsgconf/maat.conf" +{%- if external_resources.sd.enable is defined and external_resources.sd.enable == True %} +DYNAMIC_MAPPING_MAAT_SWITCH=1 +{%- else %} +DYNAMIC_MAPPING_MAAT_SWITCH=0 +{%- endif %} + +DEVICE_TAG_FILE=/opt/tsg/etc/tsg_device_tag.json +ACCEPT_TAGS={"tags":[{{ macros.device_tag_list(device) }}]} + +[TSG_LOG] +IPFIX_SCHEMA_PROFILE=./tsgconf/firewall_logger_ipfix_schema.json +LOGGER_SCHEMA_PROFILE=./tsgconf/firewall_logger_transmitter_schema.json + +TRAFFIC_VSYSTEM_ID={{ vsys_id }} + +{%- if firewall.logs.contains_app_id.enable == True %} +SEND_APP_ID_SWITCH=1 +{%- else %} +SEND_APP_ID_SWITCH=0 +{%- endif %} +{%- if firewall.logs.contains_dns_resource_record.enable == True %} +SEND_DNS_RR_SWITCH=1 +{%- else %} +SEND_DNS_RR_SWITCH=0 +{%- endif %} + +[SYSTEM] +DATACENTER_ID={{ session_id_generator.snowflake_worker_id_base }} +LOG_LEVEL=30 +LOG_PATH="firewall.log" +DEVICE_SEQ_IN_DATA_CENTER={{ 
session_id_generator.snowflake_worker_id_offset }} +SERVICE_CHAINING_SID={{ sid.sce }} +SHAPING_SID={{ sid.shaping }} +PROXY_SID={{ sid.proxy }} +{%- if decoders.SSL_JA3 == True %} +GENERATE_JA3_FINGERPRINT=1 +{%- else %} +GENERATE_JA3_FINGERPRINT=0 +{%- endif %} +MAX_SCAN_TCP_PKT_COUNT=8 +MAX_SCAN_UDP_PKT_COUNT=8 +PERIODIC_SCAN_INTERVAL_MS=120000 +OSFP_DB_JSON_PATH=tsgconf/firewall_osfp_db.json +L7_PROTOCOL_FILE=./tsgconf/firewall_l7_protocol.conf + +{% if appsketch.context_based_detector == True and appsketch.enable == True %} +APPSKETCH_SWITCH=1 +{%- else %} +APPSKETCH_SWITCH=0 +{%- endif %} + +[FIREWALL] +# hijack, replace +PACKET_RESPONSE_MODE=replace +HTTP_PAGE200=./tsgconf/HTTP200.html +HTTP_PAGE204=./tsgconf/HTTP204.html +HTTP_PAGE403=./tsgconf/HTTP403.html +HTTP_PAGE404=./tsgconf/HTTP404.html + +[FIREWALL_LOCAL_STAT] +STAT_NAME="firewall" +STAT_INTERVAL_TIME_S=5 +STAT_OUTPATH="metrics/firewall_local_file_stat.json" + +[APP_SKETCH_FEEDBACK] +QOS=0 +PUBLISH_TOPIC="APP_SIGNATURE_ID" +#CLIENT_ID= +#BROKER_IP= +#BROKER_PORT= + +[qdpi_detector] +debug_swtich=30 +intput_max_packet=20 +qmdpi_engine_config=injection_mode=stream;nb_workers={{ sapp_affinity | length }};nb_flows=8000;basic_dpi_enable=1;classification_cache_enable=0;fm_flow_table_alloc_mode=0 + +[TRAFFIC_MIRROR] +{%- if traffic_mirror.enable_raw_traffic == True %} +TRAFFIC_MIRROR_ENABLE=1 +{%- else %} +TRAFFIC_MIRROR_ENABLE=0 +{%- endif %} +NIC_NAME="{{ macros.safe_read(nic_mirror_name,"firewall") }}" +APP_NAME="firewall-mirror-{{ app_symbol_index }}" +DEFAULT_VLAN_ID=0 + +[PROTO_IDENTIFY] +MAX_IDENTIFY_PACKETS=10 + +[SESSION_FLAGS] +#RANDOM_LOOKING_JUDGE_LIST={"random_looking_judge_list":[ "frequency", "block_frequency", "cumulative_sums", "runs", "longest_run", "rank", "non_overlapping_template_matching", "overlapping_template_matching", "universal", "random_excursions", "random_excursions_variant", "poker_detect", "runs_distribution", "self_correlation", "binary_derivative" ]} +FET_ENABLED=1 
+RANDOM_LOOKING_UDP_IGNORE_PKTS=-1 +RANDOM_LOOKING_JUDGE_LIST={"random_looking_judge_list":[]} +TUNNELING_PCRE_LIST={"tunneling_pcre_list":["(B|C)(d){3,5}(a|b|c|d)(A|B)b(A|B|C|D)", "(B|C)(d){3,5}(a|b|c|d)Aa(A|B|C|D)", "(B|C)(d){2}(b|c)(A|B)b(A|B|C|D)", "(B|C)(d){2}(b|c)Aa(A|B|C|D)"]} + +[SF_CLASSIFIER] +SYNC_MODE=1 + +{% if stat_policy_enforcer.enable == True -%} +[STAT_POLICY_ENFORCER] +CYCLE_INTERVAL_S=1 +SESSION_UPDATE_MS=250 +{%- endif %} + +{% if traffic_sketch.enable == True -%} +[TRAFFIC_SKETCH] +APP_AND_TRAFFIC_CYCLE_S=1 +APP_AND_TRAFFIC_CYCLE_UPDATE_MS=250 +TOPK_CYCLE_S=60 +TOPK_UPDATE_MS=1000 +DOS_CYCLE_S=60 +DOS_UPDATE_MS=1000 +SWITCH_TRAFFIC_SKETCH=1 +{%- endif %} + +{% if policy_sketch.enable == True -%} +[POLICY_SKETCH] +OBJECT_CYCLE_S=1 +OBJECT_UPDATE_MS=250 +RULE_HITS_CYCLE_S=1 +RULE_HITS_UPDATE_MS=250 +{%- endif %} + +[DOS_PROTECTOR] +{% if dos_protector.enable == True -%} +DOS_PROTECTOR_ENABLE=1 +OUTPUT_INTERVAL_MS=60000 +METRICS_OUTPUT_INTERVAL_MS=60000 +SWARMKV_CLUSTER_NAME="dos_protection_vsys{{ vsys_id }}" +SWARMKV_NODE_IP="0.0.0.0" +SWARMKV_NODE_PORT=8551 +SWARMKV_CONSUL_IP="NODE_IP_LOCATION" +SWARMKV_CONSUL_PORT=8500 +SWARMKV_CLUSTER_ANNOUNCE_IP="NODE_IP_LOCATION" +SWARMKV_CLUSTER_ANNOUNCE_PORT=CLUSTER_ANNOUNCE_PORT_LOCATION +SWARMKV_HEALTH_CHECK_PORT=8552 +SWARMKV_HEALTH_CHECK_ANNOUNCE_PORT=HEALTH_CHECK_ANNOUNCE_PORT_LOCATION +{%- else %} +DOS_PROTECTOR_ENABLE=0 +{%- endif %} diff --git a/containers/firewall/templates/sapp.toml.j2 b/containers/firewall/templates/sapp.toml.j2 new file mode 100644 index 00000000..7c72a2b1 --- /dev/null +++ b/containers/firewall/templates/sapp.toml.j2 @@ -0,0 +1,274 @@ +################################################################################################### +# NOTE: +# The format of this file is toml (https://github.com/cktan/tomlc99) +# to make vim editor display colorful and human readable, +# you can create a symbolic links named sapp.ini to sapp.toml, ln -sf sapp.toml sapp.ini 
+################################################################################################### + +[SYSTEM] +instance_name = "firewall-{{ app_symbol_index }}" + +[CPU] +### note, bind_mask, if you do not want to bind thread to special CPU core, keep it empty as [] +worker_threads={{ sapp_affinity | length }} +send_only_threads_max=0 +bind_mask=[{{ sapp_affinity | join (',') }}] + +[MEM] +dictator_enable=0 + +[PACKET_IO] + + [overlay_tunnel_definition] +### note, since 2020-10-01, L2-L3 tunnel(VLAN,MPLS,PPPOE,etc.) is processed and offloaded by mrtunnat, +### after 2020-10-01, sapp supports L2-L3 tunnel(VLAN,MPLS,PPPOE,etc.) without mrtunnat. + l2_l3_tunnel_support=1 + +### note, optional value is [none, vxlan, nf] + overlay_mode="nf" + [packet_io.feature] + + destroy_all_plug_enabled = 0 + + ### note, used to represent inbound or outbound direction value, + ### because it comes from a third-party device, so it needs to be specified manually, + ### if inbound_route_dir=1, then outbound_route_dir=0, vice versa, + ### in other words, outbound_route_dir = 1 ^ inbound_route_dir; + inbound_route_dir=1 + +### note, BSD_packet_filter, if you do not want to set any filter rule, keep it empty as "" + BSD_packet_filter="" + +### note, same as tcpdump -Q/-P arg, possible values are `in', `out' and `inout', default is "in" + pcap_capture_direction="in" + + +### note, inject_pkt_mode options: [sys_route, vxlan_by_inline_device, raw_ethernet_single_gateway, raw_ethernet_multi_gateway] +### sys_route: send ip(ipv6) packet by system route table, this is the default mode in mirror mode; +### vxlan_by_inline_device: encapsulate the inject packet with vxlan, and then send it to the inline device by udp socket. +### raw_ethernet_single_gateway: send layer2 ethernet packet to a specific gateway in the same broadcast domain. +### raw_ethernet_multi_gateway: send layer2 ethernet packet to multiple gateways in the same broadcast domain.
+ inject_pkt_mode="default" + inject_pkt_prepend_segment_id={{ sid.inject_adapter }} +### note, this config is valid if inject_pkt_mode==vxlan_by_inline_device, means udp socket src port. + #inject_mode_inline_device_sport=54789 + +### note, this config is valid if inject_pkt_mode==raw_ethernet_single_gateway. + #inject_mode_single_gateway_device="eth1" +### inject_mode_single_gateway_src_mac has lower priority than getting smac from inject_mode_single_gateway_device + #inject_mode_single_gateway_src_mac="00:11:22:77:88:99" + #inject_mode_single_gateway_dst_mac="00:11:22:33:44:55" + #dumpfile_sleep_time_before_exit=3 + +### note, deployment.mode options: [mirror, inline, transparent] + [packet_io.deployment] + mode="inline" + +### note, interface.type options: [pag,pcap,marsio] + [packet_io.internal.interface] + type="marsio" + name="{{ nic_raw_name }}" + [packet_io.external.interface] + type="pcap" + name="lo" + + [packet_io.polling] +### note, polling_priority = call sapp_recv_pkt every call polling_entry times, + polling_priority=100 + + [packet_io.under_ddos] +### note, to reduce the impact of ddos attack, set some streams to bypass; all plugins will not process these streams +{%- if overload_protection.enable == True %} +stream_bypass_enabled=1 +{%- else %} +stream_bypass_enabled=0 +{%- endif %} + + +### note, cpu usage value is percent, for example, config value is 85, means 85%, valid range: [1,100] +### sapp changes to bypass state immediately when realtime cpu usage > bypass_trigger_cpu_usage +bypass_trigger_cpu_usage={{ overload_protection.detect_threshold_cpu_usages }} + + +### note, unit of get_cpu_usage_interval is milliseconds(ms) + get_cpu_usage_interval={{ overload_protection.detect_interval_in_ms }} +### note, use the average of the last $smooth_avg_window times as current realtime value + smooth_avg_window={{ overload_protection.detect_smooth_avg_window }} + + decrease_ratio="0.95" + increase_ratio="1.005" +### note, unit of recovery_observe_time is second(s) + 
recovery_observe_time={{ overload_protection.recovery_detect_cycle_in_sec }} + +[PROTOCOL_FEATURE] + ipv6_decapsulation_enabled=1 + ipv6_send_packet_enabled=1 + tcp_drop_pure_ack_pkt=0 + tcp_syn_option_parse_enabled=1 + skip_not_ip_layer_over_eth=0 + skip_gtp_seq_field_for_inject=1 + +[DUPLICATE_PKT] +[dup_pkt.traffic.original] + kickout_udp_stream_enabled=0 +{%- if session_manager.tcp_duplicated_packet_filter == True %} + original_ipv4_tcp_enabled=1 +{%- else %} + original_ipv4_tcp_enabled=0 +{%- endif %} +{%- if session_manager.udp_duplicated_packet_filter == True %} + original_ipv4_udp_enabled=1 +{%- else %} + original_ipv4_udp_enabled=0 +{%- endif %} +### note, can't distinguish between duplicate traffic and application retransmit traffic for IPv6 packets, +### so not support IPv6 original duplicate traffic check. + + +[dup_pkt.traffic.inject] +{%- if session_manager.inject_duplicated_packet_filter == True %} + inject_all_enabled=1 +{%- else %} + inject_all_enabled=0 +{%- endif %} + +[dup_pkt.parameters] + bloom_capacity=1000000 + bloom_error_rate=0.00001 + bloom_timeout=10 + +[STREAM] +### note, stream_id_base_time format is "%Y-%m-%d %H:%M:%S" + stream_id_base_time="2021-01-01 00:00:00" + [stream.tcp] + max={{ session_manager.tcp_session_max }} + timeout={{ session_manager.tcp_session_timeout_in_sec }} + syn_mandatory=1 + reorder_pkt_max={{ session_manager.tcp_session_unordered_pkt_max }} + analyse_option_enabled=1 + tuple4_reuse_time_interval=30 + + meaningful_statistics_minimum_pkt=3 + meaningful_statistics_minimum_byte=5 + opening_timeout={{ session_manager.tcp_session_opening_timeout_in_sec }} + closing_timeout={{ session_manager.tcp_session_closing_timeout_in_sec }} + + [stream.tcp.inject] + link_mss=1460 + + [stream.tcp.inject.rst] + auto_remedy=0 + number=3 + signature_enabled=1 + signature_seed1=65535 + signature_seed2=13 + remedy_kill_tcp_by_inline_device=0 + + [stream.udp] + max={{ session_manager.udp_session_max }} + timeout={{ 
session_manager.udp_session_timeout_in_sec }} + meaningful_statistics_minimum_pkt=3 + meaningful_statistics_minimum_byte=5 + + +[PROFILING] + [profiling.log] + sapp_log_category="sapp_log" + sapp_plugin_log_category="sapp_plugin_log" + #for profiling-related API control, e.g printaddr + + [profiling.metric] + [profiling.metric.fs2] + enabled=0 + prometheus_port=9273 + prometheus_url_path="/metrics" + local_file="log/fs2_sysinfo.metrics" + refresh_interval_s=1 + + [profiling.metric.fs3] + enabled=0 + prometheus_port=9273 + prometheus_url_path="/metrics" + local_file="log/fs3_sysinfo.metrics" + refresh_interval_s=1 + + [profiling.metric.fs4] + enabled=1 + local_file="./metrics/fs4_sysinfo.json" + refresh_interval_s=1 + app_name="sapp4" + + [profiling.process_latency] + log_category="sapp_process_latency_log" + histogram_enabled=0 + local_file="fs2_process_latency.metrics" + refresh_interval_s=1 +### note, threshold unit is microseconds (us), legal_scope [1,99999999], max value is 99 + threshold_us=1000 +### define in time.h,use CLOCK_MONOTONIC_COARSE as default +### 0 means CLOCK_REALTIME, 1 means CLOCK_MONOTONIC, 2 means CLOCK_PROCESS_CPUTIME_ID, 3 means CLOCK_THREAD_CPUTIME_ID +### 4 means CLOCK_MONOTONIC_RAW, 5 means CLOCK_REALTIME_COARSE, 6 means CLOCK_MONOTONIC_COARSE + clock_gettime_id=6 + + [profiling.sanity_check] + raw_pkt_broken_enabled=0 + symbol_conflict_enabled=0 + +[TOOLS] + [tools.pkt_dump] + enabled=1 +### note, mode options value:[storage, udp_socket] + mode="udp_socket" + BSD_packet_filter="" + + [tools.pkt_dump.threads] +### note, if you want enable pkt dump in all thread, set dump_thread_all_enabled=1, then 'dump_thread_id' is obsoleted. +### if dump_thread_all_enabled=0, then use dump_thread_id to specify separate specified thread index. 
+ all_threads_enabled=1 + +### note, dump_thread_id start from 0, max is CPU.worker_threads-1 + dump_thread_id=[0,1,2,3,4] + + [tools.pkt_dump.udp] + command_port=9345 + pkt_dump_ratio=30 + + [tools.pkt_dump.storage] +### note, file path must be double quotation mark extension, for example, path="/dev/shm/pkt_dump" + path="/dev/shm/pkt_dump" +### note, file size unit: MB + file_size_max_per_thread=10000 + + +[BREAKPAD] + disable_coredump=0 + enable_breakpad=0 + enable_breakpad_upload=0 + breakpad_minidump_dir="/run/sapp/crashreport" + breakpad_upload_tools="/opt/tsg/framework/bin/minidump_upload" + +### note: +### These configurations format is complex and difficult to describe with toml grammar, +### so, create a independent secondary config file to description specific information. + +[SECONDARY_CONFIG_LINK] + cfg_file_sapp_log="etc/sapp_log.conf" + cfg_file_plug_list="plug/conflist.inf" + cfg_file_project_list="etc/project_list.conf" + cfg_file_entrylist="etc/entrylist.conf" + cfg_file_send_raw_pkt="etc/send_raw_pkt.conf" + cfg_file_vxlan_sport_map="etc/vxlan_sport_service_map.conf" + cfg_file_inline_device="etc/gdev.conf" + cfg_file_necessary_plug_list="etc/necessary_plug_list.conf" + cfg_file_stream_compare_layer="etc/stream_compare_layer.conf" + cfg_file_vlan_flipping="etc/vlan_flipping_map.conf" + cfg_file_asymmetric_addr_layer="etc/asymmetric_addr_layer.conf" + cfg_file_well_known_port="etc/well_known_port.conf" + +[SECONDARY_DATA_LINK] + data_file_sysinfo_log="log/sysinfo.log" + data_file_field_stat_log="log/fs2_sysinfo.log" + data_file_inline_keepalive_log="log/gdev_keeplive_status.log" + +[LIBRARY_LINK] + marsio_library_path="/opt/tsg/mrzcpd/lib/libmarsio.so" diff --git a/containers/firewall/templates/send_raw_pkt.conf.j2 b/containers/firewall/templates/send_raw_pkt.conf.j2 new file mode 100644 index 00000000..b1bf4517 --- /dev/null +++ b/containers/firewall/templates/send_raw_pkt.conf.j2 @@ -0,0 +1,9 @@ 
+#(0:pag,1:pcap,2:dumpfile,3:pfring,4:DPDK,5:ppf,6:NPacket,7:qnf,8:N95,9:pcap-dumpfile-list,10:topsec, +##(11:ipfile, 12:marsio4, 13:agent_smith, 14:dpdk_vxlan, 15:marsio_vxlan, 16:pag_marsio + +#target_id +0 pag p7p2 eth1 dna0 dpdk ppf npacket qnf n95 eth1 topsec eth1 {{ nic_raw_name }} smith dpdk dpdk pag +1 pag eth1 eth1 dna0 dpdk ppf npacket qnf n95 eth1 topsec eth1 {{ nic_raw_name }} smith dpdk dpdk pag +#2 pag eth1 eth1 dna0 dpdk ppf npacket qnf n95 eth1 topsec eth1 p7p1 smith dpdk dpdk pag +#3 pag eth1 eth1 dna0 dpdk ppf npacket qnf n95 eth1 topsec eth1 p7p2 smith dpdk dpdk pag +#4 pag eth1 eth1 dna0 dpdk ppf npacket qnf n95 eth1 topsec eth1 p7p2 smith dpdk dpdk pag diff --git a/containers/firewall/templates/spec.toml.j2 b/containers/firewall/templates/spec.toml.j2 new file mode 100644 index 00000000..cd9941f2 --- /dev/null +++ b/containers/firewall/templates/spec.toml.j2 @@ -0,0 +1,49 @@ +{% if session_flags.enable == True -%} +[[plugin]] +path = "./stellar_plugin/session_flags.so" +init = "session_flags_plugin_init" +exit = "session_flags_plugin_exit" +{%- endif %} + +[[plugin]] +path = "./stellar_plugin/glimpse_detector.so" +init = "APP_GLIMPSE_DETECTOR_LOAD" +exit = "APP_GLIMPSE_DETECTOR_UNLOAD" + +[[plugin]] +path = "./plug/business/firewall/firewall.so" +init = "firewall_stellar_plugin_load" +exit = "firewall_stellar_plugin_unload" + +[[plugin]] +path = "./stellar_plugin/sf_classifier.so" +init = "sf_classifier_init" +exit = "sf_classifier_exit" + +{% if appsketch.qdpi_detector == True and appsketch.enable == True -%} +[[plugin]] +path = "./stellar_plugin/qdpi_detector/qdpi_detector.so" +init = "QDPI_DETECTOR_LOAD" +exit = "QDPI_DETECTOR_UNLOAD" +{%- endif %} + +{% if stat_policy_enforcer.enable == True -%} +[[plugin]] +path = "./stellar_plugin/stat_policy_enforcer.so" +init = "STATISTICS_INIT" +exit = "STATISTICS_EXIT" +{%- endif %} + +{% if traffic_sketch.enable == True -%} +[[plugin]] +path = "./stellar_plugin/traffic_sketch.so" +init = 
"TRAFFIC_SKETCH_INIT" +exit = "TRAFFIC_SKETCH_EXIT" +{%- endif %} + +{% if policy_sketch.enable == True -%} +[[plugin]] +path = "./stellar_plugin/policy_sketch.so" +init = "POLICY_SKETCH_INIT" +exit = "POLICY_SKETCH_EXIT" +{%- endif %} diff --git a/containers/firewall/templates/ssl_main.conf.j2 b/containers/firewall/templates/ssl_main.conf.j2 new file mode 100644 index 00000000..bc025171 --- /dev/null +++ b/containers/firewall/templates/ssl_main.conf.j2 @@ -0,0 +1,12 @@ +[SSL] +MAX_CACHE_LEN=10240 +{%- if decoders.SSL_CERT == True %} +PARSE_CERTIFICATE_DETAIL=1 +{%- else %} +PARSE_CERTIFICATE_DETAIL=0 +{%- endif %} +{%- if decoders.SSL_DETAIN_FRAG_CHELLO == True %} +DETAIN_FRAG_CHELLO_NUM=6 +{%- else %} +DETAIN_FRAG_CHELLO_NUM=0 +{%- endif %} \ No newline at end of file diff --git a/containers/prometheus/build.mk b/containers/prometheus/build.mk new file mode 100644 index 00000000..08e13c73 --- /dev/null +++ b/containers/prometheus/build.mk @@ -0,0 +1,3 @@ +rule_target := prometheus +rule_prerequisites := +rule_recipes := $(call download_image_tar_from_url,prometheus-v25.8.2.tar) \ No newline at end of file diff --git a/containers/proxy-certstore/Dockerfile.j2 b/containers/proxy-certstore/Dockerfile.j2 new file mode 100644 index 00000000..798e2585 --- /dev/null +++ b/containers/proxy-certstore/Dockerfile.j2 @@ -0,0 +1,20 @@ +{% import 'dockerfile-macros.j2' as macros -%} +ARG BASE_IMAGE +FROM ${BASE_IMAGE} + +COPY files/redis-6.2.5-1.el7.remi.x86_64.rpm /tmp/ + +RUN dnf -y install /tmp/redis-6.2.5-1.el7.remi.x86_64.rpm && \ + rm -rf /tmp/redis-6.2.5-1.el7.remi.x86_64.rpm && \ + {{ macros.install_packages(packages) }} && \ + {{ macros.clean_after_install_packages() }} +# files +COPY files/zlog.conf /opt/tsg/certstore/conf/ +COPY files/cert-redis.conf /etc/ +# templates +COPY templates/* /templates/ +# scripts +COPY --chmod=755 entrypoint.sh /usr/local/bin/ + +WORKDIR /opt/tsg/certstore +CMD ["/bin/bash"] diff --git a/containers/proxy-certstore/build.mk 
b/containers/proxy-certstore/build.mk new file mode 100644 index 00000000..a7a74344 --- /dev/null +++ b/containers/proxy-certstore/build.mk @@ -0,0 +1,3 @@ +rule_target := proxy-certstore +rule_prerequisites := $(DEP_ENV_FILES) $(YUM_REPO_FILE) $(DOCKERFILE_MACROS) $(BUILD_DIR)/base/$(BUILD_DONE_FILE) +rule_recipes := $(call build_image_from_dockerfile,$(rule_target),$(IMAGE_REGISTRY)/base:$(IMAGE_TAG),$(IMAGE_REGISTRY)/$(rule_target):$(IMAGE_TAG)) \ No newline at end of file diff --git a/containers/proxy-certstore/entrypoint.sh b/containers/proxy-certstore/entrypoint.sh new file mode 100644 index 00000000..5303def7 --- /dev/null +++ b/containers/proxy-certstore/entrypoint.sh @@ -0,0 +1,24 @@ +#!/bin/sh -ex +source /usr/local/bin/entrypoint_public.sh +# start +ldconfig + +parse_args "$@" + +mkdir -p /opt/tsg/etc/ + +render_template cert_store.ini.j2 /opt/tsg/certstore/conf/cert_store.ini +render_template tsg_device_tag.json.j2 /opt/tsg/etc/tsg_device_tag.json + +DEVICE_SN=$(read_device_sn_from_k8s_node_info) +echo "{\"sn\": \"$DEVICE_SN\"}" > /opt/tsg/etc/tsg_sn.json + +if [ ${IS_ENABLE_PRESTART} == "true" ]; then + enable_prestart +fi + +if [ ${IS_ENABLE_INTERACTIVE_STARTUP} == "true" ]; then + enable_interactive_startup +fi + +exec /opt/tsg/certstore/bin/certstore \ No newline at end of file diff --git a/containers/proxy-certstore/files/cert-redis.conf b/containers/proxy-certstore/files/cert-redis.conf new file mode 100644 index 00000000..d1364218 --- /dev/null +++ b/containers/proxy-certstore/files/cert-redis.conf @@ -0,0 +1,1052 @@ +# Redis configuration file example. 
+# +# Note that in order to read the configuration file, Redis must be +# started with the file path as first argument: +# +# ./redis-server /path/to/redis.conf + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include /path/to/local.conf +# include /path/to/other.conf + +################################## NETWORK ##################################### + +# By default, if no "bind" configuration directive is specified, Redis listens +# for connections from all the network interfaces available on the server. +# It is possible to listen to just one or multiple selected interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. 
+#
+# Examples:
+#
+# bind 192.168.1.100 10.0.0.1
+# bind $ip ::1
+#
+# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
+# internet, binding to all the interfaces is dangerous and will expose the
+# instance to everybody on the internet. So by default we uncomment the
+# following bind directive, that will force Redis to listen only into
+# the IPv4 loopback interface address (this means Redis will be able to
+# accept connections only from clients running into the same computer it
+# is running).
+#
+# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
+# JUST COMMENT THE FOLLOWING LINE.
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+bind 0.0.0.0
+
+# Protected mode is a layer of security protection, in order to avoid that
+# Redis instances left open on the internet are accessed and exploited.
+#
+# When protected mode is on and if:
+#
+# 1) The server is not binding explicitly to a set of addresses using the
+#    "bind" directive.
+# 2) No password is configured.
+#
+# The server only accepts connections from clients connecting from the
+# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
+# sockets.
+#
+# By default protected mode is enabled. You should disable it only if
+# you are sure you want clients from other hosts to connect to Redis
+# even if no authentication is configured, nor a specific set of interfaces
+# are explicitly listed using the "bind" directive.
+protected-mode yes
+
+# Accept connections on the specified port, default is 6379 (IANA #815344).
+# If port 0 is specified Redis will not listen on a TCP socket.
+port 6379
+
+# TCP listen() backlog.
+#
+# In high requests-per-second environments you need an high backlog in order
+# to avoid slow clients connections issues.
Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# Unix socket. +# +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 300 seconds, which is the new +# Redis default starting with Redis 3.2.1. +tcp-keepalive 300 + +################################# GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize no + +# If you run Redis from upstart or systemd, Redis can interact with your +# supervision tree. Options: +# supervised no - no supervision interaction +# supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET +# supervised auto - detect upstart or systemd method based on +# UPSTART_JOB or NOTIFY_SOCKET environment variables +# Note: these supervision methods only signal "process is ready." 
+# They do not enable continuous liveness pings back to your supervisor. +supervised no + +# If a pid file is specified, Redis writes it where specified at startup +# and removes it at exit. +# +# When the server runs non daemonized, no pid file is created if none is +# specified in the configuration. When the server is daemonized, the pid file +# is used even if not specified, defaulting to "/var/run/redis.pid". +# +# Creating a pid file is best effort: if Redis is not able to create it +# nothing bad happens, the server will start and run normally. +pidfile /var/run/redis_6379.pid + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +#logfile "/opt/tsg/cert-redis/6379/6379.log" + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. 
+# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. 
+rdbchecksum yes + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +#dir /opt/tsg/cert-redis/6379/ + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of slaves. +# 2) Redis slaves are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition slaves automatically try to reconnect to masters +# and resynchronize with them. +# +# slaveof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the slave to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the slave request. +# +# masterauth + +# When a slave loses its connection with the master, or when the replication +# is still in progress, the slave can act in two different ways: +# +# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. 
+# +# 2) if slave-serve-stale-data is set to 'no' the slave will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO and SLAVEOF. +# +slave-serve-stale-data yes + +# You can configure a slave instance to accept writes or not. Writing against +# a slave instance may be useful to store some ephemeral data (because data +# written on a slave will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default slaves are read-only. +# +# Note: read only slaves are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only slave exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only slaves using 'rename-command' to shadow all the +# administrative / dangerous commands. +slave-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# ------------------------------------------------------- +# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY +# ------------------------------------------------------- +# +# New slaves and reconnecting slaves that are not able to continue the replication +# process just receiving differences, need to do what is called a "full +# synchronization". An RDB file is transmitted from the master to the slaves. +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the slaves incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to slave sockets, without touching the disk at all. 
+# +# With disk-backed replication, while the RDB file is generated, more slaves +# can be queued and served with the RDB file as soon as the current child producing +# the RDB file finishes its work. With diskless replication instead once +# the transfer starts, new slaves arriving will be queued and a new transfer +# will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple slaves +# will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the slaves. +# +# This is important since once the transfer starts, it is not possible to serve +# new slaves arriving, that will be queued for the next RDB transfer, so the server +# waits a delay in order to let more slaves arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# Slaves send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_slave_period option. The default value is 10 +# seconds. +# +# repl-ping-slave-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of slave. +# 2) Master timeout from the point of view of slaves (data, pings). +# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-slave-period otherwise a timeout will be detected +# every time there is low traffic between the master and the slave. 
+# +# repl-timeout 60 + +# Disable TCP_NODELAY on the slave socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to slaves. But this can add a delay for +# the data to appear on the slave side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the slave side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and slaves are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# slave data when slaves are disconnected for some time, so that when a slave +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the slave missed while +# disconnected. +# +# The bigger the replication backlog, the longer the time the slave can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a slave connected. +# +# repl-backlog-size 1mb + +# After a master has no longer connected slaves for some time, the backlog +# will be freed. The following option configures the amount of seconds that +# need to elapse, starting from the time the last slave disconnected, for +# the backlog buffer to be freed. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The slave priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a slave to promote into a +# master if the master is no longer working correctly. 
+#
+# A slave with a low priority number is considered better for promotion, so
+# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
+# pick the one with priority 10, that is the lowest.
+#
+# However a special priority of 0 marks the slave as not able to perform the
+# role of master, so a slave with priority of 0 will never be selected by
+# Redis Sentinel for promotion.
+#
+# By default the priority is 100.
+slave-priority 100
+
+# It is possible for a master to stop accepting writes if there are less than
+# N slaves connected, having a lag less or equal than M seconds.
+#
+# The N slaves need to be in "online" state.
+#
+# The lag in seconds, that must be <= the specified value, is calculated from
+# the last ping received from the slave, that is usually sent every second.
+#
+# This option does not GUARANTEE that N replicas will accept the write, but
+# will limit the window of exposure for lost writes in case not enough slaves
+# are available, to the specified number of seconds.
+#
+# For example to require at least 3 slaves with a lag <= 10 seconds use:
+#
+# min-slaves-to-write 3
+# min-slaves-max-lag 10
+#
+# Setting one or the other to 0 disables the feature.
+#
+# By default min-slaves-to-write is set to 0 (feature disabled) and
+# min-slaves-max-lag is set to 10.
+
+# A Redis master is able to list the address and port of the attached
+# slaves in different ways. For example the "INFO replication" section
+# offers this information, which is used, among other tools, by
+# Redis Sentinel in order to discover slave instances.
+# Another place where this info is available is in the output of the
+# "ROLE" command of a master.
+#
+# The listed IP and address normally reported by a slave is obtained
+# in the following way:
+#
+#   IP: The address is auto detected by checking the peer address
+#   of the socket used by the slave to connect with the master.
+# +# Port: The port is communicated by the slave during the replication +# handshake, and is normally the port that the slave is using to +# list for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the slave may be actually reachable via different IP and port +# pairs. The following two options can be used by a slave in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# slave-announce-ip 5.5.5.5 +# slave-announce-port 1234 + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to slaves may cause problems. 
+ +################################### LIMITS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +# Don't use more memory than the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU cache, or to set +# a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have slaves attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the slaves are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of slaves is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have slaves attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for slave +# output buffers (but this is not needed if the policy is 'noeviction'). 
+# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select among five behaviors: +# +# volatile-lru -> remove the key with an expire set using an LRU algorithm +# allkeys-lru -> remove any key according to the LRU algorithm +# volatile-random -> remove a random key with an expire set +# allkeys-random -> remove a random key, any key +# volatile-ttl -> remove the key with the nearest expire time (minor TTL) +# noeviction -> don't expire at all, just return an error on write operations +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are no suitable keys for eviction. +# +# At the date of writing these commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy noeviction + +# LRU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. For default Redis will check five keys and pick the one that was +# used less recently, you can change the sample size using the following +# configuration directive. +# +# The default of 5 produces good enough results. 10 Approximates very closely +# true LRU but costs a bit more CPU. 3 is very fast but not very accurate. +# +# maxmemory-samples 5 + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). 
+# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. + +appendonly no + +# The name of the append only file (default: "appendonly.aof") + +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log. Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". 
+ +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. 
+ +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. 
The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################ REDIS CLUSTER ############################### +# +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# in order to mark it as "mature" we need to wait for a non trivial percentage +# of users to deploy it in production. +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A slave of a failing master will avoid to start a failover if its data +# looks too old. 
+#
+# There is no simple way for a slave to actually have an exact measure of
+# its "data age", so the following two checks are performed:
+#
+# 1) If there are multiple slaves able to failover, they exchange messages
+# in order to try to give an advantage to the slave with the best
+# replication offset (more data from the master processed).
+# Slaves will try to get their rank by offset, and apply to the start
+# of the failover a delay proportional to their rank.
+#
+# 2) Every single slave computes the time of the last interaction with
+# its master. This can be the last ping or command received (if the master
+# is still in the "connected" state), or the time that elapsed since the
+# disconnection with the master (if the replication link is currently down).
+# If the last interaction is too old, the slave will not try to failover
+# at all.
+#
+# The point "2" can be tuned by user. Specifically a slave will not perform
+# the failover if, since the last interaction with the master, the time
+# elapsed is greater than:
+#
+# (node-timeout * slave-validity-factor) + repl-ping-slave-period
+#
+# So for example if node-timeout is 30 seconds, and the slave-validity-factor
+# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the
+# slave will not try to failover if it was not able to talk with the master
+# for longer than 310 seconds.
+#
+# A large slave-validity-factor may allow slaves with too old data to failover
+# a master, while a too small value may prevent the cluster from being able to
+# elect a slave at all.
+#
+# For maximum availability, it is possible to set the slave-validity-factor
+# to a value of 0, which means, that slaves will always try to failover the
+# master regardless of the last time they interacted with the master.
+# (However they'll always try to apply a delay proportional to their
+# offset rank).
+# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-slave-validity-factor 10 + +# Cluster slaves are able to migrate to orphaned masters, that are masters +# that are left without working slaves. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working slaves. +# +# Slaves migrate to orphaned masters only if there are still at least a +# given number of other working slaves for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a slave +# will migrate only if there is at least 1 other working slave for its master +# and so forth. It usually reflects the number of slaves you want for every +# master in your cluster. +# +# Default is 1 (slaves migrate only if their masters remain with at least +# one slave). To disable migration just set it to a very large value. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least an hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# In order to setup your cluster make sure to read the documentation +# available at http://redis.io web site. 
+ +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +################################ LATENCY MONITOR ############################## + +# The Redis latency monitoring subsystem samples different operations +# at runtime in order to collect data related to possible sources of +# latency of a Redis instance. +# +# Via the LATENCY command this information is available to the user that can +# print graphs and obtain reports. +# +# The system only logs operations that were performed in a time equal or +# greater than the amount of milliseconds specified via the +# latency-monitor-threshold configuration directive. When its value is set +# to zero, the latency monitor is turned off. 
+# +# By default latency monitoring is disabled since it is mostly not needed +# if you don't have latency issues, and collecting data has a performance +# impact, that while very small, can be measured under big load. Latency +# monitoring can easily be enabled at runtime using the command +# "CONFIG SET latency-monitor-threshold " if needed. +latency-monitor-threshold 0 + +############################# EVENT NOTIFICATION ############################## + +# Redis can notify Pub/Sub clients about events happening in the key space. +# This feature is documented at http://redis.io/topics/notifications +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@__ prefix. +# E Keyevent events, published with __keyevent@__ prefix. +# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... +# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. 
+# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Lists are also encoded in a special way to save a lot of space. +# The number of entries allowed per internal list node can be specified +# as a fixed maximum size or a maximum number of elements. +# For a fixed maximum size, use -5 through -1, meaning: +# -5: max size: 64 Kb <-- not recommended for normal workloads +# -4: max size: 32 Kb <-- not recommended +# -3: max size: 16 Kb <-- probably not recommended +# -2: max size: 8 Kb <-- good +# -1: max size: 4 Kb <-- good +# Positive numbers mean store up to _exactly_ that number of elements +# per list node. +# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), +# but if your use case is unique, adjust the settings as necessary. +list-max-ziplist-size -2 + +# Lists may also be compressed. +# Compress depth is the number of quicklist ziplist nodes from *each* side of +# the list to *exclude* from compression. The head and tail of the list +# are always uncompressed for fast push/pop operations. 
Settings are: +# 0: disable all list compression +# 1: depth 1 means "don't start compressing until after 1 node into the list, +# going from either the head or tail" +# So: [head]->node->node->...->node->[tail] +# [head], [tail] will always be uncompressed; inner nodes will compress. +# 2: [head]->[next]->node->node->...->node->[prev]->[tail] +# 2 here means: don't compress head or head->next or tail->prev or tail, +# but compress all nodes between them. +# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] +# etc. +list-compress-depth 0 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. 
+hll-sparse-max-bytes 3000 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply from time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). +# +# The limit can be set differently for the three different classes of clients: +# +# normal -> normal clients including MONITOR clients +# slave -> slave clients +# pubsub -> clients subscribed to at least one pubsub channel or pattern +# +# The syntax of every client-output-buffer-limit directive is the following: +# +# client-output-buffer-limit +# +# A client is immediately disconnected once the hard limit is reached, or if +# the soft limit is reached and remains reached for the specified number of +# seconds (continuously). 
+# So for instance if the hard limit is 32 megabytes and the soft limit is +# 16 megabytes / 10 seconds, the client will get disconnected immediately +# if the size of the output buffers reach 32 megabytes, but will also get +# disconnected if the client reaches 16 megabytes and continuously overcomes +# the limit for 10 seconds. +# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. +# +# Instead there is a default limit for pubsub and slave clients, since +# subscribers and slaves receive data in a push fashion. +# +# Both the hard or the soft limit can be disabled by setting them to zero. +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit slave 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeout, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are performed with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. 
+aof-rewrite-incremental-fsync yes diff --git a/containers/proxy-certstore/files/redis-6.2.5-1.el7.remi.x86_64.rpm b/containers/proxy-certstore/files/redis-6.2.5-1.el7.remi.x86_64.rpm new file mode 100644 index 00000000..3d0d1e04 Binary files /dev/null and b/containers/proxy-certstore/files/redis-6.2.5-1.el7.remi.x86_64.rpm differ diff --git a/containers/proxy-certstore/files/zlog.conf b/containers/proxy-certstore/files/zlog.conf new file mode 100644 index 00000000..f5943dd7 --- /dev/null +++ b/containers/proxy-certstore/files/zlog.conf @@ -0,0 +1,11 @@ +[global] +default format = "%d(%c), %V, %F, %U, %m%n" +rotate lock file = /tmp/certstore_zlog.lock +file perms = 644 +[levels] +DEBUG=10 +INFO=20 +FATAL=30 +[rules] +*.fatal "./logs/error.log.%d(%F)", 500M ~ "./logs/error.log.%d(%F).#2s"; +*.fatal "./logs/certstore.log.%d(%F)", 500M ~ "./logs/certstore.log.%d(%F).#2s"; diff --git a/containers/proxy-certstore/manifest.yaml b/containers/proxy-certstore/manifest.yaml new file mode 100644 index 00000000..1c8c6706 --- /dev/null +++ b/containers/proxy-certstore/manifest.yaml @@ -0,0 +1,4 @@ +packages: + - name: certstore + version: 3.0.5.20241018.c8bca25 + install_command: dnf diff --git a/containers/proxy-certstore/templates/cert_store.ini.j2 b/containers/proxy-certstore/templates/cert_store.ini.j2 new file mode 100644 index 00000000..00c7f87d --- /dev/null +++ b/containers/proxy-certstore/templates/cert_store.ini.j2 @@ -0,0 +1,56 @@ +{% import '/templates/macros.j2' as macros -%} +[SYSTEM] +#1:print on screen, 0:don't +DEBUG_SWITCH = 1 +RUN_LOG_PATH = "conf/zlog.conf" + +[breakpad] + disable_coredump=0 + enable_breakpad=0 + enable_breakpad_upload=0 + breakpad_minidump_dir="/run/certstore/crashreport" + breakpad_upload_tools="/opt/tsg/framework/bin/minidump_upload" +[CONFIG] +#Number of running threads +thread-nu = 4 +#1 rsync, 0 sync +mode=1 +#Local default root certificate is valid for 30 days by default +expire_after = 30 +#Local default root certificate path 
+local_debug = 1
+ca_path = ./cert/tsg-ca-v3-trust-ca.pem
+untrusted_ca_path = ./cert/tsg-ca-v3-untrust-ca.pem
+
+[MAAT]
+#Configure the load mode,
+#1: using local json
+#2: using Redis reads
+maat_json_switch=2
+#When the loading mode is sent to the network, set the scanning configuration modification interval (s).
+effective_interval=1
+#Specify the location of the configuration library table file
+table_info=./conf/table_info.conf
+#Json file path when json schema is used
+pxy_obj_keyring=./conf/pxy_obj_keyring.json
+
+[LIBEVENT]
+#Local monitor port number, default is 9991
+port = 9991
+
+[CERTSTORE_REDIS]
+#The Redis server IP address and port number where the certificate is stored locally
+ip = 127.0.0.1
+port = 6379
+
+[MAAT_REDIS]
+#Maat monitors the Redis server IP address and port number
+
+ip = {{ macros.cm_address(external_resources.cm) }}
+port = {{ macros.cm_port(external_resources.cm) }}
+dbindex = {{ vsys_id }}
+[stat]
+statsd_server=127.0.0.1
+statsd_port=8100
+statsd_set_prometheus_port=9002
+statsd_set_prometheus_url_path=/metrics
diff --git a/containers/proxy/Dockerfile.j2 b/containers/proxy/Dockerfile.j2
new file mode 100644
index 00000000..24db567a
--- /dev/null
+++ b/containers/proxy/Dockerfile.j2
@@ -0,0 +1,19 @@
+{% import 'dockerfile-macros.j2' as macros -%}
+ARG BASE_IMAGE
+FROM ${BASE_IMAGE}
+
+RUN {{ macros.install_packages(packages) }} && \
+ {{ macros.clean_after_install_packages() }}
+# files
+COPY files/doh.conf /opt/tsg/tfe/conf/doh/
+COPY files/future.conf /opt/tsg/tfe/conf/tfe/
+COPY files/tsg_diagnose_ca.pem /opt/tsg/tfe/resource/tfe/
+COPY files/zlog.conf /opt/tsg/tfe/conf/tfe/
+# templates
+COPY templates/* /templates/
+# scripts
+COPY --chmod=755 entrypoint.sh /usr/local/bin/
+
+WORKDIR /opt/tsg/tfe
+
+CMD ["/bin/bash"]
diff --git a/containers/proxy/build.mk b/containers/proxy/build.mk
new file mode 100644
index 00000000..c1061fa2
--- /dev/null
+++ b/containers/proxy/build.mk
@@ -0,0 +1,3 @@
+rule_target := proxy
+rule_prerequisites := $(DEP_ENV_FILES) $(YUM_REPO_FILE) $(DOCKERFILE_MACROS) $(BUILD_DIR)/base/$(BUILD_DONE_FILE) +rule_recipes := $(call build_image_from_dockerfile,$(rule_target),$(IMAGE_REGISTRY)/base:$(IMAGE_TAG),$(IMAGE_REGISTRY)/$(rule_target):$(IMAGE_TAG)) \ No newline at end of file diff --git a/containers/proxy/entrypoint.sh b/containers/proxy/entrypoint.sh new file mode 100644 index 00000000..ee9ff589 --- /dev/null +++ b/containers/proxy/entrypoint.sh @@ -0,0 +1,25 @@ +#!/bin/sh -ex +source /usr/local/bin/entrypoint_public.sh + +# start +ldconfig + +parse_args "$@" + +mkdir -p /opt/tsg/etc/ + +render_template tfe.conf.j2 /opt/tsg/tfe/conf/tfe/tfe.conf +render_template tsg_device_tag.json.j2 /opt/tsg/etc/tsg_device_tag.json + +DEVICE_SN=$(read_device_sn_from_k8s_node_info) +echo "{\"sn\": \"$DEVICE_SN\"}" > /opt/tsg/etc/tsg_sn.json + +if [ ${IS_ENABLE_PRESTART} == "true" ]; then + enable_prestart +fi + +if [ ${IS_ENABLE_INTERACTIVE_STARTUP} == "true" ]; then + enable_interactive_startup +fi + +exec /opt/tsg/tfe/bin/tfe \ No newline at end of file diff --git a/containers/proxy/files/doh.conf b/containers/proxy/files/doh.conf new file mode 100644 index 00000000..22e9fbaf --- /dev/null +++ b/containers/proxy/files/doh.conf @@ -0,0 +1,12 @@ +[doh] +enable=1 + +[maat] +table_appid=ATTR_APP_ID +table_qname=ATTR_DOH_QNAME +table_host=ATTR_SERVER_FQDN + +[kafka] +ENTRANCE_ID=0 +# if enable "en_sendlog", the iterm "tfe.conf [kafka] enable" must set 1 +en_sendlog=1 diff --git a/containers/proxy/files/future.conf b/containers/proxy/files/future.conf new file mode 100644 index 00000000..f1ef8b0d --- /dev/null +++ b/containers/proxy/files/future.conf @@ -0,0 +1,10 @@ +[STAT] +no_stats=0 +statsd_server=127.0.0.1 +statsd_port=8100 +histogram_bins=0.50,0.80,0.9,0.95 +statsd_cycle=5 +# FS_OUTPUT_STATSD=1, FS_OUTPUT_INFLUX_LINE=2 +statsd_format=2 +# printf diff Not available +# print_diff=1 diff --git a/containers/proxy/files/tsg_diagnose_ca.pem 
b/containers/proxy/files/tsg_diagnose_ca.pem new file mode 100644 index 00000000..65cd3d74 --- /dev/null +++ b/containers/proxy/files/tsg_diagnose_ca.pem @@ -0,0 +1,36 @@ +-----BEGIN CERTIFICATE----- +MIIGWzCCBEOgAwIBAgIJAIEUARRZuqOXMA0GCSqGSIb3DQEBCwUAMHcxCzAJBgNV +BAYTAkNOMRAwDgYDVQQIDAdCZWlqaW5nMRAwDgYDVQQHDAdCZWlqaW5nMRgwFgYD +VQQKDA9EaWFnbm9zZSBCYWRTU0wxKjAoBgNVBAMMIUJhZFNTTCBSb290IENlcnRp +ZmljYXRlIEF1dGhvcml0eTAeFw0yMTEyMDIwMzU0NDRaFw0zMTExMzAwMzU0NDRa +MHcxCzAJBgNVBAYTAkNOMRAwDgYDVQQIDAdCZWlqaW5nMRAwDgYDVQQHDAdCZWlq +aW5nMRgwFgYDVQQKDA9EaWFnbm9zZSBCYWRTU0wxKjAoBgNVBAMMIUJhZFNTTCBS +b290IENlcnRpZmljYXRlIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAOLd76Ea5epn0NNu5yF6gY1YrzqgFY8PoVP/qaaa+otHooKPqC3t +dwA/jhADP3d6UgS/94rsViGsdawx2jlHLv7TcuJeXMcsZjQAkBVfL5wkEhw5Csr4 +/UmEeLiJiPyj+Dad+Ne7G4qfTv8802HSSBrv6kGcJSsHAzQ1AnzwBaITNmuR3IBE +0vmxulvXyfpsT413z+Ik2N6xp8rI6cINyqNMA356vgM/x79OunE52kTM5ocx7Wd6 +vxUKRpYVPitbhvKvWF45YGkQ0LxnII6PwgnBeA+a0rvdq4XEfEPwyDmQ+H1cdozz +qhPJbTQgX1QL4dgM+erBYKpTjksX65kj89c7DNLdgbS26zDNcyD7oIXmYpwggJOt +J/2zSmN5L79Y8VT0VWlvSv3uTtViTZBPeIsWnT1Ea/sJP6IpjcePLR2MW0GGlcz9 +X97ojp+Ws0I5VFdv77kLeel/2iO3rHPe6xMgJ/7zSre7t0vdwaDzQlsC9FUeDHJS +1SBT4sGfUZs82O8IvvxBSp15eTDlhHcYK/pMgvsC8PDrsiFcehMEh2olXlU/Qi/U +E9lL5Hv6/VmtMS0J0Y7buGfo4iSohVPIYB4Akq/jq3vOsWNIEV686eD8U0JCLxjb +bHkV4WrXJvvElxnB1OnhgF7jfxJgecMUi2bl4VZGWNucRwRmFXGsYsw/AgMBAAGj +gekwgeYwHQYDVR0OBBYEFBPYAnh6x+6Ls7Pv7XLsTJaXOrdaMIGpBgNVHSMEgaEw +gZ6AFBPYAnh6x+6Ls7Pv7XLsTJaXOrdaoXukeTB3MQswCQYDVQQGEwJDTjEQMA4G +A1UECAwHQmVpamluZzEQMA4GA1UEBwwHQmVpamluZzEYMBYGA1UECgwPRGlhZ25v +c2UgQmFkU1NMMSowKAYDVQQDDCFCYWRTU0wgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRo +b3JpdHmCCQCBFAEUWbqjlzAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkq +hkiG9w0BAQsFAAOCAgEAtdA6Y3GATG9mOwOj3vyBjWCbTeVgRXomhquAOVIdh6Xl ++GUXlX2K/bcRW1GGi51s/RzNGOqTym7XHzzROJ2XhxnQcWHIqUpCb6IAGwsBxRVe +ruC8lUDcjitHm2BrdJsDnIpWA+iJ/ihuYuTaDwfIipao/NqftneWp+A6XjQ4pYsY +MoTRsk1HxvYGSLE4wKQhTH9JESCx7I8ltYAnF3bQLKfqcwuPvWNR3Vth5rl2bxfk 
+c7mmgpGjjQCsFZDcBnPxKqc12VPbmJEtsgP945SrwYCZkg1kd+py2kgcvccE5Y3g +sAhWCMNKMwc1Wg3tPNAQvzXZZKtRUOFd/5mcjkq0QWZcDdA1r74DhB2e22KitUok +YCdM0o+45EVMZVMe0QWUIgByz1PObM7IP1oEFEJ0afRc0Ra6scYXUBS7Nnk2UNxX +bsQrD0GdrX4pb7Zg1RJEgUU51UGflQpzyJuvQIjXT3pAk9IDKQUXi6lNiM85OGKn +HJYX48KuYlG7I0zIa7Tfz6rkcCxGsXe8jx5+4zVkul9gVxmmDAQv1jYOPvoXIFQc +TtfSekSxzop+DsMySQHQqjFOHtAYBoWjCLX9FNURJ/yPcsYpMrcrTDVzucPK8utK +jcCDir6QMO0z4heHZyV5alant/MYEquBe0ooCCAhh138HndvwVHjekh/le/lf28= +-----END CERTIFICATE----- diff --git a/containers/proxy/files/zlog.conf b/containers/proxy/files/zlog.conf new file mode 100644 index 00000000..d092dcf9 --- /dev/null +++ b/containers/proxy/files/zlog.conf @@ -0,0 +1,24 @@ +# kill -s SIGHUP "pid" + +[global] + +default format = "%d(%c), %t, %V, %F, %U, %m%n" +rotate lock file = /tmp/tfe_zlog.lock +file perms = 644 + +[levels] + +DEBUG=10 +INFO=20 +FATAL=30 +#DISABLE=40 + +[rules] + +*.fatal "./log/error.log.%d(%F)", 500M ~ "./log/error.log.%d(%F).#2s"; +tfe.fatal "./log/tfe.log.%d(%F)", 500M ~ "./log/tfe.log.%d(%F).#2s"; +http.fatal "./log/http.log.%d(%F)", 500M ~ "./log/http.log.%d(%F).#2s"; +http2.fatal "./log/http2.log.%d(%F)", 500M ~ "./log/http2.log.%d(%F).#2s"; +doh.fatal "./log/doh_pxy.log.%d(%F)", 500M ~ "./log/doh_pxy.log.%d(%F).#2s"; +tsg_http.fatal "./log/tsg_http_pxy.log.%d(%F)", 500M ~ "./log/tsg_http_pxy.log.%d(%F).#2s"; +packet_io.fatal "./log/packet_io.log.%d(%F)", 500M ~ "./log/packet_io.log.%d(%F).#2s"; diff --git a/containers/proxy/manifest.yaml b/containers/proxy/manifest.yaml new file mode 100644 index 00000000..71f8e5a1 --- /dev/null +++ b/containers/proxy/manifest.yaml @@ -0,0 +1,6 @@ +packages: + - name: tfe + version: 4.10.4.5c05bf4 + download_command: dnf + install_command: rpm + install_command_options: "--prefix /opt/tsg/tfe" \ No newline at end of file diff --git a/containers/proxy/templates/tfe.conf.j2 b/containers/proxy/templates/tfe.conf.j2 new file mode 100644 index 00000000..7b69d763 --- /dev/null +++ 
b/containers/proxy/templates/tfe.conf.j2 @@ -0,0 +1,279 @@ +{% import '/templates/macros.j2' as macros -%} +[system] +nr_worker_threads={{ 1 if tfe_affinity | length == 1 else (tfe_affinity | length - 1) }} + +# Only when (disable_coredump == 1 || (enable_breakpad == 1 && enable_breakpad_upload == 1)) is satisfied, the core will not be generated locally + +disable_coredump=0 +enable_breakpad=0 +enable_breakpad_upload=0 +# must be /run/tfe/crashreport due to tmpfile limit +breakpad_minidump_dir=/run/tfe/crashreport +breakpad_upload_tools=/opt/tsg/framework/bin/minidump_upload + +# ask for at least (1 + nr_worker_threads) masks +# the first mask for acceptor thread +# the others mask for worker thread +enable_cpu_affinity=1 +cpu_affinity_mask={{ (tfe_affinity[0] ~ ',' ~ tfe_affinity[0]) if tfe_affinity | length == 1 else tfe_affinity | join(',') }} + +# LEAST_CONN = 0; ROUND_ROBIN = 1 +load_balance=1 + +[public] +vsys_id={{ vsys_id }} +data_center={{ macros.read_device_tag_value(device, "data_center") }} +device_group={{ macros.read_device_tag_value(device, "device_group") }} +device_id=DEVICE_ID_PLACE_HOLDER_MARK + +# for enable kni v3 +[nfq] +queue_id=1 +queue_maxlen=655350 +queue_rcvbufsiz=983025000 +queue_no_enobufs=1 + +[kni] +# kni v1 +#uxdomain=/var/run/.tfe_kni_acceptor_handler +# kni v2 +#scm_socket_file=/var/run/.tfe_kmod_scm_socket + +# send cmsg +send_switch=0 +ip=127.0.0.1 +cmsg_port=2475 + +# watch dog +watchdog_switch=0 +watchdog_port=2476 + +[watchdog_tfe] +# The worker thread updates the timestamp every two seconds +# The watchdog thread checks the timestamp every second +enable=1 +timeout_seconds=5 +statistics_window=20 +timeout_cnt_as_fail=3 +timeout_debug=0 + +[ssl] +ssl_debug=0 +# ssl version Not available, configured via TSG website +# ssl_max_version=tls13 +# ssl_min_version=ssl3 +ssl_compression=1 +no_ssl2=1 +no_ssl3=0 +no_tls10=0 +no_tls11=0 +no_tls12=0 +default_ciphers=ALL:-aNULL +no_cert_verify=0 + +# session ticket +no_session_ticket=0 
+stek_group_num=4096 +stek_rotation_time=3600 + +# session cache +no_session_cache=0 +session_cache_slots=4194304 +session_cache_expire_seconds=1800 + +# service cache +service_cache_slots=4194304 +service_cache_expire_seconds=300 +service_cache_fail_as_pinning_cnt=4 +service_cache_fail_as_proto_err_cnt=5 +service_cache_fail_time_window=30 +service_cache_succ_as_app_not_pinning_cnt=0 + +# cert +check_cert_crl=0 +trusted_cert_load_local=1 +trusted_cert_file=resource/tfe/tsg_diagnose_ca.pem +trusted_cert_dir=resource/tfe/trusted_storage + +# master key +log_master_key=0 +key_log_file=log/sslkeylog.log + +[key_keeper] +#Mode: debug - generate cert with ca_path, normal - generate cert with cert store +#0 on cache 1 off cache +no_cache=0 +mode=normal +cert_store_host=127.0.0.1 +cert_store_port=9991 +ca_path=resource/tfe/tango-ca-v3-trust-ca.pem +untrusted_ca_path=resource/tfe/tango-ca-v3-untrust-ca.pem +hash_slot_size=131072 +hash_expire_seconds=300 +cert_expire_time=24 + +# health_check only for "mode=normal" default 1 +enable_health_check=1 + +[tsg_http] +enable_plugin=1 +en_sendlog=1 + +[debug] +# 1 : enforce tcp passthrough +# 0 : Whether to passthrough depends on the tcp_options in cmsg +passthrough_all_tcp=0 + +[ratelimit] +read_rate=0 +read_burst=0 +write_rate=0 +write_burst=0 + +[tcp] +# read rcv_buff/snd_buff options from tfe conf +sz_rcv_buffer=-1 +sz_snd_buffer=-1 + +# 1 : use tcp_options in tfe.conf +# 0 : use tcp_options in cmsg +enable_overwrite=0 +tcp_nodelay=1 +so_keepalive=1 +tcp_keepcnt=8 +tcp_keepintvl=15 +tcp_keepidle=30 +tcp_user_timeout=600 +tcp_ttl_upstream=75 +tcp_ttl_downstream=70 + +[stat] +statsd_server=127.0.0.1 +statsd_port=8900 +statsd_cycle=5 +# 1:FS_OUTPUT_STATSD; 2:FS_OUTPUT_INFLUX_LINE +statsd_format=2 +histogram_bins=0.5,0.8,0.9,0.95 +statsd_set_prometheus_port=9001 +statsd_set_prometheus_url_path=/metrics + +[traffic_mirror] +{% if traffic_mirror.enable_decrypted_traffic == True -%} +enable=1 +{%- else -%} +enable=0 +{%- endif %} 
+device={{ macros.safe_read(nic_mirror_name,"proxy") }} +app_symbol=proxy-mirror-{{ app_symbol_index }} +# 0:TRAFFIC_MIRROR_ETHDEV_AF_PACKET; 1:TRAFFIC_MIRROR_ETHDEV_MARSIO +type=1 + +table_info=resource/pangu/table_info_traffic_mirror.conf +stat_file=log/traffic_mirror.status +default_vlan_id=0 + +[kafka] +brokerlist={{ macros.address_port_pairs_render(external_resources.olap.kafka_brokers.addresses,",") }} +sasl_username={{ macros.safe_read(external_resources,"olap.kafka_brokers.sasl_username") }} +sasl_passwd={{ macros.safe_read(external_resources,"olap.kafka_brokers.sasl_password") }} +rule_hits_topic=POLICY-RULE-METRIC +proxy_event_topic=PROXY-EVENT +file_stream_topic=TRAFFIC-HTTP-FILE-STREAM-RECORD +exch_cert_topic=PXY-EXCH-INTERMEDIA-CERT + +[maat] +# 0:json 1:redis +maat_input_mode=1 +stat_switch=1 +perf_switch=1 +table_info=resource/pangu/table_info.conf +accept_path=/opt/tsg/etc/tsg_device_tag.json +stat_file=log/pangu_scan.fs2 +effect_interval_s=1 +deferred_load_on=0 + +# json mode conf iterm +json_cfg_file=resource/pangu/pangu_http.json + +# redis mode conf iterm +maat_redis_server={{ macros.cm_address(external_resources.cm) }} +maat_redis_port_range={{ macros.cm_port(external_resources.cm) }} +maat_redis_db_index={{ vsys_id }} + +[proxy_hits] +app_name="proxy_rule_hits" +output_fs_interval_ms=500 +output_kafka_interval_ms=1000 + +# for enable kni v4 +[packet_io] +dup_packet_filter_enable=1 +dup_packet_filter_capacity=1000000 +dup_packet_filter_timeout=10 +# MESA_load_profile not support double +#dup_packet_filter_error_rate=0.00001 +packet_io_debug=0 +packet_io_threads={{ pktio_affinity | length }} +packet_io_cpu_affinity_mask={{ pktio_affinity | join(',') }} + +firewall_sids={{ sid.firewall }} +proxy_sids={{ sid.proxy }} +service_chaining_sids={{ sid.sce }} + +# bypass_all_traffic:1 NF2NF and SF2SF +bypass_all_traffic=0 + +rx_burst_max=128 +app_symbol=proxy-{{ app_symbol_index }} +dev_nf_interface={{ proxy_config.proxy_nic }} + +src_mac_addr = 
00:0e:c6:d6:72:c1 + +# tap config +tap_name=tap0 + +# 1.tap_allow_mutilthread=1 load bpf rss obj +# 2.tap_allow_mutilthread=0 not load bpf rss obj +tap_allow_mutilthread=1 +bpf_obj=/opt/tsg/tfe/resource/bpf/bpf_tun_rss_steering.o +# tap_bpf_debug_log: cat /sys/kernel/debug/tracing/trace_pipe +bpf_debug_log=0 +# 2: BPF 使用二元组分流 +# 4: BPF 使用四元组分流 +bpf_hash_mode={{ distmode }} + +# 配置 tap 网卡的 RPS +tap_rps_enable=1 +tap_rps_mask={{ tfe_rps_mask }} + +# iouring config +enable_iouring=1 +enable_debuglog=0 +ring_size=1024 +buff_size=2048 +# io_uring_setup() flags +# IORING_SETUP_IOPOLL (1U << 0) /* io_context is polled */ +# IORING_SETUP_SQPOLL (1U << 1) /* SQ poll thread */ +# IORING_SETUP_SQ_AFF (1U << 2) /* sq_thread_cpu is valid */ +# IORING_SETUP_CQSIZE (1U << 3) /* app defines CQ size */ +# IORING_SETUP_CLAMP (1U << 4) /* clamp SQ/CQ ring sizes */ +# IORING_SETUP_ATTACH_WQ (1U << 5) /* attach to existing wq */ +# IORING_SETUP_R_DISABLED (1U << 6) /* start with ring disabled */ +# IORING_SETUP_SUBMIT_ALL (1U << 7) /* continue submit on error */ +flags=0 +sq_thread_idle=0 + +[traffic_steering] +enable_steering_http=0 +enable_steering_ssl=0 +# 17: 0x11 +so_mask_client=17 +# 34: 0x22 +so_mask_server=34 +device_client=tap_c +device_server=tap_s + +http_keepalive_enable=0 +http_keepalive_path="/metrics" +http_keepalive_addr=192.168.41.60 +http_keepalive_port=9273 \ No newline at end of file diff --git a/containers/sce-bfdd/Dockerfile.j2 b/containers/sce-bfdd/Dockerfile.j2 new file mode 100644 index 00000000..c0893a90 --- /dev/null +++ b/containers/sce-bfdd/Dockerfile.j2 @@ -0,0 +1,15 @@ +{% import 'dockerfile-macros.j2' as macros -%} +ARG BASE_IMAGE +FROM ${BASE_IMAGE} + +COPY files/libyang2-2.0.7-1.el8.x86_64.rpm /tmp/ + +RUN rpm -i /tmp/libyang2-2.0.7-1.el8.x86_64.rpm && \ + rm -rf /tmp/libyang2-2.0.7-1.el8.x86_64.rpm && \ + {{ macros.install_packages(packages) }} && \ + {{ macros.clean_after_install_packages() }} + +COPY --chmod=755 entrypoint.sh /usr/local/bin/ + 
+WORKDIR /opt/tsg/bfdd +CMD ["/bin/bash"] diff --git a/containers/sce-bfdd/build.mk b/containers/sce-bfdd/build.mk new file mode 100644 index 00000000..c531503b --- /dev/null +++ b/containers/sce-bfdd/build.mk @@ -0,0 +1,3 @@ +rule_target := sce-bfdd +rule_prerequisites := $(DEP_ENV_FILES) $(YUM_REPO_FILE) $(DOCKERFILE_MACROS) $(BUILD_DIR)/base/$(BUILD_DONE_FILE) +rule_recipes := $(call build_image_from_dockerfile,$(rule_target),$(IMAGE_REGISTRY)/base:$(IMAGE_TAG),$(IMAGE_REGISTRY)/$(rule_target):$(IMAGE_TAG)) \ No newline at end of file diff --git a/containers/sce-bfdd/entrypoint.sh b/containers/sce-bfdd/entrypoint.sh new file mode 100644 index 00000000..0121714c --- /dev/null +++ b/containers/sce-bfdd/entrypoint.sh @@ -0,0 +1,22 @@ +#!/bin/sh -ex +source /usr/local/bin/entrypoint_public.sh + +# start +ldconfig + +parse_args "$@" + +mkdir -p /opt/tsg/etc/ + +DEVICE_SN=$(read_device_sn_from_k8s_node_info) +echo "{\"sn\": \"$DEVICE_SN\"}" > /opt/tsg/etc/tsg_sn.json + +if [ ${IS_ENABLE_PRESTART} == "true" ]; then + enable_prestart +fi + +if [ ${IS_ENABLE_INTERACTIVE_STARTUP} == "true" ]; then + enable_interactive_startup +fi + +exec /opt/tsg/bfdd/bin/bfdd -u root -g root \ No newline at end of file diff --git a/containers/sce-bfdd/files/libyang2-2.0.7-1.el8.x86_64.rpm b/containers/sce-bfdd/files/libyang2-2.0.7-1.el8.x86_64.rpm new file mode 100644 index 00000000..1eabeb49 Binary files /dev/null and b/containers/sce-bfdd/files/libyang2-2.0.7-1.el8.x86_64.rpm differ diff --git a/containers/sce-bfdd/manifest.yaml b/containers/sce-bfdd/manifest.yaml new file mode 100644 index 00000000..994cd9bb --- /dev/null +++ b/containers/sce-bfdd/manifest.yaml @@ -0,0 +1,4 @@ +packages: + - name: bfdd + version: 1.0.3-release + install_command: dnf \ No newline at end of file diff --git a/containers/sce/Dockerfile.j2 b/containers/sce/Dockerfile.j2 new file mode 100644 index 00000000..09cb3a42 --- /dev/null +++ b/containers/sce/Dockerfile.j2 @@ -0,0 +1,15 @@ +{% import 
'dockerfile-macros.j2' as macros -%} +ARG BASE_IMAGE +FROM ${BASE_IMAGE} + +RUN {{ macros.install_packages(packages) }} && \ + {{ macros.clean_after_install_packages() }} +# files +COPY files/zlog.conf /opt/tsg/sce/conf/ +# templates +COPY templates/* /templates/ +# scripts +COPY --chmod=755 entrypoint.sh /usr/local/bin/ + +WORKDIR /opt/tsg/sce +CMD ["/bin/bash"] diff --git a/containers/sce/build.mk b/containers/sce/build.mk new file mode 100644 index 00000000..aaf8590d --- /dev/null +++ b/containers/sce/build.mk @@ -0,0 +1,3 @@ +rule_target := sce +rule_prerequisites := $(DEP_ENV_FILES) $(YUM_REPO_FILE) $(DOCKERFILE_MACROS) $(BUILD_DIR)/base/$(BUILD_DONE_FILE) +rule_recipes := $(call build_image_from_dockerfile,$(rule_target),$(IMAGE_REGISTRY)/base:$(IMAGE_TAG),$(IMAGE_REGISTRY)/$(rule_target):$(IMAGE_TAG)) \ No newline at end of file diff --git a/containers/sce/entrypoint.sh b/containers/sce/entrypoint.sh new file mode 100644 index 00000000..952ca759 --- /dev/null +++ b/containers/sce/entrypoint.sh @@ -0,0 +1,26 @@ +#!/bin/sh -ex +source /usr/local/bin/entrypoint_public.sh + +# start +ldconfig + +parse_args "$@" + +mkdir -p /opt/tsg/etc/ + +render_template sce.conf.j2 /opt/tsg/sce/conf/sce.conf +render_template tsg_device_tag.json.j2 /opt/tsg/etc/tsg_device_tag.json + +DEVICE_SN=$(read_device_sn_from_k8s_node_info) +echo "{\"sn\": \"$DEVICE_SN\"}" > /opt/tsg/etc/tsg_sn.json + +if [ ${IS_ENABLE_PRESTART} == "true" ]; then + enable_prestart +fi + +if [ ${IS_ENABLE_INTERACTIVE_STARTUP} == "true" ]; then + enable_interactive_startup +fi + + +exec /opt/tsg/sce/bin/sce diff --git a/containers/sce/files/zlog.conf b/containers/sce/files/zlog.conf new file mode 100644 index 00000000..62be24ea --- /dev/null +++ b/containers/sce/files/zlog.conf @@ -0,0 +1,12 @@ +# kill -s SIGHUP "pid" + +[global] +default format = "%d(%c), %V, %F, %U, %m%n" + +[levels] +DEBUG=10 +INFO=20 +FATAL=30 + +[rules] +sce.fatal "./log/sce.log.%d(%F)", 500M ~ "./log/sce.log.%d(%F).#2s"; diff --git 
a/containers/sce/manifest.yaml b/containers/sce/manifest.yaml new file mode 100644 index 00000000..910edadb --- /dev/null +++ b/containers/sce/manifest.yaml @@ -0,0 +1,4 @@ +packages: + - name: sce + version: 1.3.6.431ac5f + install_command: dnf \ No newline at end of file diff --git a/containers/sce/templates/sce.conf.j2 b/containers/sce/templates/sce.conf.j2 new file mode 100644 index 00000000..7736dafe --- /dev/null +++ b/containers/sce/templates/sce.conf.j2 @@ -0,0 +1,94 @@ +{% import '/templates/macros.j2' as macros -%} +[system] +nr_worker_threads={{ sce_affinity | length }} +cpu_affinity_mask={{ sce_affinity | join(',') }} +firewall_sids={{ sid.firewall }} +stateless_sids=900 +enable_debug=0 +enable_send_log=1 +ts_update_interval_ms=1 + +# Only when (disable_coredump == 1 || (enable_breakpad == 1 && enable_breakpad_upload == 1)) is satisfied, the core will not be generated locally + +disable_coredump=0 +enable_breakpad=0 +enable_breakpad_upload=0 +# must be /run/sce/crashreport, due to tmpfile limit +breakpad_minidump_dir=/run/sce/crashreport +breakpad_upload_tools=/opt/tsg/framework/bin/minidump_upload + +[maat] +# 0:json 1:redis +input_mode=1 +# LOG_LEVEL_TRACE = 0; LOG_LEVEL_DEBUG = 1; LOG_LEVEL_INFO = 2; +# LOG_LEVEL_WARN = 3; LOG_LEVEL_ERROR = 4; LOG_LEVEL_FATAL = 5; +log_level=5 +stat_switch=1 +perf_switch=1 +scan_detail=0 +deferred_load=0 +effect_interval_ms=1000 +stat_file=log/maat.fs2 +table_info=resource/table_info.conf +accept_path=/opt/tsg/etc/tsg_device_tag.json +json_cfg_file=resource/sce.json +foreign_cont_dir=resource/foreign_files +redis_db_idx={{ vsys_id }} +redis_server={{ macros.cm_address(external_resources.cm) }} +redis_port_range={{ macros.cm_port(external_resources.cm) }} +max_chaining_size=32 + +[packet_io] +# bypass_traffic:0 disable +# bypass_traffic:1 bypass all traffic +# bypass_traffic:2 bypass raw traffic +# bypass_traffic:3 bypass decrypted traffic +bypass_traffic=0 +rx_burst_max=128 +min_timeout_ms=900 +app_symbol=sce-{{ 
app_symbol_index }} +dev_nf_name={{ sce_config.steering_nic }} + +# dev_endpoint_l2 for vlan +dev_endpoint_l2_name={{ sce_config.vlan_config.endpoint_nic }} +vlan_encapsulate_replace_orig_vlan_header=0 + +# dev_endpoint_l3 for vxlan +dev_endpoint_l3_name={{ sce_config.vxlan_config.endpoint_nic }} +dev_endpoint_l3_ip={{ sce_config.vxlan_config.endpoint_ip }} +# dev_endpoint_l3_mac=aa:aa:aa:aa:aa:aa + +[stat] +output_file=log/sce.fs2 +statsd_server=127.0.0.1 +statsd_port=8100 +# 1 : FS_OUTPUT_STATSD +# 2 : FS_OUTPUT_INFLUX_LINE +statsd_format=2 +statsd_cycle=2 +prometheus_listen_port=9006 +prometheus_listen_url=/metrics + +[metrics] +output_fs_interval_ms=500 +output_kafka_interval_ms=1000 +data_center={{ macros.read_device_tag_value(device, "data_center") }} +device_group={{ macros.read_device_tag_value(device, "device_group") }} +device_id=DEVICE_ID_PLACE_HOLDER_MARK + +[bfdd] +enable=1 +# use default_gw_mac when enable = 0 +default_gw_mac=aa:aa:aa:aa:aa:aa +path=/run/frr/bfdd.vty +device={{ sce_config.vxlan_config.endpoint_nic }} +local_address={{ sce_config.vxlan_config.endpoint_ip }} +gateway={{ macros.safe_read(sce_config,"vxlan_config.endpoint_gateway") }} +icmp_cycle_time_s=10 + +[kafka] +enable_debug=0 +brokerlist={{ macros.address_port_pairs_render(external_resources.olap.kafka_brokers.addresses,",") }} +sasl_username={{ macros.safe_read(external_resources,"olap.kafka_brokers.sasl_username") }} +sasl_passwd={{ macros.safe_read(external_resources,"olap.kafka_brokers.sasl_password") }} +topic_name=POLICY-RULE-METRIC \ No newline at end of file diff --git a/containers/shaping/Dockerfile.j2 b/containers/shaping/Dockerfile.j2 new file mode 100644 index 00000000..00065113 --- /dev/null +++ b/containers/shaping/Dockerfile.j2 @@ -0,0 +1,16 @@ +{% import 'dockerfile-macros.j2' as macros -%} +ARG BASE_IMAGE +FROM ${BASE_IMAGE} + +RUN {{ macros.install_packages(packages) }} && \ + {{ macros.clean_after_install_packages() }} +# files +COPY files/zlog.conf 
/opt/tsg/shaping_engine/conf/ +# templates +COPY templates/* /templates/ +# scripts +COPY --chmod=755 entrypoint.sh /usr/local/bin/ + +WORKDIR /opt/tsg/shaping_engine + +CMD ["/bin/bash"] diff --git a/containers/shaping/build.mk b/containers/shaping/build.mk new file mode 100644 index 00000000..c4a8b53a --- /dev/null +++ b/containers/shaping/build.mk @@ -0,0 +1,3 @@ +rule_target := shaping +rule_prerequisites := $(DEP_ENV_FILES) $(YUM_REPO_FILE) $(DOCKERFILE_MACROS) $(BUILD_DIR)/base/$(BUILD_DONE_FILE) +rule_recipes := $(call build_image_from_dockerfile,$(rule_target),$(IMAGE_REGISTRY)/base:$(IMAGE_TAG),$(IMAGE_REGISTRY)/$(rule_target):$(IMAGE_TAG)) \ No newline at end of file diff --git a/containers/shaping/entrypoint.sh b/containers/shaping/entrypoint.sh new file mode 100644 index 00000000..154af0d3 --- /dev/null +++ b/containers/shaping/entrypoint.sh @@ -0,0 +1,38 @@ +#!/bin/sh -ex +source /usr/local/bin/entrypoint_public.sh + +set_configs_in_runtime() +{ + local podname=${HOSTNAME} + local CLUSTER_ANNOUNCE_PORT=$(read_nodeport_from_service ${podname}-8551 default) + local HEALTH_CHECK_ANNOUNCE_PORT=$(read_nodeport_from_service ${podname}-8552 default) + + sed -Ei -c "s|NODE_IP_LOCATION|${NODE_IP?}|g" /opt/tsg/shaping_engine/conf/shaping.conf + sed -Ei -c "s|CLUSTER_ANNOUNCE_PORT_LOCATION|${CLUSTER_ANNOUNCE_PORT?}|g" /opt/tsg/shaping_engine/conf/shaping.conf + sed -Ei -c "s|HEALTH_CHECK_ANNOUNCE_PORT_LOCATION|${HEALTH_CHECK_ANNOUNCE_PORT?}|g" /opt/tsg/shaping_engine/conf/shaping.conf +} + +# start +ldconfig + +parse_args "$@" + +mkdir -p /opt/tsg/etc/ + +render_template shaping.conf.j2 /opt/tsg/shaping_engine/conf/shaping.conf +render_template tsg_device_tag.json.j2 /opt/tsg/etc/tsg_device_tag.json + +DEVICE_SN=$(read_device_sn_from_k8s_node_info) +echo "{\"sn\": \"$DEVICE_SN\"}" > /opt/tsg/etc/tsg_sn.json + +set_configs_in_runtime + +if [ ${IS_ENABLE_PRESTART} == "true" ]; then + enable_prestart +fi + +if [ ${IS_ENABLE_INTERACTIVE_STARTUP} == "true" ]; then + 
enable_interactive_startup +fi + +exec /opt/tsg/shaping_engine/bin/shaping_engine \ No newline at end of file diff --git a/containers/shaping/files/zlog.conf b/containers/shaping/files/zlog.conf new file mode 100644 index 00000000..66fef90c --- /dev/null +++ b/containers/shaping/files/zlog.conf @@ -0,0 +1,13 @@ +[global] +default format = "%d(%c), %V, %F, %U, %m%n" + +[levels] +DEBUG=10 +INFO=20 +FATAL=30 + +[rules] +log_shaping.fatal "./log/shaping.log.%d(%F)", 500M ~ "./log/shaping.log.%d(%F).#2s"; +#log_shaping.fatal >stdout; +#log_shaping.info "./log/info_shaping.log.%d(%F)"; +#log_shaping.debug "./log/debug_shaping.log.%d(%F)"; diff --git a/containers/shaping/manifest.yaml b/containers/shaping/manifest.yaml new file mode 100644 index 00000000..528fe39d --- /dev/null +++ b/containers/shaping/manifest.yaml @@ -0,0 +1,4 @@ +packages: + - name: shaping_engine + version: 3.2.0.ef65ec1 + install_command: dnf \ No newline at end of file diff --git a/containers/shaping/templates/shaping.conf.j2 b/containers/shaping/templates/shaping.conf.j2 new file mode 100644 index 00000000..c9d18cbd --- /dev/null +++ b/containers/shaping/templates/shaping.conf.j2 @@ -0,0 +1,49 @@ +{% import '/templates/macros.j2' as macros -%} +[SYSTEM] +WORK_THREAD_NUM={{ shaping_affinity | length }} +ENABLE_CPU_AFFINITY=1 +CPU_AFFINITY_MASK={{ shaping_affinity | join(',') }} +firewall_sids={{ sid.firewall }} + +[MARSIO] +DEV_INTERFACE="{{ shaping_config.shaping_nic }}" +RX_BRUST_MAX=64 +APP_SYMBOL="shaping-{{ app_symbol_index }}" + +[MAAT] +INPUT_MODE=1 +TABLE_INFO="conf/table_info.json" +JSON_FILE="conf/shaping_maat.json" +REDIS_DB_IDX={{ vsys_id }} +REDIS_IP="{{ macros.cm_address(external_resources.cm) }}" +REDIS_PORT="{{ macros.cm_port(external_resources.cm) }}" + + +[SWARMKV] +SWARMKV_CLUSTER_NAME="tsg-shaping-vsys{{ vsys_id }}" +SWARMKV_NODE_IP="0.0.0.0" +SWARMKV_NODE_PORT=8551 +SWARMKV_CONSUL_IP="NODE_IP_LOCATION" +SWARMKV_CONSUL_PORT=8500 +SWARMKV_CLUSTER_ANNOUNCE_IP="NODE_IP_LOCATION" 
+SWARMKV_CLUSTER_ANNOUNCE_PORT=CLUSTER_ANNOUNCE_PORT_LOCATION +SWARMKV_HEALTH_CHECK_PORT=8552 +SWARMKV_HEALTH_CHECK_ANNOUNCE_PORT=HEALTH_CHECK_ANNOUNCE_PORT_LOCATION + +[METRIC] +DATA_CENTER={{ macros.read_device_tag_value(device, "data_center") }} +DEVICE_GROUP={{ macros.read_device_tag_value(device, "device_group") }} + + +DEVICE_ID="DEVICE_ID_PLACE_HOLDER_MARK" +KAFKA_TOPIC="POLICY-RULE-METRIC" +KAFKA_BROKERS="{{ macros.address_port_pairs_render(external_resources.olap.kafka_brokers.addresses,",") }}" +KAFKA_USERNAME="{{ macros.safe_read(external_resources,"olap.kafka_brokers.sasl_username") }}" +KAFKA_PASSWORD="{{ macros.safe_read(external_resources,"olap.kafka_brokers.sasl_password") }}" + +[CONFIG] +#PROFILE_QUEUE_LEN_PER_PRIORITY_MAX=128 +SESSION_QUEUE_LEN_MAX=32 +QUEUEING_SESSIONS_PER_PRIORITY_PER_THREAD_MAX=1024 +POLLING_NODE_NUM_MAX={"polling_node_num_max":[ 3, 2, 2, 2, 2, 2, 2, 2, 2, 2 ]} + diff --git a/containers/traffic-engine/build.mk b/containers/traffic-engine/build.mk new file mode 100644 index 00000000..4caa49d6 --- /dev/null +++ b/containers/traffic-engine/build.mk @@ -0,0 +1,7 @@ +requisites_images_name := firewall proxy proxy-certstore sce sce-bfdd shaping +rule_target := traffic-engine +rule_prerequisites := $(foreach name,$(requisites_images_name),$(BUILD_DIR)/$(name)/$(BUILD_DONE_FILE)) +rule_recipes := rm -rf $(IMAGE_TAR_DIR)/$(rule_target)-$(IMAGE_TAG)-$(IMAGE_ARCH)-docker.tar;\ + podman save \ + -o $(IMAGE_TAR_DIR)/$(rule_target)-$(IMAGE_TAG)-$(IMAGE_ARCH)-docker.tar \ + -m $(foreach name,$(requisites_images_name),$(IMAGE_REGISTRY)/$(name):$(IMAGE_TAG)) \ No newline at end of file diff --git a/containers/tsg-cm/build.mk b/containers/tsg-cm/build.mk new file mode 100644 index 00000000..b726e512 --- /dev/null +++ b/containers/tsg-cm/build.mk @@ -0,0 +1,22 @@ +tsg_cm_image_registry := ci1.bj.geedge.net +tsg_cm_images := $(tsg_cm_image_registry)/tsg/cm/tsg-cm:api_v24.09_2024090501 \ + $(tsg_cm_image_registry)/common/mariadb:10.11.8 \ + 
$(tsg_cm_image_registry)/tsg/cm/cm-verify:tsg_v24.09_20240906 + +rule_target := tsg-cm +rule_prerequisites := + +define rule_recipes + [ -z "${TSG_CM_IMAGE_REGISTRY_AUTH_USERNAME}" ] && echo "username not set." && exit 1 || true + [ -z "${TSG_CM_IMAGE_REGISTRY_AUTH_PASSWORD}" ] && echo "password not set." && exit 1 || true + + buildah login \ + -u ${TSG_CM_IMAGE_REGISTRY_AUTH_USERNAME} \ + -p ${TSG_CM_IMAGE_REGISTRY_AUTH_PASSWORD} $(tsg_cm_image_registry); \ + for image in $(tsg_cm_images); do \ + buildah pull $$$$image || exit 1; \ + done + + mkdir -p $(IMAGE_TAR_DIR); \ + podman save -o $(IMAGE_TAR_DIR)/$(rule_target)-docker.tar -m $(tsg_cm_images) +endef \ No newline at end of file diff --git a/helmcharts/Makefile b/helmcharts/Makefile new file mode 100644 index 00000000..3b91a6e0 --- /dev/null +++ b/helmcharts/Makefile @@ -0,0 +1,44 @@ +export VERSION +export APP_VERSION + +HELMCHART_NAMES := $(shell find . -maxdepth 1 -type d ! -name "build" ! -name "." | sed 's|^\./||') + +BUILD_DIR := build +HELMCHART_TAR_DIR := $(BUILD_DIR)/helmcharts +MANIFEST_DIR := $(BUILD_DIR)/manifests +BUILD_DONE_FILE := build.done + +define build_rule +$(1): $(BUILD_DIR)/$(1)/$(BUILD_DONE_FILE) + +$(BUILD_DIR)/$(1)/$(BUILD_DONE_FILE): $(shell find $(1) -type f) $(2) + @mkdir -p $(BUILD_DIR)/$(1) + $(3) + @echo done > $(BUILD_DIR)/$(1)/$(BUILD_DONE_FILE) +endef + +define build_helmchart_package + @mkdir -p $(HELMCHART_TAR_DIR) + /usr/local/bin/helm package --app-version $(3) --version $(2) -d $(HELMCHART_TAR_DIR) $(1)/helm +endef + +define copy_helmchart_package + @mkdir -p $(HELMCHART_TAR_DIR) + cp $(1)/files/*.tgz $(HELMCHART_TAR_DIR) +endef + +define copy_manifest + @mkdir -p $(MANIFEST_DIR) + cp $(1)/files/*.yaml $(MANIFEST_DIR) +endef + +.PHONY: all clean $(HELMCHART_NAMES) + +all: $(HELMCHART_NAMES) + +$(foreach name,$(HELMCHART_NAMES),\ + $(eval include $(name)/build.mk);\ + $(eval $(call build_rule,$(rule_target),$(rule_prerequisites),$(rule_recipes)))) + +clean: + rm -rf
$(BUILD_DIR) \ No newline at end of file diff --git a/helmcharts/prometheus/build.mk b/helmcharts/prometheus/build.mk new file mode 100644 index 00000000..d11cc786 --- /dev/null +++ b/helmcharts/prometheus/build.mk @@ -0,0 +1,7 @@ +rule_target := prometheus +rule_prerequisites := + +define rule_recipes + $(call copy_helmchart_package,$(rule_target)) + $(call copy_manifest,$(rule_target)) +endef diff --git a/helmcharts/prometheus/files/prometheus-25.8.2.tgz b/helmcharts/prometheus/files/prometheus-25.8.2.tgz new file mode 100644 index 00000000..7798fe47 Binary files /dev/null and b/helmcharts/prometheus/files/prometheus-25.8.2.tgz differ diff --git a/helmcharts/prometheus/files/prometheus-crd.yaml b/helmcharts/prometheus/files/prometheus-crd.yaml new file mode 100644 index 00000000..e5ba5f3a --- /dev/null +++ b/helmcharts/prometheus/files/prometheus-crd.yaml @@ -0,0 +1,203 @@ +apiVersion: helm.cattle.io/v1 +kind: HelmChart +metadata: + name: prometheus + namespace: kube-system +spec: + chart: https://%{KUBERNETES_API}%/static/charts/prometheus-25.8.2.tgz + targetNamespace: tsg-os-system + valuesContent: |- + kube-state-metrics: + image: + pullPolicy: Never + server: + image: + pullPolicy: Never + service: + servicePort: 8080 + nodePort: 30080 + type: NodePort + global: + scrape_interval: 15s + configmapReload: + prometheus: + image: + pullPolicy: Never + alertmanager: + image: + pullPolicy: Never + alertmanager: + image: + pullPolicy: Never + prometheus-pushgateway: + nameOverride: pushgateway + image: + pullPolicy: Never + + prometheus-node-exporter: + nameOverride: node-exporter + image: + pullPolicy: Never + service: + annotations: + prometheus.io/scrape: "true" + tsg/monitor: "true" + extraArgs: + - --collector.ethtool + - --collector.ethtool.device-include=.* + - --collector.ethtool.metrics-include=.* + - --collector.cpu.info + - --collector.netdev + - --collector.netdev.address-info + + serverFiles: + prometheus.yml: + scrape_configs: + - job_name: 'cadvisor' 
+ scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + metric_relabel_configs: + - source_labels: [pod] + action: replace + target_label: serviceFunction + regex: (.*)-(firewall|proxy|shaping|sce)-([a-z0-9]+)-([a-z0-9]+) + replacement: $1 + - regex: instance + action: labeldrop + - regex: id + action: labeldrop + - regex: image + action: labeldrop + - regex: name + action: labeldrop + - regex: namespace + action: labeldrop + - regex: pod + action: labeldrop + - source_labels: [__name__, device] + separator: ';' + regex: '^container_fs\w+;(.+k3s/containerd.+|.+kubelet/pods.+)' + action: drop + - source_labels: [__name__, device] + separator: ';' + regex: '^container_fs\w+;overlay.+' + action: drop + + - job_name: 'exporter' + honor_labels: true + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_namespace] + action: keep + regex: tsg-os-system + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] + action: drop + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) 
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: (.+?)(?::\d+)?;(\d+) + replacement: $1:$2 + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: service + metric_relabel_configs: + - source_labels: [app] + action: replace + target_label: serviceFunctionIndex + regex: (sapp4|tfe|shaping|sce)(-|_)(.*) + replacement: $3 + - source_labels: [__name__, device] + separator: ';' + regex: '^node\w+;(lo|usb\d+|cnibr\d+|docker\d+|br_dign_[sc]|virtio_dign_[sc]|veth\w+)' + action: drop + - source_labels: [__name__, type] + separator: ';' + regex: '^systemd\w+;(scope|mount|device|slice)' + action: drop + - regex: image_id + action: labeldrop + - regex: container_id + action: labeldrop + - regex: uid + action: labeldrop + - regex: image_spec + action: labeldrop + - source_labels: [mountpoint] + separator: ';' + regex: '(.+k3s/containerd.+|.+kubelet/pods.+)' + action: drop + - regex: instance + action: labeldrop + + + - job_name: 'traffic-engine' + honor_labels: true + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_namespace] + action: keep + regex: default + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] + action: drop + regex: true + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: (.+?)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: serviceFunction + regex: (.*)-(firewall|proxy|shaping|sce)-([a-z0-9]+)-([a-z0-9]+) + replacement: $1 + - source_labels: 
[__meta_kubernetes_pod_label_vsysId] + action: replace + target_label: vsysId + regex: (.+) + - source_labels: [__meta_kubernetes_pod_label_component] + action: replace + target_label: component + regex: (.+) + + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed|Completed + action: drop + metric_relabel_configs: + - regex: instance + action: labeldrop + diff --git a/helmcharts/systemd-exporter/build.mk b/helmcharts/systemd-exporter/build.mk new file mode 100644 index 00000000..b8912b74 --- /dev/null +++ b/helmcharts/systemd-exporter/build.mk @@ -0,0 +1,6 @@ +rule_target := systemd-exporter +rule_prerequisites := +define rule_recipes + $(call build_helmchart_package,$(rule_target),0.1.0,0.1.0) + $(call copy_manifest,$(rule_target)) +endef \ No newline at end of file diff --git a/helmcharts/systemd-exporter/files/systemd-exporter-crd.yaml b/helmcharts/systemd-exporter/files/systemd-exporter-crd.yaml new file mode 100644 index 00000000..14cbe633 --- /dev/null +++ b/helmcharts/systemd-exporter/files/systemd-exporter-crd.yaml @@ -0,0 +1,12 @@ +apiVersion: helm.cattle.io/v1 +kind: HelmChart +metadata: + name: systemd-exporter + namespace: kube-system +spec: + chart: https://%{KUBERNETES_API}%/static/charts/systemd-exporter-0.1.0.tgz + targetNamespace: tsg-os-system + valuesContent: |- + service: + annotations: + prometheus.io/scrape: "true" diff --git a/helmcharts/systemd-exporter/helm/Chart.yaml b/helmcharts/systemd-exporter/helm/Chart.yaml new file mode 100644 index 00000000..3d946ff6 --- /dev/null +++ b/helmcharts/systemd-exporter/helm/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: systemd-exporter +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. 
They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/helmcharts/systemd-exporter/helm/templates/_helpers.tpl b/helmcharts/systemd-exporter/helm/templates/_helpers.tpl new file mode 100644 index 00000000..c06c517f --- /dev/null +++ b/helmcharts/systemd-exporter/helm/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "systemd-exporter.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "systemd-exporter.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "systemd-exporter.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "systemd-exporter.labels" -}} +helm.sh/chart: {{ include "systemd-exporter.chart" . }} +{{ include "systemd-exporter.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "systemd-exporter.selectorLabels" -}} +app.kubernetes.io/name: {{ include "systemd-exporter.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "systemd-exporter.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "systemd-exporter.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/helmcharts/systemd-exporter/helm/templates/deployment.yaml b/helmcharts/systemd-exporter/helm/templates/deployment.yaml new file mode 100644 index 00000000..fc39dc75 --- /dev/null +++ b/helmcharts/systemd-exporter/helm/templates/deployment.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: systemd-exporter + name: systemd-exporter + namespace: {{ .Release.Namespace }} +spec: + replicas: 1 + selector: + matchLabels: + app: systemd-exporter + strategy: + type: Recreate + template: + metadata: + labels: + app: systemd-exporter + spec: + hostNetwork: true + containers: + - env: + - name: MERGER_URLS + value: {{ .Values.exporter.urls }} + - name: MERGER_PORT + value: "{{ .Values.exporter.port }}" + image: "{{ .Values.exporter.image.repository }}:{{ .Values.exporter.image.tag }}" + imagePullPolicy: {{ .Values.exporter.image.pullPolicy }} + name: exporter + ports: + - name: ex-systemd-port + 
containerPort: {{ .Values.exporter.port }} + protocol: TCP diff --git a/helmcharts/systemd-exporter/helm/templates/service.yaml b/helmcharts/systemd-exporter/helm/templates/service.yaml new file mode 100644 index 00000000..0d4da7d0 --- /dev/null +++ b/helmcharts/systemd-exporter/helm/templates/service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: systemd-exporter + name: systemd-exporter + {{- with .Values.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + namespace: {{ .Release.Namespace }} + +spec: + ports: + - name: systemd-exporter-port + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + port: {{ .Values.service.port }} + targetPort: ex-systemd-port + selector: + app: systemd-exporter + type: {{ .Values.service.type }} + diff --git a/helmcharts/systemd-exporter/helm/values.yaml b/helmcharts/systemd-exporter/helm/values.yaml new file mode 100644 index 00000000..48df1d0f --- /dev/null +++ b/helmcharts/systemd-exporter/helm/values.yaml @@ -0,0 +1,13 @@ +exporter: + image: + repository: quay.io/rebuy/exporter-merger + pullPolicy: Never + tag: "v0.2.0" + port: "9008" + urls: "http://127.0.0.1:9558/metrics" + +service: + type: ClusterIP +# nodePort: "30081" + port: "9008" + annotations: {} diff --git a/helmcharts/traffic-engine/build.mk b/helmcharts/traffic-engine/build.mk new file mode 100644 index 00000000..bb0ffe02 --- /dev/null +++ b/helmcharts/traffic-engine/build.mk @@ -0,0 +1,3 @@ +rule_target := traffic-engine +rule_prerequisites := +rule_recipes := $(call build_helmchart_package,$(rule_target),$(VERSION),$(APP_VERSION)) \ No newline at end of file diff --git a/helmcharts/traffic-engine/helm/.helmignore b/helmcharts/traffic-engine/helm/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/helmcharts/traffic-engine/helm/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/helmcharts/traffic-engine/helm/Chart.yaml b/helmcharts/traffic-engine/helm/Chart.yaml new file mode 100644 index 00000000..f2094de0 --- /dev/null +++ b/helmcharts/traffic-engine/helm/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: traffic-engine +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "1.16.0" diff --git a/helmcharts/traffic-engine/helm/charts/.gitkeep b/helmcharts/traffic-engine/helm/charts/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/helmcharts/traffic-engine/helm/templates/_helpers.tpl b/helmcharts/traffic-engine/helm/templates/_helpers.tpl new file mode 100644 index 00000000..1063100d --- /dev/null +++ b/helmcharts/traffic-engine/helm/templates/_helpers.tpl @@ -0,0 +1,82 @@ + +{{/* +The volumes related to "mrzcpd". +The volumes will be mounted by "traffic-engine.mount.mrzcpd". +*/}} +{{- define "traffic-engine.volume.mrzcpd" -}} +- name: opt-tsg-mrzcpd + hostPath: + path: /opt/tsg/mrzcpd +- name: var-run-mrzcpd + hostPath: + path: /var/run/mrzcpd +- name: var-run-dpdk + hostPath: + path: /var/run/dpdk +- name: profile-mrzcpd + hostPath: + path: /etc/profile.d/mrzcpd.sh + type: File +- name: ldconfig-mrzcpd + hostPath: + path: /etc/ld.so.conf.d/mrzcpd.conf + type: File +{{- end -}} + +{{/* +The volumeMounts related to "mrzcpd". +Requires "traffic-engine.volume.mrzcpd" +*/}} +{{- define "traffic-engine.mount.mrzcpd" -}} +- name: opt-tsg-mrzcpd + mountPath: /opt/tsg/mrzcpd + mountPropagation: HostToContainer + readOnly: false +- name: var-run-mrzcpd + mountPath: /var/run/mrzcpd + readOnly: false +- name: var-run-dpdk + mountPath: /var/run/dpdk + readOnly: false +- name: profile-mrzcpd + mountPath: /etc/profile.d/mrzcpd.sh + readOnly: true +- name: ldconfig-mrzcpd + mountPath: /etc/ld.so.conf.d/mrzcpd.conf + readOnly: true +{{- end -}} + +{{- define "public.sync-host-timezone.volume" -}} +- name: localtime-volume + hostPath: + path: /etc/localtime +{{- end -}} + +{{- define "public.sync-host-timezone.volume-mount" -}} +- name: localtime-volume + mountPath: /etc/localtime + readOnly: true +{{- end -}} + +{{- define "public.license-support.dev-shm-volume" -}} +- name: dev-shm-volume + hostPath: + path: /dev/shm +{{- end -}} + +{{- define "public.license-support.dev-shm-volume-mount" -}} +- name: dev-shm-volume + 
mountPath: /dev/shm +{{- end -}} + +{{- define "public.license-support.dev-bus-usb-volume" -}} +- name: dev-bus-usb-node + hostPath: + path: /dev/bus/usb +{{- end -}} + +{{- define "public.license-support.dev-bus-usb-volume-mount" -}} +- name: dev-bus-usb-node + mountPath: /dev/bus/usb + readOnly: true +{{- end -}} \ No newline at end of file diff --git a/helmcharts/traffic-engine/helm/templates/clusterrole.yaml b/helmcharts/traffic-engine/helm/templates/clusterrole.yaml new file mode 100644 index 00000000..8f8014e4 --- /dev/null +++ b/helmcharts/traffic-engine/helm/templates/clusterrole.yaml @@ -0,0 +1,11 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + vsysId: "{{ .Values.vsys_id }}" + serviceFunction: {{ .Release.Name }} + name: {{ .Release.Name }} +rules: + - apiGroups: [""] + resources: ["services", "nodes"] + verbs: ["get", "list", "watch"] \ No newline at end of file diff --git a/helmcharts/traffic-engine/helm/templates/clusterrolebinding.yaml b/helmcharts/traffic-engine/helm/templates/clusterrolebinding.yaml new file mode 100644 index 00000000..7be3b581 --- /dev/null +++ b/helmcharts/traffic-engine/helm/templates/clusterrolebinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + vsysId: "{{ .Values.vsys_id }}" + serviceFunction: {{ .Release.Name }} + name: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Release.Name }} +subjects: + - kind: ServiceAccount + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} diff --git a/helmcharts/traffic-engine/helm/templates/configmap-firewall.yaml b/helmcharts/traffic-engine/helm/templates/configmap-firewall.yaml new file mode 100644 index 00000000..2e4480b3 --- /dev/null +++ b/helmcharts/traffic-engine/helm/templates/configmap-firewall.yaml @@ -0,0 +1,59 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: firewall-{{ .Release.Name }} + namespace: 
default +data: + values.yaml: | + app_symbol_index: {{ .Values.app_symbol_index }} + appsketch: +{{ toYaml .Values.appsketch | indent 6 }} + decoders: +{{ toYaml .Values.decoders | indent 6 }} + device: +{{ toYaml .Values.device | indent 6 }} + dos_protector: +{{ toYaml .Values.dos_protector | indent 6 }} + etherfabric_settings: +{{ toYaml .Values.etherfabric_settings | indent 6 }} + external_resources: +{{ toYaml .Values.external_resources | indent 6 }} + file_stream_record: +{{ toYaml .Values.file_stream_record | indent 6 }} + firewall: +{{ toYaml .Values.firewall | indent 6 }} + nic_mirror_name: + firewall: {{ .Values.nic_mirror_name.firewall | default "" }} + nic_raw_name: {{ .Values.nic_raw_name }} + overload_protection: +{{ toYaml .Values.overload_protection | indent 6 }} + packet_capture: +{{ toYaml .Values.packet_capture | indent 6 }} + policy_sketch: +{{ toYaml .Values.policy_sketch | indent 6 }} + sapp_affinity: +{{ toYaml .Values.sapp_affinity | indent 4 }} + session_flags: +{{ toYaml .Values.session_flags | indent 6 }} + session_id_generator: +{{ toYaml .Values.session_id_generator | indent 6 }} + session_manager: +{{ toYaml .Values.session_manager | indent 6 }} + session_record: +{{ toYaml .Values.session_record | indent 6 }} + sid: + inject_adapter: {{ .Values.sid.inject_adapter }} + proxy: {{ .Values.sid.proxy }} + sce: {{ .Values.sid.sce }} + shaping: {{ .Values.sid.shaping }} + stat_policy_enforcer: +{{ toYaml .Values.stat_policy_enforcer | indent 6 }} + traffic_mirror: + enable_raw_traffic: {{ .Values.traffic_mirror.enable_raw_traffic }} + traffic_sketch: +{{ toYaml .Values.traffic_sketch | indent 6 }} + transaction_record: +{{ toYaml .Values.transaction_record | indent 6 }} + voip_record: +{{ toYaml .Values.voip_record | indent 6 }} + vsys_id: {{ .Values.vsys_id }} \ No newline at end of file diff --git a/helmcharts/traffic-engine/helm/templates/configmap-proxy.yaml b/helmcharts/traffic-engine/helm/templates/configmap-proxy.yaml new file mode 
100644 index 00000000..c88ce7e9 --- /dev/null +++ b/helmcharts/traffic-engine/helm/templates/configmap-proxy.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.proxy.enable (ge (len .Values.tfe_affinity) 1) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: proxy-{{ .Release.Name }} + namespace: default +data: + values.yaml: | + app_symbol_index: {{ .Values.app_symbol_index }} + device: +{{ toYaml .Values.device | indent 6 }} + distmode: {{ .Values.distmode }} + external_resources: + cm: +{{ toYaml .Values.external_resources.cm | indent 8 }} + olap: + kafka_brokers: +{{ toYaml .Values.external_resources.olap.kafka_brokers | indent 10 }} + nic_mirror_name: + proxy: {{ .Values.nic_mirror_name.proxy }} + pktio_affinity: +{{ toYaml .Values.pktio_affinity | indent 4 }} + proxy_config: +{{ toYaml .Values.proxy_config | indent 6 }} + sid: + firewall: {{ .Values.sid.firewall }} + proxy: {{ .Values.sid.proxy }} + sce: {{ .Values.sid.sce }} + tfe_affinity: +{{ toYaml .Values.tfe_affinity | indent 4 }} + tfe_rps_mask: {{ .Values.tfe_rps_mask }} + traffic_mirror: + enable_decrypted_traffic: {{ .Values.traffic_mirror.enable_decrypted_traffic }} + vsys_id: {{ .Values.vsys_id }} +{{- end }} \ No newline at end of file diff --git a/helmcharts/traffic-engine/helm/templates/configmap-sce.yaml b/helmcharts/traffic-engine/helm/templates/configmap-sce.yaml new file mode 100644 index 00000000..a7dad0b3 --- /dev/null +++ b/helmcharts/traffic-engine/helm/templates/configmap-sce.yaml @@ -0,0 +1,25 @@ +{{- if .Values.service_chaining.enable }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: sce-{{ .Release.Name }} + namespace: default +data: + values.yaml: | + app_symbol_index: {{ .Values.app_symbol_index }} + device: +{{ toYaml .Values.device | indent 6 }} + external_resources: + cm: +{{ toYaml .Values.external_resources.cm | indent 8 }} + olap: + kafka_brokers: +{{ toYaml .Values.external_resources.olap.kafka_brokers | indent 10 }} + sce_affinity: +{{ toYaml .Values.sce_affinity | indent 4 
}} + sce_config: +{{ toYaml .Values.sce_config | indent 6 }} + sid: + firewall: {{ .Values.sid.firewall }} + vsys_id: {{ .Values.vsys_id }} +{{- end }} diff --git a/helmcharts/traffic-engine/helm/templates/configmap-shaping.yaml b/helmcharts/traffic-engine/helm/templates/configmap-shaping.yaml new file mode 100644 index 00000000..e62ad77f --- /dev/null +++ b/helmcharts/traffic-engine/helm/templates/configmap-shaping.yaml @@ -0,0 +1,25 @@ +{{- if .Values.shaping.enable }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: shaping-{{ .Release.Name }} + namespace: default +data: + values.yaml: | + app_symbol_index: {{ .Values.app_symbol_index }} + device: +{{ toYaml .Values.device | indent 6 }} + external_resources: + cm: +{{ toYaml .Values.external_resources.cm | indent 8 }} + olap: + kafka_brokers: +{{ toYaml .Values.external_resources.olap.kafka_brokers | indent 10 }} + shaping_affinity: +{{ toYaml .Values.shaping_affinity | indent 4 }} + shaping_config: +{{ toYaml .Values.shaping_config | indent 6 }} + sid: + firewall: {{ .Values.sid.firewall }} + vsys_id: {{ .Values.vsys_id }} +{{- end }} diff --git a/helmcharts/traffic-engine/helm/templates/deployment-firewall.yaml b/helmcharts/traffic-engine/helm/templates/deployment-firewall.yaml new file mode 100644 index 00000000..6a6015e7 --- /dev/null +++ b/helmcharts/traffic-engine/helm/templates/deployment-firewall.yaml @@ -0,0 +1,212 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-firewall + labels: + app: {{ .Release.Name }} + component: firewall + annotations: + reloader.stakater.com/auto: "true" + +spec: + replicas: 1 + selector: + matchLabels: + app: {{ .Release.Name }}-firewall + strategy: + type: Recreate + template: + metadata: + labels: + app: {{ .Release.Name }}-firewall + vsysId: "{{ .Values.vsys_id }}" + serviceFunction: {{ .Release.Name }} + component: firewall + {{- if .Values.dos_protector.enable }} + dynamic-hostports: '8551.8552' + {{- end }} + annotations: + 
prometheus.io/port: "9010" + prometheus.io/scrape: "true" + spec: + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + serviceAccountName: {{ .Release.Name }} + containers: + - name: firewall + image: "registry.gdnt-cloud.website/tsg/os/firewall:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + workingDir: /opt/tsg/sapp + command: + - "bash" + - "-ec" + - | + /usr/local/bin/entrypoint.sh \ + {{- if .Values.dos_protector.enable }} + --enable_dos_protector \ + {{- end }} + {{- if .Values.debug.firewall.enable_prestart_script }} + --enable_prestart \ + {{- end }} + {{- if .Values.debug.firewall.enable_interactive_startup }} + --enable_interactive_startup \ + {{- end }} + || echo "Failed to start." + ports: + - containerPort: 51218 + {{- if .Values.dos_protector.enable }} + - containerPort: 8551 + - containerPort: 8552 + {{- end }} + env: + - name: DEPLOYMENT_NAME + value: {{ .Release.Name }}-firewall + - name: MRZCPD_CTRLMSG_LISTEN_ADDR + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: OVERRIDE_SLED_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + securityContext: + privileged: true +{{- if .Values.debug.firewall.enable_liveness_probe }} + livenessProbe: + tcpSocket: + port: 51218 + failureThreshold: 1 + timeoutSeconds: 10 + startupProbe: + tcpSocket: + port: 51218 + failureThreshold: 90 + periodSeconds: 10 +{{- end }} + + volumeMounts: + - name: journal-volume + mountPath: /run/systemd/journal + - name: firewall-configs-volume + mountPath: "/templates/values.yaml" + subPath: "values.yaml" + - name: firewall-log + mountPath: /opt/tsg/sapp/log + - name: metrics-json-dir + mountPath: "/opt/tsg/sapp/metrics" + {{- if .Values.debug.firewall.enable_prestart_script }} + - name: 
prestart-dir + mountPath: /tmp/prestart + - name: firewall-prestart + mountPath: /opt/tsg/scripts/prestart.sh + {{- end }} + {{- include "public.sync-host-timezone.volume-mount" . | nindent 8 }} + {{- if .Values.debug.firewall.enable_mount_host_filesystem }} + - name: host-root + mountPath: /host + {{- end }} + {{- include "traffic-engine.mount.mrzcpd" . | nindent 8 }} + {{- include "public.license-support.dev-bus-usb-volume-mount" . | nindent 8 }} + {{- include "public.license-support.dev-shm-volume-mount" . | nindent 8 }} + + - name: fieldstat-exporter + image: "registry.gdnt-cloud.website/tsg/os/firewall:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + command: + - "bash" + - "-ec" + - | + ldconfig + python3 /opt/tsg/framework/bin/fieldstat_exporter.py prometheus -p 9010 -d /opt/tsg/sapp/metrics + ports: + - containerPort: 9010 + securityContext: + privileged: true + livenessProbe: + tcpSocket: + port: 9010 + failureThreshold: 1 + timeoutSeconds: 10 + startupProbe: + tcpSocket: + port: 9010 + failureThreshold: 5 + periodSeconds: 10 + volumeMounts: + - name: metrics-json-dir + mountPath: "/opt/tsg/sapp/metrics" + {{- include "public.sync-host-timezone.volume-mount" . 
| nindent 8 }} + + initContainers: + - name: init-default-svc + image: "registry.gdnt-cloud.website/tsg/os/firewall:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + command: + - "bash" + - "-ec" + - | + until nslookup kubernetes.default.svc; do echo waiting for kubernetes service; sleep 2; done + + - name: init-packet-io-engine-ready + image: "registry.gdnt-cloud.website/tsg/os/firewall:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + command: + - "bash" + - "-ec" + - | + until [ $(curl -s -o /dev/null -w "%{http_code}" http://${NODE_IP}:9086/probe) -eq 200 ]; do echo waiting for packet-io-engine ready; sleep 2; done + env: + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + + volumes: + - name: journal-volume + hostPath: + path: /run/systemd/journal + type: Directory + - name: firewall-configs-volume + configMap: + name: firewall-{{ .Release.Name }} + - name: metrics-json-dir + emptyDir: {} + - name: firewall-log + hostPath: + path: /var/log/traffic-engine/traffic-engine-{{ .Release.Name }}/sapp/ + {{- if .Values.debug.firewall.enable_prestart_script }} + - name: prestart-dir + hostPath: + path: /etc/tsg-os/{{ .Release.Name }}/ + type: DirectoryOrCreate + - name: firewall-prestart + hostPath: + {{- if .Values.debug.firewall.prestart_script }} + path: {{ .Values.debug.firewall.prestart_script }} + {{- else }} + path: /etc/tsg-os/{{ .Release.Name }}/firewall_prestart_script.sh + {{- end }} + type: FileOrCreate + {{- end }} + {{- include "traffic-engine.volume.mrzcpd" . | nindent 6 }} + {{- include "public.sync-host-timezone.volume" . | nindent 6 }} + {{- if .Values.debug.firewall.enable_mount_host_filesystem }} + - name: host-root + hostPath: + path: / + {{- end }} + {{- include "public.license-support.dev-bus-usb-volume" . | nindent 6 }} + {{- include "public.license-support.dev-shm-volume" . 
| nindent 6 }} \ No newline at end of file diff --git a/helmcharts/traffic-engine/helm/templates/deployment-proxy.yaml b/helmcharts/traffic-engine/helm/templates/deployment-proxy.yaml new file mode 100644 index 00000000..2fcab8dc --- /dev/null +++ b/helmcharts/traffic-engine/helm/templates/deployment-proxy.yaml @@ -0,0 +1,329 @@ +{{- if and .Values.proxy.enable (ge (len .Values.tfe_affinity) 1) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-proxy + labels: + app: {{ .Release.Name }} + component: proxy + annotations: + reloader.stakater.com/auto: "true" + +spec: + replicas: 1 + selector: + matchLabels: + app: {{ .Release.Name }}-proxy + strategy: + type: Recreate + template: + metadata: + labels: + app: {{ .Release.Name }}-proxy + vsysId: "{{ .Values.vsys_id }}" + serviceFunction: {{ .Release.Name }} + component: proxy + annotations: + prometheus.io/port: "9003" + prometheus.io/scrape: "true" + + spec: + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + serviceAccountName: {{ .Release.Name }} + containers: + - name: proxy + image: "registry.gdnt-cloud.website/tsg/os/proxy:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + workingDir: /opt/tsg/tfe + command: + - "bash" + - "-ec" + - | + /usr/local/bin/entrypoint.sh \ + {{- if .Values.debug.proxy.enable_prestart_script }} + --enable_prestart \ + {{- end }} + {{- if .Values.debug.proxy.enable_interactive_startup }} + --enable_interactive_startup \ + {{- end }} + || echo "Failed to start." 
+ ports: + - containerPort: 9001 + env: + - name: DEPLOYMENT_NAME + value: {{ .Release.Name }}-proxy + - name: MRZCPD_CTRLMSG_LISTEN_ADDR + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: OVERRIDE_SLED_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + securityContext: + privileged: true +{{- if .Values.debug.proxy.enable_liveness_probe }} + livenessProbe: + tcpSocket: + port: 9001 + failureThreshold: 1 + timeoutSeconds: 10 + startupProbe: + tcpSocket: + port: 9001 + failureThreshold: 30 + periodSeconds: 10 +{{- end }} + volumeMounts: + - name: journal-volume + mountPath: /run/systemd/journal + - name: proxy-configs-volume + mountPath: "/templates/values.yaml" + subPath: "values.yaml" + - name: proxy-log + mountPath: /opt/tsg/tfe/log + {{- include "public.sync-host-timezone.volume-mount" . | nindent 8 }} + {{- if .Values.debug.proxy.enable_prestart_script }} + - name: prestart-dir + mountPath: /tmp/prestart + - name: proxy-prestart + mountPath: /opt/tsg/scripts/prestart.sh + {{- end }} + {{- if .Values.debug.proxy.enable_mount_host_filesystem }} + - name: host-root + mountPath: /host + {{- end }} + {{- include "traffic-engine.mount.mrzcpd" . | nindent 8 }} + {{- include "public.license-support.dev-bus-usb-volume-mount" . | nindent 8 }} + + - name: certstore + image: "registry.gdnt-cloud.website/tsg/os/proxy-certstore:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + workingDir: /opt/tsg/certstore + command: + - "bash" + - "-ec" + - | + /usr/local/bin/entrypoint.sh || echo "Failed to start." 
+ securityContext: + privileged: true + ports: + - containerPort: 9002 + env: + - name: OVERRIDE_SLED_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: proxy-configs-volume + mountPath: "/templates/values.yaml" + subPath: "values.yaml" + - name: certstore-log + mountPath: /opt/tsg/certstore/logs + {{- include "public.sync-host-timezone.volume-mount" . | nindent 8 }} + + - name: cert-redis + image: "registry.gdnt-cloud.website/tsg/os/proxy-certstore:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + command: ["/usr/bin/redis-server", "/etc/cert-redis.conf"] + securityContext: + privileged: true + volumeMounts: + {{- include "public.sync-host-timezone.volume-mount" . | nindent 8 }} + + - name: merged-exporter + image: "quay.io/rebuy/exporter-merger:v0.2.0" + imagePullPolicy: Never + env: + - name: MERGER_URLS + value: http://127.0.0.1:9001/metrics http://127.0.0.1:9002/metrics + - name: MERGER_PORT + value: "9003" + ports: + - containerPort: 9003 + + initContainers: + - name: init-default-svc + image: "registry.gdnt-cloud.website/tsg/os/proxy:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + command: + - "bash" + - "-ec" + - | + until nslookup kubernetes.default.svc; do echo waiting for kubernetes service; sleep 2; done + + - name: init-packet-io-engine-ready + image: "registry.gdnt-cloud.website/tsg/os/proxy:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + command: + - "bash" + - "-ec" + - | + until [ $(curl -s -o /dev/null -w "%{http_code}" http://${NODE_IP}:9086/probe) -eq 200 ]; do echo waiting for packet-io-engine ready; sleep 2; done + env: + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + + - name: proxy-init + image: "registry.gdnt-cloud.website/tsg/os/proxy:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + command: + - "bash" + - "-ecx" + - | + mount -o remount,rw /sys + # disable rpfilter + sysctl -w 
net.ipv4.conf.all.rp_filter=0 + sysctl -w net.ipv4.conf.default.rp_filter=0 + + # fs + sysctl -w fs.file-max=1048576 + sysctl -w net.core.somaxconn=131072 + + # tcp options about TIME_WAIT + sysctl -w net.ipv4.tcp_fin_timeout=10 + sysctl -w net.ipv4.tcp_tw_reuse=1 + sysctl -w net.ipv4.tcp_max_tw_buckets=4096 + sysctl -w net.ipv4.tcp_max_syn_backlog=131072 + + # bbr + sysctl -w net.ipv4.tcp_congestion_control=bbr + + # tcp feature + sysctl -w net.ipv4.tcp_ecn=0 + sysctl -w net.ipv4.tcp_sack=1 + sysctl -w net.ipv4.tcp_timestamps=1 + + # disable tcp windows scaling for kernel bugs + sysctl -w net.ipv4.tcp_window_scaling=0 + + ip tuntap add dev tap0 mode tap multi_queue + /usr/sbin/ip link set tap0 address fe:65:b7:03:50:bd + /usr/sbin/ip link set tap0 up + /usr/sbin/ip addr flush dev tap0 + /usr/sbin/ip addr add 172.16.241.2/30 dev tap0 + /usr/sbin/ip neigh flush dev tap0 + /usr/sbin/ip neigh add 172.16.241.1 lladdr 00:0e:c6:d6:72:c1 dev tap0 nud permanent + /usr/sbin/ip6tables -A INPUT -i tap0 -m bpf --bytecode '17,48 0 0 0,84 0 0 240,21 0 13 96,48 0 0 6,21 0 11 6,40 0 0 4,37 0 9 24,48 0 0 52,84 0 0 240,116 0 0 2,53 0 5 24,48 0 0 60,21 0 3 88,48 0 0 61,21 0 1 4,6 0 0 65535,6 0 0 0' -j NFQUEUE --queue-num 1 + /usr/sbin/iptables -A INPUT -i tap0 -m bpf --bytecode '18,48 0 0 0,84 0 0 240,21 0 14 64,48 0 0 9,21 0 12 6,40 0 0 6,69 10 0 8191,177 0 0 0,80 0 0 12,84 0 0 240,116 0 0 2,53 0 5 24,80 0 0 20,21 0 3 88,80 0 0 21,21 0 1 4,6 0 0 65535,6 0 0 0' -j NFQUEUE --queue-num 1 + /usr/sbin/ip rule add iif tap0 tab 100 + /usr/sbin/ip route add local default dev lo table 100 + /usr/sbin/ip rule add fwmark 0x65 lookup 101 + /usr/sbin/ip route add default dev tap0 via 172.16.241.1 table 101 + /usr/sbin/ip addr add fd00::02/64 dev tap0 + /usr/sbin/ip -6 route add default via fd00::01 + /usr/sbin/ip -6 rule add iif tap0 tab 102 + /usr/sbin/ip -6 route add local default dev lo table 102 + /usr/sbin/ip -6 neigh add fd00::01 lladdr 00:0e:c6:d6:72:c1 dev tap0 nud permanent + + 
# decrypted traffic steering + /usr/sbin/ip tuntap add dev tap_c mode tap multi_queue + /usr/sbin/ip tuntap add dev tap_s mode tap multi_queue + + /usr/sbin/ip link set tap_c address 80:61:5f:0f:97:e5 + /usr/sbin/ip link set tap_s address 80:61:5f:0f:97:e6 + + /usr/sbin/ip link set tap_c up + /usr/sbin/ip link set tap_s up + + /usr/sbin/ethtool --offload tap_c rx off tx off + /usr/sbin/ethtool --offload tap_s rx off tx off + + /usr/sbin/ip addr flush dev tap_c + /usr/sbin/ip addr flush dev tap_s + + /usr/sbin/ip addr add 2.2.2.2/24 dev tap_c + /usr/sbin/ip addr add 3.3.3.3/24 dev tap_s + /usr/sbin/ip -4 neigh flush dev tap_c + /usr/sbin/ip -4 neigh flush dev tap_s + /usr/sbin/ip -4 neigh add 2.2.2.1 lladdr 80:61:5f:0f:97:e6 dev tap_c nud permanent + /usr/sbin/ip -4 neigh add 3.3.3.1 lladdr 80:61:5f:0f:97:e5 dev tap_s nud permanent + /usr/sbin/ip -4 rule add fwmark 0x11 lookup 111 + /usr/sbin/ip -4 rule add fwmark 0x22 lookup 222 + /usr/sbin/ip -4 route add default dev tap_c via 2.2.2.1 table 111 + /usr/sbin/ip -4 route add default dev tap_s via 3.3.3.1 table 222 + /usr/sbin/ip -4 rule add iif tap_c tab 100 + /usr/sbin/ip -4 rule add iif tap_s tab 100 + + /usr/sbin/ip addr add fd02::02/64 dev tap_c + /usr/sbin/ip addr add fd03::03/64 dev tap_s + /usr/sbin/ip -6 neigh flush dev tap_c + /usr/sbin/ip -6 neigh flush dev tap_s + /usr/sbin/ip -6 neigh add fd02::01 lladdr 80:61:5f:0f:97:e6 dev tap_c nud permanent + /usr/sbin/ip -6 neigh add fd03::01 lladdr 80:61:5f:0f:97:e5 dev tap_s nud permanent + /usr/sbin/ip -6 rule add fwmark 0x11 lookup 333 + /usr/sbin/ip -6 rule add fwmark 0x22 lookup 444 + /usr/sbin/ip -6 route add default dev tap_c via fd02::01 table 333 + /usr/sbin/ip -6 route add default dev tap_s via fd03::01 table 444 + /usr/sbin/ip -6 rule add iif tap_c tab 102 + /usr/sbin/ip -6 rule add iif tap_s tab 102 + securityContext: + privileged: true + env: + - name: NODE_IP + valueFrom: + fieldRef: + 
fieldPath: status.hostIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + {{- include "public.sync-host-timezone.volume-mount" . | nindent 8 }} + + volumes: + - name: journal-volume + hostPath: + path: /run/systemd/journal + type: Directory + - name: proxy-configs-volume + configMap: + name: proxy-{{ .Release.Name }} + - name: proxy-log + hostPath: + path: /var/log/traffic-engine/traffic-engine-{{ .Release.Name }}/tfe/ + - name: certstore-log + hostPath: + path: /var/log/traffic-engine/traffic-engine-{{ .Release.Name }}/certstore/ + {{- include "traffic-engine.volume.mrzcpd" . | nindent 6 }} + {{- include "public.sync-host-timezone.volume" . | nindent 6 }} + {{- if .Values.debug.proxy.enable_prestart_script }} + - name: prestart-dir + hostPath: + path: /etc/tsg-os/{{ .Release.Name }}/ + type: DirectoryOrCreate + - name: proxy-prestart + hostPath: + {{- if .Values.debug.proxy.prestart_script }} + path: {{ .Values.debug.proxy.prestart_script }} + {{- else }} + path: /etc/tsg-os/{{ .Release.Name }}/proxy_prestart_script.sh + {{- end }} + type: FileOrCreate + {{- end }} + {{- if .Values.debug.proxy.enable_mount_host_filesystem }} + - name: host-root + hostPath: + path: / + {{- end }} + {{- include "public.license-support.dev-bus-usb-volume" . 
| nindent 6 }} + +{{- end }} \ No newline at end of file diff --git a/helmcharts/traffic-engine/helm/templates/deployment-sce.yaml b/helmcharts/traffic-engine/helm/templates/deployment-sce.yaml new file mode 100644 index 00000000..bac85a9b --- /dev/null +++ b/helmcharts/traffic-engine/helm/templates/deployment-sce.yaml @@ -0,0 +1,234 @@ +{{- if .Values.service_chaining.enable }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-sce + labels: + app: {{ .Release.Name }} + component: service-chaining + annotations: + reloader.stakater.com/auto: "true" + +spec: + replicas: 1 + selector: + matchLabels: + app: {{ .Release.Name }}-service-chaining + strategy: + type: Recreate + template: + metadata: + labels: + app: {{ .Release.Name }}-service-chaining + vsysId: "{{ .Values.vsys_id }}" + serviceFunction: {{ .Release.Name }} + component: service-chaining + annotations: + prometheus.io/port: "9006" + prometheus.io/scrape: "true" + + spec: + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + serviceAccountName: {{ .Release.Name }} + containers: + - name: sce + image: "registry.gdnt-cloud.website/tsg/os/sce:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + workingDir: /opt/tsg/sce + command: + - "bash" + - "-ec" + - | + /usr/local/bin/entrypoint.sh \ + {{- if .Values.debug.service_chaining.enable_prestart_script }} + --enable_prestart \ + {{- end }} + {{- if .Values.debug.service_chaining.enable_interactive_startup }} + --enable_interactive_startup \ + {{- end }} + || echo "Failed to start." 
+ ports: + - containerPort: 9006 + env: + - name: DEPLOYMENT_NAME + value: {{ .Release.Name }}-service-chaining + - name: MRZCPD_CTRLMSG_LISTEN_ADDR + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: OVERRIDE_SLED_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + securityContext: + privileged: true +{{- if .Values.debug.service_chaining.enable_liveness_probe }} + livenessProbe: + tcpSocket: + port: 9006 + failureThreshold: 1 + timeoutSeconds: 10 + startupProbe: + tcpSocket: + port: 9006 + failureThreshold: 30 + periodSeconds: 10 +{{- end }} + volumeMounts: + - name: journal-volume + mountPath: /run/systemd/journal + - name: sce-configs-volume + mountPath: "/templates/values.yaml" + subPath: "values.yaml" + - name: sce-log + mountPath: /opt/tsg/sce/log + - name: bfdd-unix-socket + mountPath: /run/frr + {{- include "public.sync-host-timezone.volume-mount" . | nindent 8 }} + {{- if .Values.debug.service_chaining.enable_prestart_script }} + - name: prestart-dir + mountPath: /tmp/prestart + - name: service-chaining-prestart + mountPath: /opt/tsg/scripts/prestart.sh + {{- end }} + {{- if .Values.debug.service_chaining.enable_mount_host_filesystem }} + - name: host-root + mountPath: /host + {{- end }} + {{- include "traffic-engine.mount.mrzcpd" . 
| nindent 8 }} + + - name: bfdd + image: "registry.gdnt-cloud.website/tsg/os/sce-bfdd:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + workingDir: /opt/tsg/bfdd + command: + - "bash" + - "-ec" + - | + /usr/local/bin/entrypoint.sh + env: + - name: MRZCPD_CTRLMSG_LISTEN_ADDR + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: OVERRIDE_SLED_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + securityContext: + privileged: true + volumeMounts: + - name: bfdd-log + mountPath: /opt/tsg/bfdd/log + - name: bfdd-unix-socket + mountPath: /run/frr + {{- include "public.sync-host-timezone.volume-mount" . | nindent 8 }} + + initContainers: + - name: init-default-svc + image: "registry.gdnt-cloud.website/tsg/os/sce:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + command: + - "bash" + - "-ec" + - | + until nslookup kubernetes.default.svc; do echo waiting for kubernetes service; sleep 2; done + + - name: init-packet-io-engine-ready + image: "registry.gdnt-cloud.website/tsg/os/sce:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + command: + - "bash" + - "-ec" + - | + until [ $(curl -s -o /dev/null -w "%{http_code}" http://${NODE_IP}:9086/probe) -eq 200 ]; do echo waiting for packet-io-engine ready; sleep 2; done + env: + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + + - name: service-chaining-init + image: "registry.gdnt-cloud.website/tsg/os/sce:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + command: + - "bash" + - "-ecx" + - | + {{- if .Values.sce_config.vxlan_config.endpoint_nic }} + ip tuntap add dev {{ .Values.sce_config.vxlan_config.endpoint_nic }} mode tap + ip link set dev {{ .Values.sce_config.vxlan_config.endpoint_nic }} up + ip route add {{ .Values.sce_config.vxlan_config.endpoint_netip }}/{{ .Values.sce_config.vxlan_config.endpoint_mask }} dev {{ .Values.sce_config.vxlan_config.endpoint_nic }} table 10 + {{- if 
.Values.sce_config.vxlan_config.endpoint_gateway }} + ip route add default via {{ .Values.sce_config.vxlan_config.endpoint_gateway }} table 10 + {{- end }} + ip a a {{ .Values.sce_config.vxlan_config.endpoint_ip }}/{{ .Values.sce_config.vxlan_config.endpoint_mask }} dev {{ .Values.sce_config.vxlan_config.endpoint_nic }} noprefixroute + ip rule add dport 3784 table 10 + iptables -t mangle -A PREROUTING -p udp --dport 3784 -j TTL --ttl-set 255 + {{- end }} + securityContext: + privileged: true + env: + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + {{- include "public.sync-host-timezone.volume-mount" . | nindent 8 }} + + volumes: + - name: journal-volume + hostPath: + path: /run/systemd/journal + type: Directory + - name: sce-configs-volume + configMap: + name: sce-{{ .Release.Name }} + - name: sce-log + hostPath: + path: /var/log/traffic-engine/traffic-engine-{{ .Release.Name }}/sce/ + - name: bfdd-log + hostPath: + path: /var/log/traffic-engine/traffic-engine-{{ .Release.Name }}/bfdd/ + - name: bfdd-unix-socket + emptyDir: {} + {{- include "traffic-engine.volume.mrzcpd" . | nindent 6 }} + {{- include "public.sync-host-timezone.volume" . 
| nindent 6 }} + {{- if .Values.debug.service_chaining.enable_prestart_script }} + - name: prestart-dir + hostPath: + path: /etc/tsg-os/{{ .Release.Name }}/ + type: DirectoryOrCreate + - name: service-chaining-prestart + hostPath: + {{- if .Values.debug.service_chaining.prestart_script }} + path: {{ .Values.debug.service_chaining.prestart_script }} + {{- else }} + path: /etc/tsg-os/{{ .Release.Name }}/service_chaining_prestart_script.sh + {{- end }} + type: FileOrCreate + {{- end }} + {{- if .Values.debug.service_chaining.enable_mount_host_filesystem }} + - name: host-root + hostPath: + path: / + {{- end }} +{{- end }} \ No newline at end of file diff --git a/helmcharts/traffic-engine/helm/templates/deployment-shaping.yaml b/helmcharts/traffic-engine/helm/templates/deployment-shaping.yaml new file mode 100644 index 00000000..4f08414a --- /dev/null +++ b/helmcharts/traffic-engine/helm/templates/deployment-shaping.yaml @@ -0,0 +1,206 @@ +{{- if .Values.shaping.enable }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-shaping + labels: + app: {{ .Release.Name }} + component: shaping + annotations: + reloader.stakater.com/auto: "true" + +spec: + replicas: 1 + selector: + matchLabels: + app: {{ .Release.Name }}-shaping + strategy: + type: Recreate + template: + metadata: + labels: + app: {{ .Release.Name }}-shaping + vsysId: "{{ .Values.vsys_id }}" + serviceFunction: {{ .Release.Name }} + component: shaping + dynamic-hostports: '8551.8552' + annotations: + prometheus.io/port: "9007" + prometheus.io/scrape: "true" + + spec: + tolerations: + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + serviceAccountName: {{ .Release.Name }} + containers: + - name: shaping + image: "registry.gdnt-cloud.website/tsg/os/shaping:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + workingDir: /opt/tsg/shaping_engine + command: + - "bash" + - 
"-ec" + - | + /usr/local/bin/entrypoint.sh \ + {{- if .Values.debug.shaping.enable_prestart_script }} + --enable_prestart \ + {{- end }} + {{- if .Values.debug.shaping.enable_interactive_startup }} + --enable_interactive_startup \ + {{- end }} + || echo "Failed to start." + ports: + - containerPort: 8551 + - containerPort: 8552 + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DEPLOYMENT_NAME + value: {{ .Release.Name }}-shaping + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: MRZCPD_CTRLMSG_LISTEN_ADDR + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: OVERRIDE_SLED_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName +{{- if .Values.debug.shaping.enable_liveness_probe }} + livenessProbe: + tcpSocket: + port: 8552 + failureThreshold: 1 + timeoutSeconds: 10 + startupProbe: + tcpSocket: + port: 8552 + failureThreshold: 30 + periodSeconds: 10 +{{- end }} + securityContext: + privileged: true + volumeMounts: + - name: journal-volume + mountPath: /run/systemd/journal + - name: shaping-configs-volume + mountPath: "/templates/values.yaml" + subPath: "values.yaml" + - name: shaping-log + mountPath: /opt/tsg/shaping_engine/log + - name: metrics-json-dir + mountPath: "/opt/tsg/shaping_engine/metric" + {{- include "public.sync-host-timezone.volume-mount" . | nindent 8 }} + {{- if .Values.debug.shaping.enable_prestart_script }} + - name: prestart-dir + mountPath: /tmp/prestart + - name: shaping-prestart + mountPath: /opt/tsg/scripts/prestart.sh + {{- end }} + {{- if .Values.debug.shaping.enable_mount_host_filesystem }} + - name: host-root + mountPath: /host + {{- end }} + {{- include "traffic-engine.mount.mrzcpd" . 
| nindent 8 }} + + - name: fieldstat-exporter + image: "registry.gdnt-cloud.website/tsg/os/shaping:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + command: + - "bash" + - "-ec" + - | + ldconfig + python3 /opt/tsg/framework/bin/fieldstat_exporter.py prometheus -p 9007 -d /opt/tsg/shaping_engine/metric + ports: + - containerPort: 9007 + securityContext: + privileged: true + livenessProbe: + tcpSocket: + port: 9007 + failureThreshold: 1 + timeoutSeconds: 10 + startupProbe: + tcpSocket: + port: 9007 + failureThreshold: 5 + periodSeconds: 10 + volumeMounts: + - name: metrics-json-dir + mountPath: "/opt/tsg/shaping_engine/metric" + {{- include "public.sync-host-timezone.volume-mount" . | nindent 8 }} + + initContainers: + - name: init-default-svc + image: "registry.gdnt-cloud.website/tsg/os/shaping:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + command: + - "bash" + - "-ec" + - | + until nslookup kubernetes.default.svc; do echo waiting for kubernetes service; sleep 2; done + + - name: init-packet-io-engine-ready + image: "registry.gdnt-cloud.website/tsg/os/shaping:{{ .Chart.AppVersion }}" + imagePullPolicy: Never + command: + - "bash" + - "-ec" + - | + until [ $(curl -s -o /dev/null -w "%{http_code}" http://${NODE_IP}:9086/probe) -eq 200 ]; do echo waiting for packet-io-engine ready; sleep 2; done + env: + - name: NODE_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + + volumes: + - name: journal-volume + hostPath: + path: /run/systemd/journal + type: Directory + - name: shaping-configs-volume + configMap: + name: shaping-{{ .Release.Name }} + - name: metrics-json-dir + emptyDir: {} + - name: shaping-log + hostPath: + path: /var/log/traffic-engine/traffic-engine-{{ .Release.Name }}/shaping_engine/ + {{- include "traffic-engine.volume.mrzcpd" . | nindent 6 }} + {{- include "public.sync-host-timezone.volume" . 
| nindent 6 }} + {{- if .Values.debug.shaping.enable_prestart_script }} + - name: prestart-dir + hostPath: + path: /etc/tsg-os/{{ .Release.Name }}/ + type: DirectoryOrCreate + - name: shaping-prestart + hostPath: + {{- if .Values.debug.shaping.prestart_script }} + path: {{ .Values.debug.shaping.prestart_script }} + {{- else }} + path: /etc/tsg-os/{{ .Release.Name }}/shaping_prestart_script.sh + {{- end }} + type: FileOrCreate + {{- end }} + {{- if .Values.debug.shaping.enable_mount_host_filesystem }} + - name: host-root + hostPath: + path: / + {{- end }} +{{- end }} diff --git a/helmcharts/traffic-engine/helm/templates/serviceaccount.yaml b/helmcharts/traffic-engine/helm/templates/serviceaccount.yaml new file mode 100644 index 00000000..b152de2f --- /dev/null +++ b/helmcharts/traffic-engine/helm/templates/serviceaccount.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + vsysId: "{{ .Values.vsys_id }}" + serviceFunction: {{ .Release.Name }} + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} diff --git a/helmcharts/traffic-engine/helm/values.yaml b/helmcharts/traffic-engine/helm/values.yaml new file mode 100644 index 00000000..4253359e --- /dev/null +++ b/helmcharts/traffic-engine/helm/values.yaml @@ -0,0 +1,256 @@ +# external_resources: +# cm: +# ## @param external_resources.cm.connection value in [direct, local_cache], default: direct +# ## +# connectivity: direct +# direct: +# address: 10.X.X.X +# port: 7002 +# local_cache: +# cache_name: tsg_traffic_cm_local_cache_1 +# port_num: 1 +# sd: +# ## @param external_resources.cm.connection value in [direct, local_cache], default: local_cache +# ## +# enable: no +# connectivity: direct +# db_index: 0 +# policy_effect_interval_ms: 100 +# policy_garbage_collection_interval_ms: 30000 +# policy_update_check_interval_ms: 100 +# direct: +# address: 10.1.1.1 +# port: 7002 +# local_cache: +# cache_name: tsg_traffic_sd_local_cache_1 + + +# olap: +# kafka_brokers: +# sasl_username: +# 
sasl_password: +# addresses: +# - address: +# port: +# udp_collectors: +# enable: no +# addresses: +# - address: +# port: + + +# device: +# tags: +# - key1: value1 +# - key2: value2 + +# session_id_generator: +# snowflake_worker_id_base: 1 +# snowflake_worker_id_offset: 1 + +# firewall: +# enable: yes +# enable_smartoffload: no +# logs: +# enable: yes +# contains_app_id: +# enable: yes +# contains_dns_resource_record: +# enable: yes +# ringbuf: +# size: 100000 + +# appsketch: +# enable: yes +# qdpi_detector: yes +# context_based_detector: yes + +# transaction_record: +# enable_http: yes +# enable_dns: yes +# enable_mail: yes + +# session_record: +# enable: yes + +# file_stream_record: +# enable: yes + +# session_manager: +# tcp_session_max: 20021 +# tcp_session_unordered_pkt_max: 128 +# tcp_session_timeout_in_sec: 30 +# udp_session_timeout_in_sec: 60 +# tcp_session_opening_timeout_in_sec: 60 +# tcp_session_closing_timeout_in_sec: 30 +# udp_session_max: 5021 +# tcp_duplicated_packet_filter: yes +# udp_duplicated_packet_filter: yes +# inject_duplicated_packet_filter: yes + +# traffic_mirror: +# enable_raw_traffic: yes +# enable_decrypted_traffic: yes + +# packet_capture: +# enable: yes + +# proxy: +# enable: yes + +# voip_record: +# enable_sip: yes +# enable_rtp: yes + +# overload_protection: +# enable: yes +# detect_interval_in_ms: 500 +# detect_smooth_avg_window: 2 +# detect_threshold_cpu_usages: 90 +# recovery_detect_cycle_in_sec: 30 + +# vsys_id: 1 + +# etherfabric_settings: +# keepalive: +# ip: 10.254.19.1 +# mask: 255.255.255.0 + +# sapp_affinity: [5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76] +# tfe_affinity: [77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92] +# sce_affinity: [92] +# shaping_affinity: [93] +# pktio_affinity: [94] +# inject_adapter_affinity: [95] + +# tfe_rps_mask: "00000000" + +# 
nic_policy_log_name: eth0 +# nic_raw_name: eth0 +# nic_mirror_name: +# firewall: eth0 +# proxy: eth0 + +# define_enable_val_yes: yes +# define_enable_val_no: no + +# coredump: +# format: "minidump" +# collect: "local" +# sentry_url: "www.testing.com" + +# session_id_generator: +# snowflake_worker_id_base: 1 +# snowflake_worker_id_offset: 1 + +# decoders: +# DNS: yes +# QUIC: yes +# HTTP: yes +# HTTP_GZIP: yes +# MAIL: yes +# MAIL_BASE64: yes +# FTP: yes +# SSL: yes +# SSL_CERT: yes +# SSL_JA3: yes +# RTP: yes +# SIP: yes +# SSH: yes +# SOCKS: yes +# STRATUM: yes +# RDP: yes +# DTLS: yes +# SSL_DETAIN_FRAG_CHELLO: no + +# configHash: "defaulthash" + +# shaping: +# enable: no +# inject_adapter: +# enable: yes + +# service_chaining: +# enable: yes + +# sce_config: +# steering_nic: nf_0_sce +# vxlan_config: +# endpoint_nic: ep_0_sce_l3 +# endpoint_ip: 127.0.0.1 +# endpoint_gateway: 127.0.0.1 +# endpoint_netip: 127.0.0.1 +# endpoint_mask: 24 +# vlan_config: +# endpoint_nic: ep_0_sce_l2 + +# proxy_config: +# proxy_nic: nf_1_proxy + +# sid: +# firewall: 1000 +# proxy: 1001 +# sce: 1002 +# shaping: 1003 +# inject_adapter: 1064 + +# shaping_config: +# shaping_nic: nf_1_shaping_engine + +# inject_adapter_config: +# inject_adapter_nic: nf_1_shaping_engine + +# app_symbol_index: 1 +# distmode: 2 + +# debug: +# firewall: +# enable_liveness_probe: yes +# enable_interactive_startup: no +# enable_prestart_script: no +# enable_mount_host_filesystem: no +# #default: /etc/tsg-os/${service_function_name}/firewall_prestart_script.sh +# prestart_script: "" +# proxy: +# enable_liveness_probe: yes +# enable_interactive_startup: no +# enable_prestart_script: no +# enable_mount_host_filesystem: no +# #default: /etc/tsg-os/${service_function_name}/proxy_prestart_script.sh +# prestart_script: "" +# service_chaining: +# enable_liveness_probe: yes +# enable_interactive_startup: no +# enable_prestart_script: no +# enable_mount_host_filesystem: no +# #default: 
/etc/tsg-os/${service_function_name}/service_chaining_prestart_script.sh +# prestart_script: "" +# shaping: +# enable_liveness_probe: yes +# enable_interactive_startup: no +# enable_prestart_script: no +# enable_mount_host_filesystem: no +# #default: /etc/tsg-os/${service_function_name}/shaping_prestart_script.sh +# prestart_script: "" +# inject_adapter: +# enable_liveness_probe: yes +# enable_interactive_startup: no +# enable_prestart_script: no +# enable_mount_host_filesystem: no +# #default: /etc/tsg-os/${service_function_name}/shaping_prestart_script.sh +# prestart_script: "" + +# session_flags: +# enable: yes + +# dos_protector: +# enable: no + +# stat_policy_enforcer: +# enable: yes + +# traffic_sketch: +# enable: yes + +# policy_sketch: +# enable: yes \ No newline at end of file diff --git a/helmcharts/tsg-cm/build.mk b/helmcharts/tsg-cm/build.mk new file mode 100644 index 00000000..75a7e2c6 --- /dev/null +++ b/helmcharts/tsg-cm/build.mk @@ -0,0 +1,3 @@ +rule_target := tsg-cm +rule_prerequisites := +rule_recipes := $(call copy_helmchart_package,$(rule_target)) \ No newline at end of file diff --git a/helmcharts/tsg-cm/files/tsg-cm-chart-0.1.0.tgz b/helmcharts/tsg-cm/files/tsg-cm-chart-0.1.0.tgz new file mode 100644 index 00000000..95d73d15 Binary files /dev/null and b/helmcharts/tsg-cm/files/tsg-cm-chart-0.1.0.tgz differ