🦄 refactor: DPISDN-61: refactor the way of building the container images and Helm chart packages for dp-trace-telemetry, packet-io-engine, trex, log-collector and nic-uio-binder.

linxin
2024-10-18 17:20:32 +08:00
committed by fumingwei
parent 456dd51649
commit 5d0e7529a4
72 changed files with 2722 additions and 15 deletions

View File

@@ -139,19 +139,6 @@ spec:
successThreshold: 1
timeoutSeconds: 10
{{- end }}
- env:
- name: MERGER_URLS
value: {{ .Values.mergeExporter.mergeUrls }}
- name: MERGER_PORT
value: "{{ .Values.mergeExporter.mergePort }}"
image: "{{ .Values.mergeExporter.image.repository }}:{{ .Values.mergeExporter.image.tag }}"
imagePullPolicy: {{ .Values.mergeExporter.image.pullPolicy }}
name: exporter-merger
ports:
- name: ex-trace-port
containerPort: {{ .Values.mergeExporter.mergePort }}
protocol: TCP
initContainers:
- name: init-packet-io-engine-ready
image: "registry.gdnt-cloud.website/tsg-init:{{ .Chart.AppVersion }}"

View File

@@ -32,10 +32,17 @@ RUN dnf -y update && \
inotify-tools \
jemalloc \
pcre2 \
epel-release
epel-release \
lshw \
which \
pciutils \
kmod \
libnl3-devel \
perl-generators \
iptables-devel
RUN python2 -m pip install -i https://pypi.tuna.tsinghua.edu.cn/simple PrettyTable==0.7.2 && \
python3 -m pip install -i https://pypi.tuna.tsinghua.edu.cn/simple jinja2 prettytable tomlq toml sdnotify j2cli j2cli[yaml]
python3 -m pip install -i https://pypi.tuna.tsinghua.edu.cn/simple jinja2 prettytable tomlq toml pyyaml argcomplete sdnotify j2cli j2cli[yaml]
RUN {{ macros.install_packages(packages) }} && \
{{ macros.clean_after_install_packages() }}

View File

@@ -0,0 +1,14 @@
{% import 'dockerfile-macros.j2' as macros -%}
ARG BASE_IMAGE
FROM ${BASE_IMAGE}
RUN {{ macros.install_packages(packages) }} && \
{{ macros.clean_after_install_packages() }}
# templates
COPY templates/* /templates/
# scripts
COPY --chmod=755 entrypoint.sh /usr/local/bin/
WORKDIR /opt/tsg/dp_trace_telemetry
CMD ["/bin/bash"]

View File

@@ -0,0 +1,3 @@
rule_target := dp-trace-telemetry
rule_prerequisites := $(DEP_ENV_FILES) $(YUM_REPO_FILE) $(DOCKERFILE_MACROS) $(BUILD_DIR)/base/$(BUILD_DONE_FILE)
rule_recipes := $(call build_image_from_dockerfile,$(rule_target),$(IMAGE_REGISTRY)/base:$(IMAGE_TAG),$(IMAGE_REGISTRY)/$(rule_target):$(IMAGE_TAG))
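For reference, build_image_from_dockerfile is assumed to wrap a plain docker build that feeds the shared base image in through the BASE_IMAGE build argument declared in the Dockerfiles of this commit. A hypothetical expansion of this rule (the real macro lives in the shared make includes; the context directory containers/dp-trace-telemetry is an assumption):

# Hypothetical expansion of the build_image_from_dockerfile call above
docker build \
  --build-arg BASE_IMAGE=$(IMAGE_REGISTRY)/base:$(IMAGE_TAG) \
  -t $(IMAGE_REGISTRY)/dp-trace-telemetry:$(IMAGE_TAG) \
  containers/dp-trace-telemetry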

View File

@@ -0,0 +1,22 @@
#!/bin/sh -ex
source /usr/local/bin/entrypoint_public.sh
# start
ldconfig
parse_args "$@"
render_template dp_trace.conf.j2 /opt/tsg/dp_trace_telemetry/etc/dp_trace.conf
if [ ${IS_ENABLE_PRESTART} == "true" ]; then
enable_prestart
fi
if [ ${IS_ENABLE_INTERACTIVE_STARTUP} == "true" ]; then
enable_interactive_startup
fi
exec /opt/tsg/dp_trace_telemetry/bin/dp_trace_telemetry -c /opt/tsg/dp_trace_telemetry/etc/dp_trace.conf -d /opt/tsg/dp_trace_telemetry/etc/dp_trace_dy.conf
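The parse_args helper comes from entrypoint_public.sh, which is not part of this diff; judging by the flags the dp-trace-telemetry Deployment later in this commit passes through, it accepts the two optional switches that drive the blocks above:

# Flags the Helm deployment forwards to this entrypoint (both optional)
/usr/local/bin/entrypoint.sh --enable_prestart --enable_interactive_startup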

View File

@@ -0,0 +1,7 @@
packages:
- name: mlnx-ofed-user-space-only-all
version: 5.17.15-1.el8.x86_64.noarch
install_command: dnf
- name: dp_trace_telemetry
version: 0.3.1.e2b1aff
install_command: dnf

View File

@@ -0,0 +1,47 @@
{% import '/templates/macros.j2' as macros -%}
[global]
iocore={{ datapath_trace.datapath_trace_affinity | join (',') }}
zlog_config_path=../etc/dp_trace_zlog.conf
dp_trace_dir=/var/lib/dp_telemetry_daemon
{% if device and device.tags -%}
{% for tag in device.tags -%}
{%- if tag.data_center is defined -%}
data_center={{ tag.data_center }}
{% endif -%}
{%- if tag.device_group is defined -%}
device_group={{ tag.device_group }}
{% endif -%}
{%- endfor %}
{% endif -%}
monit_file_path=/var/run/mrzcpd/mrmonit.app.dp_trace_telemetry.saving
[http_server]
listen_addr=0.0.0.0
listen_port=9086
keep_alive_path=/probe
[kafka]
broker_list="{{ macros.address_port_pairs_render(datapath_trace.olap.kafka_brokers.addresses,",") }}"
{%- if datapath_trace.olap.kafka_brokers.sasl_username is defined -%}
sasl_username={{ datapath_trace.olap.kafka_brokers.sasl_username }}
{% endif -%}
{%- if datapath_trace.olap.kafka_brokers.sasl_password is defined -%}
sasl_passwd={{ datapath_trace.olap.kafka_brokers.sasl_password }}
{%- endif %}
topic_name="DATAPATH-TELEMETRY-RECORD"
[maat]
maat_log_level=3
# 0:json 1:redis
maat_input_mode=1
deferred_load_on=0
table_schema=../etc/table_schema.json
json_cfg_file=../etc/dp_telemetry_rules.json
maat_redis_server={{ macros.cm_address(datapath_trace.cm) }}
maat_redis_port_range={{ macros.cm_port(datapath_trace.cm) }}
maat_redis_db_index=1
[debug]
send_ctrlbuf=0
kafka_dump_to_log=0
arp_pkt_has_ip_test=0
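At runtime this template is rendered by the entrypoint's render_template helper; since the images install j2cli (see the base Dockerfile hunk above), the same render can be reproduced standalone for debugging. Illustrative only, with values.yaml standing in for a hypothetical data file carrying the datapath_trace and device keys used above:

# Standalone render of the config template with j2cli (values.yaml is hypothetical)
j2 -f yaml dp_trace.conf.j2 values.yaml -o dp_trace.conf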

View File

@@ -0,0 +1,26 @@
[dp_trace_rule]
dp_trace_file_max_size_in_KB={{ datapath_trace.datapath_trace_file_max_size_in_KB }}
dp_trace_merge_timeout=30
{% if datapath_trace.rule -%}
{% for rule in datapath_trace.rule -%}
[dp_trace_rule:{{ rule.id }}]
{%- if rule.enable == "yes" -%}
enable=1
{% endif -%}
{%- if rule.enable == "no" -%}
enable=0
{% endif -%}
{%- if rule.bpf_expr is defined -%}
bpf_expr={{ rule.bpf_expr }}
{% endif -%}
{%- if rule.packet_max_count is defined -%}
pkt_cnt_max={{ rule.packet_max_count }}
{% endif -%}
{%- if rule.sampling is defined -%}
sampling={{ rule.sampling }}
{% endif -%}
{%- if rule.snaplen is defined -%}
snaplen={{ rule.snaplen }}
{% endif -%}
{%- endfor %}
{% endif -%}

View File

@@ -0,0 +1,14 @@
{% import 'dockerfile-macros.j2' as macros -%}
ARG BASE_IMAGE
FROM ${BASE_IMAGE}
RUN {{ macros.install_packages(packages) }} && \
{{ macros.clean_after_install_packages() }}
# templates
COPY templates/* /templates/
# scripts
COPY --chmod=755 entrypoint.sh /usr/local/bin/
WORKDIR /opt/tsg/inject-adapter
CMD ["/bin/bash"]

View File

@@ -0,0 +1,3 @@
rule_target := inject-adapter
rule_prerequisites := $(DEP_ENV_FILES) $(YUM_REPO_FILE) $(DOCKERFILE_MACROS) $(BUILD_DIR)/base/$(BUILD_DONE_FILE)
rule_recipes := $(call build_image_from_dockerfile,$(rule_target),$(IMAGE_REGISTRY)/base:$(IMAGE_TAG),$(IMAGE_REGISTRY)/$(rule_target):$(IMAGE_TAG))

View File

@@ -0,0 +1,22 @@
#!/bin/sh -ex
source /usr/local/bin/entrypoint_public.sh
# start
ldconfig
parse_args "$@"
render_template packet_adapter.conf.j2 /opt/tsg/packet_adapter/conf/packet_adapter.conf
if [ ${IS_ENABLE_PRESTART} == "true" ]; then
enable_prestart
fi
if [ ${IS_ENABLE_INTERACTIVE_STARTUP} == "true" ]; then
enable_interactive_startup
fi
exec /opt/tsg/packet_adapter/bin/packet_adapter

View File

@@ -0,0 +1,4 @@
packages:
- name: packet_adapter
version: 2.0.2.4f48b59
install_command: dnf

View File

@@ -0,0 +1,18 @@
[packet_io]
thread_num={{ inject_adapter_affinity | length }}
cpu_mask={{ inject_adapter_affinity | join (',') }}
rx_burst_max=128
bypass_traffic=0
app_symbol=PacketAdapter
app_device={{ inject_adapter_config.inject_adapter_nic }}
[stat]
output_file=log/packet_adapter.fs2
statsd_server=127.0.0.1
statsd_port=8100
# 1 : FS_OUTPUT_STATSD
# 2 : FS_OUTPUT_INFLUX_LINE
statsd_format=1
statsd_cycle=2
prometheus_listen_port=9009
prometheus_listen_url=/packet_adapter_prometheus

View File

@@ -0,0 +1,12 @@
{% import 'dockerfile-macros.j2' as macros -%}
ARG BASE_IMAGE
FROM ${BASE_IMAGE}
RUN {{ macros.install_packages(packages) }} && \
{{ macros.clean_after_install_packages() }}
# scripts
COPY --chmod=755 entrypoint.sh /usr/local/bin/
WORKDIR /opt/tsg/log-collector
CMD ["/bin/bash"]

View File

@@ -0,0 +1,3 @@
rule_target := log-collector
rule_prerequisites := $(DEP_ENV_FILES) $(YUM_REPO_FILE) $(DOCKERFILE_MACROS) $(BUILD_DIR)/base/$(BUILD_DONE_FILE)
rule_recipes := $(call build_image_from_dockerfile,$(rule_target),$(IMAGE_REGISTRY)/base:$(IMAGE_TAG),$(IMAGE_REGISTRY)/$(rule_target):$(IMAGE_TAG))

View File

@@ -0,0 +1,5 @@
#!/bin/sh -ex
sysctl -w fs.inotify.max_user_instances=8192
exec /opt/tsg/log-collector/bin/log-collector

View File

@@ -0,0 +1,4 @@
packages:
- name: log-collector
version: 1.0.1-6df738d
install_command: dnf

View File

@@ -0,0 +1,12 @@
{% import 'dockerfile-macros.j2' as macros -%}
ARG BASE_IMAGE
FROM ${BASE_IMAGE}
RUN {{ macros.install_packages(packages) }} && \
{{ macros.clean_after_install_packages() }}
# scripts
COPY --chmod=755 entrypoint.sh /usr/local/bin/
WORKDIR /opt/tsg/nic-uio-binder
CMD ["/bin/bash"]

View File

@@ -0,0 +1,3 @@
rule_target := nic-uio-binder
rule_prerequisites := $(DEP_ENV_FILES) $(YUM_REPO_FILE) $(DOCKERFILE_MACROS) $(BUILD_DIR)/base/$(BUILD_DONE_FILE)
rule_recipes := $(call build_image_from_dockerfile,$(rule_target),$(IMAGE_REGISTRY)/base:$(IMAGE_TAG),$(IMAGE_REGISTRY)/$(rule_target):$(IMAGE_TAG))

View File

@@ -0,0 +1,4 @@
#!/bin/sh -ex
exec /opt/tsg/nic-uio-binder/bin/nic-uio-binder

View File

@@ -0,0 +1,4 @@
packages:
- name: nic-uio-binder
version: 1.0.13-3d0dc82
install_command: dnf

View File

@@ -0,0 +1,20 @@
{% import 'dockerfile-macros.j2' as macros -%}
ARG BASE_IMAGE
FROM ${BASE_IMAGE}
RUN {{ macros.install_packages(packages) }} && \
{{ macros.clean_after_install_packages() }} && \
sed -Ei -c "s|/opt/mrzcpd|/opt/tsg/mrzcpd|g" /etc/profile.d/mrzcpd.sh && \
sed -Ei -c "s|/opt/mrzcpd|/opt/tsg/mrzcpd|g" /opt/tsg/mrzcpd/lib/pkgconfig/mrzcpd.pc
# files
COPY files/mrzcpd.conf /etc/ld.so.conf.d/
COPY files/mrzcpd /etc/sysconfig/
# templates
COPY templates/* /templates/
# scripts
COPY --chmod=755 entrypoint.sh /usr/local/bin/
WORKDIR /opt/tsg/mrzcpd/
CMD ["/bin/bash"]

View File

@@ -0,0 +1,3 @@
rule_target := packet-io-engine
rule_prerequisites := $(DEP_ENV_FILES) $(YUM_REPO_FILE) $(DOCKERFILE_MACROS) $(BUILD_DIR)/base/$(BUILD_DONE_FILE)
rule_recipes := $(call build_image_from_dockerfile,$(rule_target),$(IMAGE_REGISTRY)/base:$(IMAGE_TAG),$(IMAGE_REGISTRY)/$(rule_target):$(IMAGE_TAG))

View File

@@ -0,0 +1,20 @@
#!/bin/sh -ex
source /usr/local/bin/entrypoint_public.sh
# start
parse_args "$@"
render_template mrglobal.conf.j2 /opt/tsg/mrzcpd/etc/mrglobal.conf
if [ ${IS_ENABLE_PRESTART} == "true" ]; then
enable_prestart
fi
if [ ${IS_ENABLE_INTERACTIVE_STARTUP} == "true" ]; then
enable_interactive_startup
fi
exec /opt/tsg/mrzcpd/bin/mrzcpd -c /opt/tsg/mrzcpd/etc/mrglobal.conf -s /opt/tsg/mrzcpd/etc/mrglobal.dynamic.conf

View File

@@ -0,0 +1,3 @@
MRZCPD_ROOT=/opt/tsg/mrzcpd
HUGEPAGE_NUM_1G=32
DEFAULT_UIO_MODULE="vfio_pci"

View File

@@ -0,0 +1,2 @@
# Marsio ZeroCopy Driver Library Path
/opt/tsg/mrzcpd/lib

View File

@@ -0,0 +1,19 @@
packages:
- name: mlnx-ofed-user-space-only-all
version: 5.17.15-1.el8.x86_64.noarch
install_command: dnf
- name: mrzcpd-icelake-server
version: 4.8.21.2d714a5
download_command: dnf
install_command: rpm
install_command_options: "--prefix /opt/tsg/mrzcpd"
- name: mrzcpd-znver1
version: 4.8.21.2d714a5
download_command: dnf
install_command: rpm
install_command_options: "--prefix /opt/tsg/mrzcpd"
- name: mrzcpd-corei7
version: 4.8.21.2d714a5
download_command: dnf
install_command: rpm
install_command_options: "--prefix /opt/tsg/mrzcpd"

View File

@@ -0,0 +1,473 @@
{%- set nf_nic_list = [] -%}
{%- set mirr_nic_list = [] -%}
{%- set ep_nic_l2_list = [] -%}
{%- set ep_nic_l3_list = [] -%}
{%- set sid_step = 4 -%}
{%- set cycle = mrzcpd.nf_count -%}
{%- set enter = "\n" -%}
{%- for index in range(cycle) %}
{{- nf_nic_list.append( 'nf_'+loop.index0|string+'_fw') or "" }}
{{- nf_nic_list.append( 'nf_'+loop.index0|string+'_proxy') or "" }}
{{- nf_nic_list.append( 'nf_'+loop.index0|string+'_sce') or "" }}
{{- nf_nic_list.append( 'nf_'+loop.index0|string+'_shaping_engine') or "" }}
{{- ep_nic_l2_list.append( 'ep_'+loop.index0|string+'_sce_l2') or "" }}
{{- ep_nic_l3_list.append( 'ep_'+loop.index0|string+'_sce_l3') or "" }}
{{- mirr_nic_list.append( 'mirr_'+loop.index0|string+'_fw') or "" }}
{{- mirr_nic_list.append( 'mirr_'+loop.index0|string+'_proxy') or "" }}
{%- endfor -%}
[device]
device=
{%- if mrzcpd.interfaces -%}
{%- for interface in mrzcpd.interfaces -%}{{ interface.name }}{%- if not loop.last %},{% endif %}{%- endfor -%},
{%- endif -%}
{%- if mrzcpd.virtual_interfaces -%}
{%- if mrzcpd.interfaces %},{%- endif -%}
{%- for interface in mrzcpd.virtual_interfaces -%}{{ interface.name }}{%- if not loop.last %},{% endif %}{%- endfor -%},
{%- endif -%}
{{nf_nic_list|join(',')}},{{ep_nic_l2_list|join(',')}},{{ep_nic_l3_list|join(',')}},{{mirr_nic_list|join(',')}},nf_inject,virtio_dign_c,virtio_dign_s
sz_tunnel={{ mrzcpd.pktmbuf_queue_size }}
sz_buffer=0
# Configuration settings for the physical device (Phy dev).
{%- for interface in mrzcpd.interfaces %}
[device:{{ interface.name }}]
{%- if interface.address is defined %}
in_addr={{ interface.address }}
in_mask={{ interface.mask | default('') }}
{%- if interface.gateway is defined %}
gateway={{ interface.gateway }}
{%- endif %}
{%- endif %}
nr_rxdesc=8192
nr_txdesc=8192
allmulticast=1
rssmode=3
promisc={{ interface.promisc | default(0) }}
mtu={{ interface.mtu | default(1500) }}
{%- if interface.role == "5" %}
driver=1
{%- else %}
driver=0
{%- endif %}
role={{ interface.role | default('') }}
{%- if interface.type is defined %}
type={{ interface.type }}
{%- endif %}
{%- if interface.vlan is defined %}
{%- if interface.vlan|length > 0 %}
allow_vlan_ids={%- for vlan in interface.vlan -%}
{%- if not loop.first %}, {% endif -%}{{ vlan.vlan_id }}
{%- endfor %}
{%- endif %}
{%- endif %}
{%- if interface.en_representor is defined %}
en_representor={{ interface.en_representor }}
{%- endif %}
{%- if interface.representor_ns is defined %}
representor_ns={{ interface.representor_ns }}
{%- endif %}
{%- if interface.representor_dev is defined %}
representor_dev={{ interface.representor_dev }}
{%- endif %}
{%- if interface.rxcore is defined %}
rx_cores={{ interface.rxcore }}
{%- endif %}
{% endfor %}
# Configuration settings for network bonding (bond) functionality.
{%- for interface in mrzcpd.virtual_interfaces %}
{%- if interface.type == "1" %}
[device:{{ interface.name }}]
{%- if interface.address %}
in_addr={{ interface.address }}
in_mask={{ interface.mask }}
{%- if interface.gateway %}
gateway={{ interface.gateway }}
{%- endif %}
{%- endif %}
rssmode=3
{%- if interface.promisc %}
promisc={{ interface.promisc }}
{%- endif %}
{%- if interface.mtu %}
mtu={{ interface.mtu }}
{%- endif %}
{%- if interface.role == "5" %}
driver=1
{%- else %}
driver=0
{%- endif %}
role={{ interface.role }}
{%- if interface.type %}
type={{ interface.type }}
{%- endif %}
{%- if interface.bond_mode %}
bond_mode={{ interface.bond_mode }}
{%- endif %}
{%- if interface.bond_xmit_policy %}
bond_xmit_policy={{ interface.bond_xmit_policy }}
{%- endif %}
{%- if interface.slaves %}
bond_slaves={%- for slave in interface.slaves %}{%- if not loop.first %},{%- endif %}{{ slave.interface }}{%- endfor %}
{%- endif %}
{%- if interface.rxcore %}
rx_cores={{ interface.rxcore }}
{%- endif %}
{%- endif %}
{% endfor %}
# Configuration settings for the virtual device (virtual dev) management.
[device:virtio_dign_c]
driver=1
role=1
[device:virtio_dign_s]
driver=1
role=1
# Configuration settings for the representational (represent) device.
{%- for interface in mrzcpd.virtual_interfaces %}
{%- if interface.en_representor == "1" %}
[device:{{ interface.name }}]
{%- if interface.promisc %}
promisc={{ interface.promisc }}
{%- endif %}
{%- if interface.mtu %}
mtu={{ interface.mtu }}
{%- endif %}
{%- if interface.role == "5" %}
driver=1
{%- else %}
driver=0
{%- endif %}
role={{ interface.role }}
{%- if interface.vlan and interface.vlan | length > 0 %}
allow_vlan_ids={%- for vlan in interface.vlan %}{%- if not loop.first %},{%- endif %}{{ vlan.vlan_id }}{%- endfor %}
{%- endif %}
en_representor={{ interface.en_representor }}
{%- if interface.representor_ns %}
representor_ns={{ interface.representor_ns }}
{%- endif %}
{%- if interface.rxcore %}
rx_cores={{ interface.rxcore }}
{%- endif %}
{%- endif %}
{% endfor %}
# Configuration settings for the VLAN (Virtual Local Area Network) sub-interface.
{%- for interface in mrzcpd.interfaces %}
{%- if interface.vlan is defined %}
{%- for vlan in interface.vlan %}
{%- if vlan.ipv4_address is defined or vlan.ipv6_address is defined %}
[device:{{ interface.name }}:vlan:{{ vlan.vlan_id }}]
{%- if vlan.ipv4_address is defined %}
in_addr_v4={{ vlan.ipv4_address }}
{%- endif %}
{%- if vlan.ipv4_mask is defined %}
in_mask_v4={{ vlan.ipv4_mask }}
{%- endif %}
{%- if vlan.ipv6_address is defined %}
in_addr_v6={{ vlan.ipv6_address }}
{%- endif %}
{%- if vlan.ipv6_mask is defined %}
in_mask_v6={{ vlan.ipv6_mask }}
{%- endif %}
{%- endif %}
{% endfor %}
{%- endif %}
{%- endfor %}
{%- for interface in mrzcpd.virtual_interfaces %}
{%- if interface.vlan is defined %}
{%- for vlan in interface.vlan %}
{%- if vlan.ipv4_address is defined or vlan.ipv6_address is defined %}
[device:{{ interface.name }}:vlan:{{ vlan.vlan_id }}]
{%- if vlan.ipv4_address is defined %}
in_addr_v4={{ vlan.ipv4_address }}
{%- endif %}
{%- if vlan.ipv4_mask is defined %}
in_mask_v4={{ vlan.ipv4_mask }}
{%- endif %}
{%- if vlan.ipv6_address is defined %}
in_addr_v6={{ vlan.ipv6_address }}
{%- endif %}
{%- if vlan.ipv6_mask is defined %}
in_mask_v6={{ vlan.ipv6_mask }}
{%- endif %}
{%- endif %}
{% endfor %}
{%- endif %}
{% endfor %}
# Configuration settings for the shmdey device.
[device:nf_inject]
driver=3
role=4
{% for index in range(cycle) %}
[device:nf_{{ index }}_fw]
driver=3
role=4
[device:nf_{{ index }}_sce]
driver=3
role=4
[device:ep_{{ index }}_sce_l2]
driver=3
role=4
[device:ep_{{ index }}_sce_l3]
en_representor=1
representor_ns=1
driver=3
role=4
[device:nf_{{ index }}_shaping_engine]
driver=3
role=4
[device:nf_{{ index }}_proxy]
driver=3
role=4
[device:mirr_{{ index }}_fw]
driver=3
role=4
[device:mirr_{{ index }}_proxy]
driver=3
role=4
{% endfor %}
# This module handles the global configuration settings.
[service]
iocore={{ mrzcpd.cpu_affinity | join (',') }}
distmode={{ mrzcpd.distmode }}
hashmode=0
poll_wait_throttle={{ mrzcpd.enable_poll_wait_throttle | default(0) }}
poll_wait_enable=1
[eal]
virtaddr=0x500000000000
loglevel=7
huge-dir=/run/mrzcpd/hugepages
legacy_mem=0
[keepalive]
check_spinlock=1
[ctrlzone]
ctrlzone0=tunnat,64
[pool]
create_mode={{ mrzcpd.pktmbuf_pool_create_mode }}
sz_direct_pktmbuf={{ mrzcpd.pktmbuf_pool_max_elements_count | int }}
sz_indirect_pktmbuf={{ mrzcpd.indirect_pktmbuf | default(0) }}
sz_cache=512
sz_data={{ mrzcpd.pktmbuf_max_size | default(0) }}
[buffer_leak_detect]
check_buffer_leak={{ mrzcpd.enable_check_pktmbuf_leak | default(0) }}
[ctrlmsg]
listen_addr=0.0.0.0
listen_port=46789
[rpc]
addr=127.0.0.1
port=56789
[limits]
nr_max_ef_adapters={{ mrzcpd.nr_max_ef_adapters | default(0) }}
nr_max_vwires={{ mrzcpd.nr_max_vwires | default(0) }}
nr_max_tera_adapters={{ mrzcpd.nr_max_tera_adapters | default(0) }}
nr_max_link_dbs={{ mrzcpd.nr_max_link_dbs | default(0) }}
# This module is used to configure the access mode settings.
[ef_adapters]
sid_start=100
sid_end=200
{%- for service in mrzcpd.services %}
{%- if service.type == "etherfabric_adapter" %}
[ef_adapter:{{ loop.index0 }}]
ef_adapter_id={{ service.id }}
{%- if service.mode == "virtual-wire" %}
mode=1
{%- elif service.mode == "tap" %}
mode=2
{%- endif %}
listen_device={{ service.listen_on_device | default('') }}
{%- endif %}
{% endfor %}
# The configuration sets the access mode to vwire mode.
[vwires]
sid_start=300
sid_end=400
[vwire:0]
vwire_id=0
interface_int=virtio_dign_c
interface_ext=virtio_dign_s
{% for virtual_wire in mrzcpd.virtual_wires %}
[vwire:{{ loop.index0 + 1 }}]
vwire_id={{ virtual_wire.id }}
interface_int={{ virtual_wire.internal_interface }}
interface_ext={{ virtual_wire.external_interface }}
{%- if virtual_wire.obp_device is defined %}
obp_device={{ virtual_wire.obp_device }}
{%- endif %}
{%- if virtual_wire.obp_segment is defined %}
obp_segment={{ virtual_wire.obp_segment }}
{%- endif %}
{% endfor %}
# The configuration sets the access mode to tera adapter mode.
[tera_adapters]
sid_start=500
sid_end=600
{%- for service in mrzcpd.services %}
{%- if service.type == "tera_adapter" %}
[tera_adapter:{{ loop.index0 }}]
tera_adapter_id={{ service.id }}
listen_device={{ service.listen_on_device | default('') }}
vlan_int={{ service.vlan_int | default('') }}
vlan_ext={{ service.vlan_ext | default('') }}
mac_flipping={{ service.mac_flipping | default(0) }}
{%- endif %}
{% endfor %}
# This module is used to configure health check settings.
{%- for index in range(cycle) %}
[health_check:{{ index * sid_step }}]
name=nf{{ index }}_fw_health_check
device=nf_{{ index }}_fw
method=0
multiplier=20
interval=10
[health_check:{{ index * sid_step + 1 }}]
name=nf{{ index }}_proxy_health_check
device=nf_{{ index }}_proxy
method=0
multiplier=20
interval=10
[health_check:{{ index * sid_step + 2 }}]
name=nf{{ index }}_sce_health_check
device=nf_{{ index }}_sce
method=0
multiplier=20
interval=10
[health_check:{{ index * sid_step + 3 }}]
name=nf{{ index }}_shaping_engine_health_check
device=nf_{{ index }}_shaping_engine
method=0
multiplier=20
interval=10
{% endfor %}
[health_check:{{ cycle * sid_step }}]
name=nf_inject_health_check
device=nf_inject
method=0
multiplier=20
interval=10
# This module is used for configuring the service load balancing settings.
[service_lb]
sid_start={{ mrzcpd.service_lb.sid_range_start }}
sid_end={{ mrzcpd.service_lb.sid_range_end }}
{%- for index in range(cycle) %}
{%- set load_balance_index = index * sid_step %}
[load_balance:{{ load_balance_index }}]
sid={{ mrzcpd.service_lb.sid_range_start + load_balance_index }}
mode=0
devices=nf_{{ index }}_fw,0
health_check_sessions=nf{{ index }}_fw_health_check
[load_balance:{{ load_balance_index + 1 }}]
sid={{ mrzcpd.service_lb.sid_range_start + load_balance_index + 1 }}
mode=0
devices=nf_{{ index }}_proxy,0
health_check_sessions=nf{{ index }}_proxy_health_check
[load_balance:{{ load_balance_index + 2 }}]
sid={{ mrzcpd.service_lb.sid_range_start + load_balance_index + 2 }}
mode=0
devices=nf_{{ index }}_sce,0
health_check_sessions=nf{{ index }}_sce_health_check
[load_balance:{{ load_balance_index + 3 }}]
sid={{ mrzcpd.service_lb.sid_range_start + load_balance_index + 3 }}
mode=0
devices=nf_{{ index }}_shaping_engine,0
health_check_sessions=nf{{ index }}_shaping_engine_health_check
{% endfor %}
[load_balance:{{ cycle * sid_step }}]
sid={{ mrzcpd.service_lb.sid_range_start + cycle * sid_step }}
mode=0
devices=nf_inject,0
health_check_sessions=nf_inject_health_check
# This module is responsible for configuring the link database (linkdb) settings.
{%- for linkdb in mrzcpd.linkdb %}
[link_db:{{ linkdb.id }}]
type={{ linkdb.type }}
traffic_link_id={{ linkdb.traffic_link_id }}
{%- if linkdb.vwire_id is defined %}
vwire_id={{ linkdb.vwire_id }}
{%- endif %}
{%- if linkdb.ef_ip_addr is defined and linkdb.ef_link_id is defined %}
ef_ip_addr={{ linkdb.ef_ip_addr }}
ef_link_id={{ linkdb.ef_link_id }}
{%- endif %}
{% endfor %}
# This module is used for configuring the network bridge settings.
[bridge:0]
{%- if mrzcpd.service_chaining.vxlan_interface is defined %}
master_device={{ mrzcpd.service_chaining.vxlan_interface }}
devices={{ mrzcpd.service_chaining.vxlan_interface }},{{ ep_nic_l3_list |join(',')}}
{%- else %}
devices={{ ep_nic_l3_list |join(',')}}
{%- endif %}
[bridge:1]
{%- if mrzcpd.traffic_mirror.interface is defined %}
master_device={{ mrzcpd.traffic_mirror.interface }}
devices={{ mrzcpd.traffic_mirror.interface }},{{ mirr_nic_list |join(',')}}
{%- else %}
devices={{ mirr_nic_list |join(',')}}
{%- endif %}
[bridge:2]
{%- if mrzcpd.service_chaining.vlan_interface is defined %}
master_device={{ mrzcpd.service_chaining.vlan_interface }}
devices={{ mrzcpd.service_chaining.vlan_interface }},{{ ep_nic_l2_list |join(',')}}
{%- else %}
devices={{ ep_nic_l2_list |join(',')}}
{%- endif %}
# This module is responsible for configuring the static classifier rules.
{%- for index in range(cycle) %}
[classifier_rule:{{ index }}]
rule_id={{ index + 1 }}
dst_ip_addr_v4=192.0.2.{{ index + 101 }}
dst_ip_mask_v4=32
action=2
priority=0
sid={{ index * sid_step + 1000 }}
vwire_id=0
{% endfor %}
{%- for index in range(cycle) %}
[classifier_rule:{{ index + cycle }}]
rule_id={{ index + cycle + 1 }}
src_ip_addr_v4=192.0.2.{{ index + 101 }}
src_ip_mask_v4=32
action=2
priority=0
sid={{ index * sid_step + 1000 }}
vwire_id=0
{% endfor %}

View File

@@ -0,0 +1,98 @@
{%- for obp in obp_devices -%}
[olp_device:{{loop.index0}}]
name = {{ obp.name }}
type = {{ obp.type }}
connect = {{ obp.connect }}
in_addr = {{ obp.address }}
port = {{ obp.port }}
{% for seg in obp.segment %}
[{{obp.name}}:{{seg.id}}]
state = {{ seg.workline}}
heartbeat = {{ seg.heartbeat_mode }}
heartbeat_send_interval_in_ms= {{ seg.hb_send_interval_in_ms }}
heartbeat_timeout_interval_in_ms= {{ seg.hb_timeout_interval_in_ms}}
heartbeat_lost_threshold = {{ seg.hb_lost_threshold }}
nonrevertive_mode = {{ seg.nonrevertive_mode }}
{% endfor %}
{%- endfor %}
{% for rule in ruleset %}
[classifier_rule:{{ loop.index0 }}]
{%- if rule.RuleId is defined %}
rule_id={{ rule.RuleId }}
{%- endif %}
ruleset_type=0
{%- if rule.SrcIpV4 is defined %}
src_ip_addr_v4={{ rule.SrcIpV4 }}
{%- if rule.SrcIpMask is defined %}
src_ip_mask_v4={{ rule.SrcIpMask }}
{%- endif -%}
{%- endif -%}
{%- if rule.DstIpV4 is defined %}
dst_ip_addr_v4={{ rule.DstIpV4 }}
{%- if rule.DstIpMask is defined %}
dst_ip_mask_v4={{ rule.DstIpMask }}
{%- endif -%}
{%- endif -%}
{%- if rule.SrcIpV6 is defined %}
src_ip_addr_v6={{ rule.SrcIpV6 }}
{%- if rule.SrcIpMask is defined %}
src_ip_mask_v6={{ rule.SrcIpMask }}
{%- endif -%}
{%- endif -%}
{%- if rule.DstIpV6 is defined %}
dst_ip_addr_v6={{ rule.DstIpV6 }}
{%- if rule.DstIpMask is defined %}
dst_ip_mask_v6={{ rule.DstIpMask }}
{%- endif -%}
{%- endif -%}
{%- if rule.SrcPortLow is defined %}
src_port_start={{ rule.SrcPortLow }}
{%- endif -%}
{%- if rule.SrcPortHigh is defined %}
src_port_end={{ rule.SrcPortHigh}}
{%- endif -%}
{%- if rule.DstPortLow is defined %}
dst_port_start={{ rule.DstPortLow }}
{%- endif -%}
{%- if rule.DstPortHigh is defined %}
dst_port_end={{ rule.DstPortHigh }}
{%- endif -%}
{%- if rule.Proto is defined %}
proto={{ rule.Proto }}
{%- endif -%}
{%- if rule.Priority is defined %}
priority={{ rule.Priority }}
{%- endif -%}
{%- if rule.Sid is defined %}
sid={{ rule.Sid }}
{%- endif -%}
{%- if rule.VwireId is defined %}
vwire_id={{ rule.VwireId }}
{%- endif -%}
{%- if rule.EfId is defined %}
ef_adapter_id={{rule.EfId}}
{%- endif -%}
{%- if rule.TeraId is defined %}
tera_adapter_id={{ rule.TeraId }}
{%- endif %}
action=2
{% endfor %}
[dp_trace_rule]

View File

@@ -0,0 +1,20 @@
{% import 'dockerfile-macros.j2' as macros -%}
ARG BASE_IMAGE
FROM ${BASE_IMAGE}
COPY files/* /opt/tsg/trex/
RUN {{ macros.install_packages(packages) }} && \
{{ macros.clean_after_install_packages() }} && \
tar -xzvf /opt/tsg/trex/v3.02.tar.gz -C /opt/tsg/trex && \
rm -f /opt/tsg/trex/v3.02.tar.gz && \
tar -xzvf /opt/tsg/trex/trex-helper.tar.gz -C /opt/tsg/ && \
rm -f /opt/tsg/trex/trex-helper.tar.gz
# scripts
COPY --chmod=755 entrypoint.sh /usr/local/bin/
WORKDIR /opt/tsg/trex
CMD ["/bin/bash"]

containers/trex/build.mk
View File

@@ -0,0 +1,3 @@
rule_target := trex
rule_prerequisites := $(DEP_ENV_FILES) $(YUM_REPO_FILE) $(DOCKERFILE_MACROS) $(BUILD_DIR)/base/$(BUILD_DONE_FILE)
rule_recipes := $(call build_image_from_dockerfile,$(rule_target),$(IMAGE_REGISTRY)/base:$(IMAGE_TAG),$(IMAGE_REGISTRY)/$(rule_target):$(IMAGE_TAG))

View File

@@ -0,0 +1,7 @@
#!/bin/sh -ex
chmod +x /opt/tsg/trex/generate-trex-conf.sh
echo 'export PATH=$PATH:/opt/tsg/trex-helper' >> ~/.bashrc
echo 'eval "$(register-python-argcomplete trex-helper)"' >> ~/.bashrc
source ~/.bashrc
/opt/tsg/trex/generate-trex-conf.sh
while true; do sleep 1024;done

View File

@@ -0,0 +1,58 @@
#!/bin/bash
CPU_AFFINITY=(${CPU_AFFINITY//,/ })
NIC_PCI=$(env | grep '_NIC_PCI' | cut -d'=' -f2 | sed 's/0000://')
arr=($NIC_PCI)
NIC_PCI=""
for interface in "${arr[@]}"; do
if [ ! -z "$NIC_PCI" ]; then
NIC_PCI+=","
fi
NIC_PCI+="\"$interface\""
done
port_limit=${#arr[@]}
declare -A NUMA
for cpu in ${CPU_AFFINITY[@]}; do
numa_node=$(lscpu -p=CPU,NODE | grep "^$cpu," | cut -d',' -f2)
NUMA[$numa_node]+="$cpu,"
done
cat << EOF > /etc/trex_cfg.yaml
- port_limit: $port_limit
version: 2
stack: legacy
interfaces: [${NIC_PCI}]
port_mtu: 2000
rx_desc: 4096
tx_desc: 4096
new_memory : true
port_info :
- ip : 17.17.0.1
default_gw : 17.18.0.1
dest_mac : "00:11:22:33:44:55"
src_mac : "aa:bb:cc:dd:ee:ff"
- ip : 17.18.0.1
default_gw : 17.17.0.1
dest_mac : "aa:bb:cc:dd:ee:ff"
src_mac : "00:11:22:33:44:55"
- ip : 17.19.0.1
default_gw : 17.20.0.1
dest_mac : "00:11:22:33:44:54"
src_mac : "aa:bb:cc:dd:ee:fe"
- ip : 17.20.0.1
default_gw : 17.19.0.1
dest_mac : "aa:bb:cc:dd:ee:fe"
src_mac : "00:11:22:33:44:54"
platform:
master_thread_id: ${MASTER_IOCORE}
latency_thread_id: ${LATENCY_IOCORE}
dual_if:
EOF
for numa_node in "${!NUMA[@]}"; do
threads=${NUMA[$numa_node]%,}
echo " - socket: $numa_node" >> /etc/trex_cfg.yaml
echo " threads: [${threads// /,}]" >> /etc/trex_cfg.yaml
done
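Everything in /etc/trex_cfg.yaml is derived from environment variables: CPU_AFFINITY is split per NUMA node into the dual_if thread lists, every variable ending in _NIC_PCI contributes one interface, and MASTER_IOCORE/LATENCY_IOCORE fill the platform section. A hypothetical invocation (the PORT*_NIC_PCI names are made up; only the suffix matters to the grep above):

# Hypothetical run; one PCI address per *_NIC_PCI variable
export CPU_AFFINITY="4,5,6,7"
export MASTER_IOCORE=2 LATENCY_IOCORE=3
export PORT0_NIC_PCI="0000:3b:00.0"
export PORT1_NIC_PCI="0000:3b:00.1"
/opt/tsg/trex/generate-trex-conf.sh && cat /etc/trex_cfg.yaml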

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,4 @@
packages:
- name: mlnx-ofed-user-space-only-all
version: 5.17.15-1.el8.x86_64.noarch
install_command: dnf

View File

@@ -32,6 +32,11 @@ define copy_manifest
cp $(1)/files/*.yaml $(MANIFEST_DIR)
endef
define render_manifest_from_env
@mkdir -p $(MANIFEST_DIR)
/usr/local/bin/j2 $(1)/templates/$(2).j2 -o $(MANIFEST_DIR)/$(2)
endef
.PHONY: all clean $(HELMCHART_NAMES)
all: $(HELMCHART_NAMES)
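render_manifest_from_env hands the template straight to j2, which, with no data file given, falls back to environment variables (APP_VERSION and friends). For the log-collector rule added further below, the call roughly expands to the following (working directory assumed to be the helmcharts tree):

# Rough expansion of $(call render_manifest_from_env,log-collector,log-collector.yaml)
mkdir -p $(MANIFEST_DIR)
/usr/local/bin/j2 log-collector/templates/log-collector.yaml.j2 -o $(MANIFEST_DIR)/log-collector.yaml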

View File

@@ -0,0 +1,3 @@
rule_target := dp-trace-telemetry
rule_prerequisites :=
rule_recipes := $(call build_helmchart_package,$(rule_target),$(VERSION),$(APP_VERSION))
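build_helmchart_package is assumed to boil down to a helm package call with the chart and app versions overridden; a hypothetical expansion (the chart path and PACKAGE_DIR are assumptions):

# Hypothetical expansion of the build_helmchart_package call above
helm package helmcharts/dp-trace-telemetry \
  --version $(VERSION) --app-version $(APP_VERSION) \
  --destination $(PACKAGE_DIR)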

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,6 @@
apiVersion: v2
appVersion: 1.16.0
description: A Helm chart for Kubernetes
name: datapath-trace
type: application
version: 0.1.0

View File

@@ -0,0 +1,21 @@
{{- define "dp_trace_telemetry.volume.prestart" -}}
{{- if eq .Values.datapath_trace.debug.enable_prestart_script .Values.define_enable_val_yes }}
- name: dp-trace-telemetry-prestart
hostPath:
{{- if .Values.datapath_trace.debug.prestart_script }}
path: {{ .Values.datapath_trace.debug.prestart_script }}
{{- else }}
path: /etc/tsg-os/{{ .Release.Name }}/dp_trace_telemetry_prestart_script.sh
{{- end }}
type: FileOrCreate
{{- end }}
{{- end -}}
{{- define "dp_trace_telemetry.mount.prestart" -}}
{{- if eq .Values.datapath_trace.debug.enable_prestart_script .Values.define_enable_val_yes }}
- name: prestart-dir
mountPath: /tmp/prestart
- name: dp-trace-telemetry-prestart
mountPath: /opt/tsg/scripts/prestart.sh
{{- end }}
{{- end -}}
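These named templates are presumably consumed via Helm's include/template in chart templates that are not part of this hunk (the inject-adapter chart below uses the same pattern for its traffic-engine.* helpers). To eyeball what the prestart volume and mount expand to, the chart can be rendered locally; the release name, chart path and value overrides here are illustrative:

# Local render to inspect the prestart helpers (paths and values are assumptions)
helm template trace helmcharts/dp-trace-telemetry \
  --set datapath_trace.debug.enable_prestart_script=true \
  --set define_enable_val_yes=true | less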

View File

@@ -0,0 +1,10 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
serviceFunction: {{ .Release.Name }}
name: {{ .Release.Name }}
rules:
- apiGroups: [""]
resources: ["services", "nodes"]
verbs: ["get", "list", "watch"]

View File

@@ -0,0 +1,14 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
serviceFunction: {{ .Release.Name }}
name: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ .Release.Name }}
subjects:
- kind: ServiceAccount
name: {{ .Release.Name }}
namespace: {{ .Release.Namespace }}

View File

@@ -0,0 +1,11 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: dp-trace-telemetry-reload-{{ .Release.Name }}
data:
values.yaml: |
datapath_trace:
datapath_trace_file_max_size_in_KB:
{{ toYaml .Values.datapath_trace.datapath_trace_file_max_size_in_KB | indent 8 }}
rule:
{{ toYaml .Values.datapath_trace.rule | indent 8 }}

View File

@@ -0,0 +1,16 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: dp-trace-telemetry-{{ .Release.Name }}
data:
values.yaml: |
datapath_trace:
enable: {{ toYaml .Values.datapath_trace.enable}}
datapath_trace_affinity:
{{ toYaml .Values.datapath_trace.datapath_trace_affinity | indent 8 }}
cm:
{{ toYaml .Values.datapath_trace.cm | indent 8 }}
olap:
{{ toYaml .Values.datapath_trace.olap | indent 8 }}
device:
{{ toYaml .Values.device | indent 6 }}

View File

@@ -0,0 +1,233 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: dp-trace-telemetry
labels:
app: dp-trace-telemetry
component: dp-trace-telemetry
annotations:
configmap.reloader.stakater.com/reload: dp-trace-telemetry-{{ .Release.Name }}
spec:
replicas: 1
selector:
matchLabels:
app: dp-trace-telemetry
strategy:
type: Recreate
template:
metadata:
labels:
app: dp-trace-telemetry
serviceFunction: {{.Release.Name}}
component: dp-trace-telemetry
annotations:
kubectl.kubernetes.io/default-container: dp-trace-telemetry
prometheus.io/port: "9005"
prometheus.io/scrape: "true"
spec:
serviceAccountName: {{ .Release.Name }}
shareProcessNamespace: true
tolerations:
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
containers:
- name: inotify-dynamic-conf
image: "registry.gdnt-cloud.website/tsg/os/dp-trace-telemetry:{{ .Chart.AppVersion }}"
imagePullPolicy: Never
command:
- "bash"
- "-ec"
- |
/usr/local/bin/j2 -f yaml /templates/dp_trace_dy.conf.j2 /templates/values.yaml -o /opt/tsg/dp_trace_telemetry/share_conf/dp_trace_dy.conf
WATCH_DIR="/templates/values.yaml"
while inotifywait -r -e modify,create "$WATCH_DIR"; do
echo "send HUB signal to dp_trace"
/usr/local/bin/j2 -f yaml /templates/dp_trace_dy.conf.j2 /templates/values.yaml -o /opt/tsg/dp_trace_telemetry/share_conf/dp_trace_dy.conf
pkill -1 -f dp_trace_telemetry
echo "signal send"
done
volumeMounts:
- name: dp-trace-dy
mountPath: "/templates/values.yaml"
subPath: "values.yaml"
- name: share-config
mountPath: /opt/tsg/dp_trace_telemetry/share_conf/
lifecycle:
postStart:
exec:
command:
- "bash"
- "-ec"
- |
while true; do
if [ -f "/opt/tsg/dp_trace_telemetry/share_conf/dp_trace_dy.conf" ]; then
echo "File /opt/tsg/dp_trace_telemetry/share_conf/dp_trace_dy.conf exists. Exiting."
exit 0
fi
echo "File /opt/tsg/dp_trace_telemetry/share_conf/dp_trace_dy.conf does not exist. Sleeping for 2 seconds."
sleep 2
done
- name: dp-trace-telemetry
image: "registry.gdnt-cloud.website/tsg/os/dp-trace-telemetry:{{ .Chart.AppVersion }}"
imagePullPolicy: Never
workingDir: /opt/tsg/dp_trace_telemetry/
command:
- "bash"
- "-ec"
- |
ln -sf /opt/tsg/dp_trace_telemetry/share_conf/dp_trace_dy.conf /opt/tsg/dp_trace_telemetry/etc/dp_trace_dy.conf
/usr/local/bin/entrypoint.sh \
{{- if .Values.datapath_trace.debug.enable_prestart_script }}
--enable_prestart \
{{- end }}
{{- if .Values.datapath_trace.debug.enable_interactive_startup }}
--enable_interactive_startup \
{{- end }}
|| echo "Failed to start."
volumeMounts:
- name: dp-trace-telemetry-configs-volume
mountPath: "/templates/values.yaml"
subPath: "values.yaml"
- name: dp-telemetry-daemon
mountPath: /var/lib/dp_telemetry_daemon
- name: share-config
mountPath: /opt/tsg/dp_trace_telemetry/share_conf/
- name: localtime-node
mountPath: /etc/localtime
readOnly: true
- name: opt-tsg-mrzcpd
mountPath: /opt/tsg/mrzcpd
mountPropagation: HostToContainer
readOnly: false
- name: var-run-mrzcpd
mountPath: /var/run/mrzcpd
readOnly: false
- name: var-run-dpdk
mountPath: /var/run/dpdk
readOnly: false
- name: profile-mrzcpd
mountPath: /etc/profile.d/mrzcpd.sh
readOnly: true
- name: ldconfig-mrzcpd
mountPath: /etc/ld.so.conf.d/mrzcpd.conf
readOnly: true
{{- if .Values.datapath_trace.debug.enable_mount_host_filesystem }}
- name: host-root
mountPath: /host
{{- end }}
{{- if .Values.datapath_trace.debug.enable_prestart_script }}
- name: prestart-dir
mountPath: /tmp/prestart
- name: dp-trace-prestart
mountPath: /opt/tsg/scripts/prestart.sh
{{- end }}
env:
- name: SLED_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: DEPLOYMENT_NAME
value: dp-trace-telemetry
securityContext:
privileged: true
ports:
- containerPort: 9086
{{- if .Values.datapath_trace.debug.enable_liveness_probe }}
livenessProbe:
httpGet:
httpHeaders:
- name: Custom-Header
value: Awesome
path: /probe
port: 9086
initialDelaySeconds: 30
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 10
startupProbe:
httpGet:
httpHeaders:
- name: Custom-Header
value: Awesome
path: /probe
port: 9086
initialDelaySeconds: 30
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 10
{{- end }}
initContainers:
- name: init-packet-io-engine-ready
image: "registry.gdnt-cloud.website/tsg/os/dp-trace-telemetry:{{ .Chart.AppVersion }}"
imagePullPolicy: Never
command:
- "bash"
- "-ec"
- |
until [ $(curl -s -o /dev/null -w "%{http_code}" http://${NODE_IP}:9086/probe) -eq 200 ]; do echo waiting for packet-io-engine ready; sleep 2; done
env:
- name: NODE_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: init-default-svc
image: "registry.gdnt-cloud.website/tsg/os/dp-trace-telemetry:{{ .Chart.AppVersion }}"
imagePullPolicy: Never
command:
- "bash"
- "-ec"
- |
until nslookup kubernetes.default.svc; do echo waiting for kubernetes service; sleep 2; done
volumes:
- name: share-config
emptyDir: {}
- name: dp-trace-telemetry-configs-volume
configMap:
name: dp-trace-telemetry-{{ .Release.Name }}
- name: prestart-dir
hostPath:
path: /etc/tsg-os/{{ .Release.Name }}/
type: DirectoryOrCreate
- name: dp-trace-prestart
hostPath:
{{- if .Values.datapath_trace.debug.prestart_script }}
path: {{ .Values.datapath_trace.debug.prestart_script }}
{{- else }}
path: /etc/tsg-os/{{ .Release.Name }}/dp_trace_prestart_script.sh
{{- end }}
type: FileOrCreate
- name: opt-tsg-mrzcpd
hostPath:
path: /opt/tsg/mrzcpd
- name: var-run-mrzcpd
hostPath:
path: /var/run/mrzcpd
- name: var-run-dpdk
hostPath:
path: /var/run/dpdk
- name: profile-mrzcpd
hostPath:
path: /etc/profile.d/mrzcpd.sh
type: File
- name: ldconfig-mrzcpd
hostPath:
path: /etc/ld.so.conf.d/mrzcpd.conf
type: File
- name: localtime-node
hostPath:
path: /etc/localtime
- name: dp-telemetry-daemon
hostPath:
path: /var/lib/dp_telemetry_daemon
type: DirectoryOrCreate
- name: dp-trace-dy
configMap:
name: dp-trace-telemetry-reload-{{ .Release.Name }}
- name: host-root
hostPath:
path: /

View File

@@ -0,0 +1,6 @@
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
serviceFunction: {{ .Release.Name }}
name: {{ .Release.Name }}

View File

@@ -0,0 +1,91 @@
# Default values for dp-trace-telemetry.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: nginx
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
enable_interactive_startup: no
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true,
# a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
mergeExporter:
image:
repository: quay.io/rebuy/exporter-merger
pullPolicy: Never
tag: "v0.2.0"
mergePort: "9005"
mergeUrls: "http://127.0.0.1:8901/metrics http://127.0.0.1:8902/metrics http://127.0.0.1:8903/metrics"
define_enable_val_yes: true
#datapath_trace: { debug: { enable_liveness_probe: no, enable_interactive_startup: no, enable_prestart_script: 'no', enable_mount_host_filesystem:'no', prestart_script: ""}}
#device: {}

View File

@@ -0,0 +1,3 @@
rule_target := inject-adapter
rule_prerequisites :=
rule_recipes := $(call build_helmchart_package,$(rule_target),$(VERSION),$(APP_VERSION))

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,24 @@
apiVersion: v2
name: inject-adapter
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -0,0 +1,94 @@
{{- define "traffic-engine.mount.mrzcpd" -}}
- name: opt-tsg-mrzcpd
mountPath: /opt/tsg/mrzcpd
readOnly: false
- name: var-run-mrzcpd
mountPath: /var/run/mrzcpd
readOnly: false
- name: var-run-dpdk
mountPath: /var/run/dpdk
readOnly: false
- name: root-sys
mountPath: /root/sys
readOnly: false
- name: profile-mrzcpd
mountPath: /etc/profile.d/mrzcpd.sh
readOnly: true
- name: ldconfig-mrzcpd
mountPath: /etc/ld.so.conf.d/mrzcpd.conf
readOnly: true
{{- end -}}
{{- define "traffic-engine.mount.localtime" -}}
- name: localtime-node
mountPath: /etc/localtime
readOnly: true
{{- end -}}
{{- define "traffic-engine.volume.mrzcpd" -}}
- name: opt-tsg-mrzcpd
hostPath:
path: /opt/tsg/mrzcpd
- name: var-run-mrzcpd
hostPath:
path: /var/run/mrzcpd
- name: var-run-dpdk
hostPath:
path: /var/run/dpdk
- name: root-sys
hostPath:
path: /root/sys
- name: profile-mrzcpd
hostPath:
path: /etc/profile.d/mrzcpd.sh
type: File
- name: ldconfig-mrzcpd
hostPath:
path: /etc/ld.so.conf.d/mrzcpd.conf
type: File
{{- end -}}
{{- define "traffic-engine.volume.hostpath" -}}
- name: host-root
hostPath:
path: /
{{- end -}}
{{- define "traffic-engine.volume.localtime" -}}
- name: localtime-node
hostPath:
path: /etc/localtime
{{- end -}}
{{- define "traffic-engine.inject_adapter.mount.hostpath" -}}
{{- if eq .Values.debug.inject_adapter.enable_mount_host_filesystem .Values.define_enable_val_yes }}
- name: host-root
mountPath: /host
{{- end }}
{{- end -}}
{{- define "traffic-engine.inject_adapter.volume.prestart" -}}
{{- if eq .Values.debug.inject_adapter.enable_prestart_script .Values.define_enable_val_yes }}
- name: inject-adapter-prestart
hostPath:
{{- if .Values.debug.inject_adapter.prestart_script }}
path: {{ .Values.debug.inject_adapter.prestart_script }}
{{- else }}
path: /etc/tsg-os/{{ .Release.Name }}/inject_adapter_prestart_script.sh
{{- end }}
type: FileOrCreate
{{- end }}
{{- end -}}
{{- define "traffic-engine.inject_adapter.mount.prestart" -}}
{{- if eq .Values.debug.inject_adapter.enable_prestart_script .Values.define_enable_val_yes }}
- name: prestart-dir
mountPath: /tmp/prestart
- name: inject-adapter-prestart
mountPath: /opt/tsg/scripts/prestart.sh
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: inject-adapter
namespace: tsg-os-system
data:
values.yaml: |
inject_adapter:
enable: {{ toYaml .Values.inject_adapter.enable}}
inject_adapter_affinity:
{{ toYaml .Values.inject_adapter_affinity | indent 6 }}
inject_adapter_config:
inject_adapter_nic:
{{ toYaml .Values.inject_adapter_config.inject_adapter_nic | indent 8 }}

View File

@@ -0,0 +1,111 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ .Release.Name }}
labels:
app: {{ .Release.Name }}
component: inject-adapter
annotations:
reloader.stakater.com/auto: "true"
spec:
replicas: 1
selector:
matchLabels:
app: {{ .Release.Name }}
strategy:
type: Recreate
template:
metadata:
labels:
app: {{ .Release.Name }}
serviceFunction: {{ .Release.Name }}
component: inject-adapter
annotations:
prometheus.io/port: "9009"
prometheus.io/scrape: "true"
spec:
tolerations:
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
containers:
- name: inject-adapter
image: "registry.gdnt-cloud.website/tsg/os/inject-adapter:{{ .Chart.AppVersion }}"
imagePullPolicy: Never
workingDir: /opt/tsg/packet_adapter
command:
- "bash"
- "-ec"
- |
/usr/local/bin/entrypoint.sh \
{{- if .Values.debug.inject_adapter.enable_prestart_script }}
--enable_prestart \
{{- end }}
{{- if .Values.debug.inject_adapter.enable_interactive_startup }}
--enable_interactive_startup \
{{- end }}
|| echo "Failed to start."
ports:
- containerPort: 9009
env:
- name: DEPLOYMENT_NAME
value: inject-adapter
- name: SERVICENAME
value: {{ .Release.Name }}-announce-port
- name: NODE_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: MRZCPD_CTRLMSG_LISTEN_ADDR
valueFrom:
fieldRef:
fieldPath: status.hostIP
- name: OVERRIDE_SLED_IP
valueFrom:
fieldRef:
fieldPath: status.hostIP
{{- if .Values.debug.inject_adapter.enable_liveness_probe }}
livenessProbe:
tcpSocket:
port: 9009
failureThreshold: 1
timeoutSeconds: 10
startupProbe:
tcpSocket:
port: 9009
failureThreshold: 30
periodSeconds: 10
{{- end }}
securityContext:
privileged: true
volumeMounts:
- name: inject-adapter
mountPath: "/templates/values.yaml"
subPath: "values.yaml"
- name: inject-log
mountPath: /opt/tsg/packet_adapter/log
{{ template "traffic-engine.mount.mrzcpd" . }}
{{ template "traffic-engine.mount.localtime" . }}
{{ template "traffic-engine.inject_adapter.mount.prestart" . }}
{{ template "traffic-engine.inject_adapter.mount.hostpath" . }}
hostNetwork: true
volumes:
- name: inject-adapter
configMap:
name: inject-adapter
- name: prestart-dir
hostPath:
path: /etc/tsg-os/{{ .Release.Name }}/
type: DirectoryOrCreate
- name: inject-log
hostPath:
path: /var/log/{{ .Release.Name }}/inject/
{{ template "traffic-engine.volume.mrzcpd" . }}
{{ template "traffic-engine.volume.localtime" . }}
{{ template "traffic-engine.inject_adapter.volume.prestart" . }}
{{ template "traffic-engine.volume.hostpath" . }}

View File

@@ -0,0 +1,100 @@
# Default values for helm.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
inject_adapter_affinity: [95]
define_enable_val_yes: yes
inject_adapter:
enable: yes
inject_adapter_config:
inject_adapter_nic: nf_1_shaping_engine
debug:
inject_adapter:
enable_liveness_probe: yes
enable_interactive_startup: no
enable_prestart_script: no
enable_mount_host_filesystem: no
#default: /etc/tsg-os/${service_function_name}/shaping_prestart_script.sh
prestart_script: ""
image:
repository: nginx
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}

View File

@@ -0,0 +1,3 @@
rule_target := log-collector
rule_prerequisites :=
rule_recipes := $(call render_manifest_from_env,$(rule_target),log-collector.yaml)

View File

@@ -0,0 +1,41 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: log-collector
labels:
app: log-collector
namespace: tsg-os-system
spec:
selector:
matchLabels:
app: log-collector
template:
metadata:
labels:
app: log-collector
spec:
containers:
- name: log-collector
image: registry.gdnt-cloud.website/tsg/os/log-collector:{{ APP_VERSION }}
command:
- "bash"
- "-ec"
- |
/usr/local/bin/entrypoint.sh \
|| echo "Failed to start."
volumeMounts:
- name: logs
mountPath: /var/log/pods
- name: journal-volume
mountPath: /run/systemd/journal
securityContext:
privileged: true
volumes:
- name: logs
hostPath:
path: /var/log/pods
type: DirectoryOrCreate
- name: journal-volume
hostPath:
path: /run/systemd/journal
type: Directory

View File

@@ -0,0 +1,3 @@
rule_target := nic-uio-binder
rule_prerequisites :=
rule_recipes := $(call render_manifest_from_env,$(rule_target),nic-uio-binder.yaml)

View File

@@ -0,0 +1,146 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: nic-uio-binder-account
namespace: tsg-os-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: nic-uio-binder-account-nodes
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: nic-uio-binder-account-pods
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["get","list","patch","watch","update","create","delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: nic-uio-binder-account-services
rules:
- apiGroups: [""]
resources: ["endpoints", "services"]
verbs: ["get","list","watch","update","create","delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: nic-uio-binder-account-binding-nodes
subjects:
- kind: ServiceAccount
namespace: tsg-os-system
name: nic-uio-binder-account
apiGroup: ""
roleRef:
kind: ClusterRole
name: nic-uio-binder-account-nodes
apiGroup: ""
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: nic-uio-binder-account-binding-pods
subjects:
- kind: ServiceAccount
namespace: tsg-os-system
name: nic-uio-binder-account
apiGroup: ""
roleRef:
kind: ClusterRole
name: nic-uio-binder-account-pods
apiGroup: ""
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: nic-uio-binder-account-binding-services
subjects:
- kind: ServiceAccount
namespace: tsg-os-system
name: nic-uio-binder-account
apiGroup: ""
roleRef:
kind: ClusterRole
name: nic-uio-binder-account-services
apiGroup: ""
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: nic-uio-binder
labels:
app: nic-uio-binder
namespace: tsg-os-system
spec:
selector:
matchLabels:
app: nic-uio-binder
template:
metadata:
labels:
app: nic-uio-binder
spec:
serviceAccountName: nic-uio-binder-account
containers:
- name: nic-uio-binder
image: registry.gdnt-cloud.website/tsg/os/nic-uio-binder:{{ APP_VERSION }}
command:
- "bash"
- "-ec"
- |
/usr/local/bin/entrypoint.sh \
|| echo "Failed to start."
volumeMounts:
- name: hwfile
mountPath: /var/run/mrzcpd
- name: dev-vfio
mountPath: /dev/vfio
- name: k3s-api
mountPath: /var/lib/kubelet
- name: modules
mountPath: /lib/modules
securityContext:
privileged: true
initContainers:
- name: init-nic-uio-binder
image: registry.gdnt-cloud.website/tsg/os/nic-uio-binder:{{ APP_VERSION }}
command:
- "bash"
- "-ec"
- |
/opt/tsg/nic-uio-binder/bin/generateHwfile
volumeMounts:
- name: hwfile
mountPath: /var/run/mrzcpd
- name: dev-vfio
mountPath: /dev/vfio
- name: k3s-api
mountPath: /var/lib/kubelet
- name: modules
mountPath: /lib/modules
securityContext:
privileged: true
volumes:
- name: hwfile
hostPath:
path: /var/run/mrzcpd
type: DirectoryOrCreate
- name: dev-vfio
hostPath:
path: /dev/vfio
- name: k3s-api
hostPath:
path: /var/lib/kubelet/
- name: modules
hostPath:
path: /lib/modules
hostNetwork: true

View File

@@ -0,0 +1,3 @@
rule_target := packet-io-engine
rule_prerequisites :=
rule_recipes := $(call build_helmchart_package,$(rule_target),$(VERSION),$(APP_VERSION))

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,24 @@
apiVersion: v2
name: mrzcpd
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -0,0 +1,21 @@
{{- define "mrzcpd.volume.prestart" -}}
{{- if .Values.mrzcpd.debug.enable_prestart_script }}
- name: mrzcpd-prestart
hostPath:
{{- if .Values.mrzcpd.debug.prestart_script }}
path: {{ .Values.mrzcpd.debug.prestart_script }}
{{- else }}
path: /etc/tsg-os/{{ .Release.Name }}/packet_io_engine_prestart_script.sh
{{- end }}
type: FileOrCreate
{{- end }}
{{- end -}}
{{- define "mrzcpd.mount.prestart" -}}
{{- if .Values.mrzcpd.debug.enable_prestart_script }}
- name: prestart-dir
mountPath: /tmp/prestart
- name: mrzcpd-prestart
mountPath: /opt/tsg/scripts/prestart.sh
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: packet-io-engine-reload-conf
data:
values.yaml: |
obp_devices:
{{ toYaml .Values.mrzcpd.obp_devices | indent 6}}
ruleset:
{{ toYaml .Values.mrzcpd.ruleset | indent 6}}

View File

@@ -0,0 +1,55 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: packet-io-engine-conf
data:
values.yaml: |
mrzcpd:
interfaces:
{{ toYaml .Values.mrzcpd.interfaces | indent 8 }}
virtual_interfaces:
{{ toYaml .Values.mrzcpd.virtual_interfaces | indent 8 }}
services:
{{ toYaml .Values.mrzcpd.services | indent 8 }}
virtual_wires:
{{ toYaml .Values.mrzcpd.virtual_wires | indent 8 }}
service_lb:
{{ toYaml .Values.mrzcpd.service_lb | indent 8 }}
service_chaining:
{{ toYaml .Values.mrzcpd.service_chaining | indent 8 }}
traffic_mirror:
{{ toYaml .Values.mrzcpd.traffic_mirror | indent 8 }}
distmode:
{{ toYaml .Values.mrzcpd.distmode | indent 8 }}
nf_count:
{{ toYaml .Values.mrzcpd.nf_count | indent 8 }}
nr_max_ef_adapters:
{{ toYaml .Values.mrzcpd.nr_max_ef_adapters | indent 8 }}
nr_max_vwires:
{{ toYaml .Values.mrzcpd.nr_max_vwires | indent 8 }}
nr_max_tera_adapters:
{{ toYaml .Values.mrzcpd.nr_max_tera_adapters | indent 8 }}
nr_max_link_dbs:
{{ toYaml .Values.mrzcpd.nr_max_link_dbs | indent 8 }}
debug:
{{ toYaml .Values.mrzcpd.debug | indent 8 }}
cpu_affinity:
{{ toYaml .Values.mrzcpd.cpu_affinity | indent 8 }}
hugepages:
{{ toYaml .Values.mrzcpd.hugepages | indent 8 }}
linkdb:
{{ toYaml .Values.mrzcpd.linkdb | indent 8 }}
pktmbuf_max_size:
{{ toYaml .Values.mrzcpd.pktmbuf_max_size | indent 8 }}
pktmbuf_queue_size:
{{ toYaml .Values.mrzcpd.pktmbuf_queue_size | indent 8 }}
pktmbuf_pool_create_mode:
{{ toYaml .Values.mrzcpd.pktmbuf_pool_create_mode | indent 8 }}
pktmbuf_pool_max_elements_count:
{{ toYaml .Values.mrzcpd.pktmbuf_pool_max_elements_count | indent 8 }}
enable_check_pktmbuf_leak:
{{ toYaml .Values.mrzcpd.enable_check_pktmbuf_leak | indent 8 }}
enable_poll_wait_throttle:
{{ toYaml .Values.mrzcpd.enable_poll_wait_throttle | indent 8 }}
indirect_pktmbuf:
{{ toYaml .Values.mrzcpd.indirect_pktmbuf | indent 8 }}
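Note: a quick way to sanity-check how the toYaml/indent blocks above render, assuming the chart lives under helmcharts/mrzcpd:
helm template packet-io-engine helmcharts/mrzcpd | kubectl apply --dry-run=client -f -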


@@ -0,0 +1,386 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: packet-io-engine
labels:
app: packet-io-engine
component: packet-io-engine
annotations:
configmap.reloader.stakater.com/reload: packet-io-engine-conf
spec:
replicas: 1
selector:
matchLabels:
app: packet-io-engine
strategy:
type: Recreate
template:
metadata:
labels:
app: packet-io-engine
serviceFunction: {{ .Release.Name }}
component: packet-io-engine
annotations:
kubectl.kubernetes.io/default-container: packet-io-engine
prometheus.io/port: "9005"
prometheus.io/scrape: "true"
spec:
shareProcessNamespace: true
tolerations:
- key: node-role.kubernetes.io/control-plane
operator: Exists
effect: NoSchedule
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
containers:
- name: inotify-dynamic-conf
image: "registry.gdnt-cloud.website/tsg/os/packet-io-engine:{{ .Chart.AppVersion }}"
imagePullPolicy: Never
command:
- "bash"
- "-ec"
- |
/usr/local/bin/j2 -f yaml /templates/mrglobal.dynamic.conf.j2 /templates/values.yaml -o /opt/tsg/config/shared/mrglobal.dynamic.conf
WATCH_DIR="/templates/values.yaml"
while inotifywait -r -e modify,create "$WATCH_DIR"; do
echo "send HUB signal to mrzcpd"
/usr/local/bin/j2 -f yaml /templates/mrglobal.dynamic.conf.j2 /templates/values.yaml -o /opt/tsg/config/shared/mrglobal.dynamic.conf
pkill -1 mrzcpd
echo "signal send"
done
volumeMounts:
- name: shared-dir
mountPath: /opt/tsg/config/shared
- name: packet-io-engine-reload-conf
mountPath: /templates/values.yaml
subPath: "values.yaml"
securityContext:
privileged: true
lifecycle:
postStart:
exec:
command:
- "bash"
- "-ec"
- |
while true; do
if [ -f "/opt/tsg/config/shared/mrglobal.dynamic.conf" ]; then
echo "File /opt/tsg/config/shared/mrglobal.dynamic.conf exists. Exiting."
exit 0
fi
echo "File /opt/tsg/config/shared/mrglobal.dynamic.conf does not exist. Sleeping for 2 seconds."
sleep 2
done
- name: packet-io-engine
image: "registry.gdnt-cloud.website/tsg/os/packet-io-engine:{{ .Chart.AppVersion }}"
imagePullPolicy: Never
workingDir: /opt/tsg/mrzcpd
command:
- "bash"
- "-ec"
- |
/opt/tsg/mrzcpd/bin/mrmarch --auto
mount --rbind /opt/tsg/mrzcpd /mnt/packet-io-engine/mrzcpd
cp -f /etc/ld.so.conf.d/mrzcpd.conf /mnt/packet-io-engine/ld.so.conf.d/mrzcpd.conf
cp -f /etc/profile.d/mrzcpd.sh /mnt/packet-io-engine/profile.d/mrzcpd.sh
ldconfig -C /etc/ld.so.cache -r /host
ln -s /opt/tsg/mrzcpd/shared/mrglobal.dynamic.conf /opt/tsg/mrzcpd/etc/mrglobal.dynamic.conf
/usr/local/bin/entrypoint.sh \
{{- if .Values.mrzcpd.debug.enable_prestart_script }}
--enable_prestart \
{{- end }}
{{- if .Values.mrzcpd.debug.enable_interactive_startup }}
--enable_interactive_startup \
{{- end }}
|| echo "Failed to start."
volumeMounts:
- name: journal-volume
mountPath: /run/systemd/journal
- name: shared-dir
mountPath: /opt/tsg/mrzcpd/shared
- name: mrzcpd
mountPath: /mnt/packet-io-engine/mrzcpd
mountPropagation: Bidirectional
- name: ldconfig-mrzcpd
mountPath: /mnt/packet-io-engine/ld.so.conf.d
- name: etc-profile
mountPath: /mnt/packet-io-engine/profile.d
- name: packet-io-engine-conf
mountPath: /templates/values.yaml
subPath: "values.yaml"
- name: run-mrzcpd
mountPath: /var/run/mrzcpd
- name: hugepages
mountPath: /var/run/mrzcpd/hugepages
- name: dpdk
mountPath: /var/run/dpdk
- name: dev-vfio
mountPath: /dev/vfio
- name: host-root
mountPath: /host
mountPropagation: HostToContainer
- name: localtime-node
mountPath: /etc/localtime
readOnly: true
{{ template "mrzcpd.mount.prestart" . }}
resources:
limits:
nic-uio/packet-io-engine: 1
{{- if .Values.mrzcpd.interfaces }}
{{- range $interface := .Values.mrzcpd.interfaces }}
nic-uio/{{ $interface.name }}: 1
{{- end }}
{{- end }}
hugepages-1Gi: {{.Values.mrzcpd.hugepages }}
requests:
memory: 100Mi
env:
- name: MLX5_GLUE_PATH
value: /opt/tsg/mrzcpd/lib
- name: DEPLOYMENT_NAME
value: packet-io-engine
{{- if .Values.mrzcpd.debug.enable_liveness_probe }}
livenessProbe:
httpGet:
host: 127.0.0.1
httpHeaders:
- name: Custom-Header
value: Awesome
path: /probe
port: 9086
initialDelaySeconds: 20
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 1
startupProbe:
httpGet:
host: 127.0.0.1
httpHeaders:
- name: Custom-Header
value: Awesome
path: /probe
port: 9086
initialDelaySeconds: 20
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 1
{{- end }}
lifecycle:
preStop:
exec:
command:
- "bash"
- "-ec"
- |
rm -f /run/mrzcpd/mrmonit.daemon
rm -f /run/mrzcpd/huge_pages/rtemap_*
rm -f /run/.rte_config
rm -f /run/.rte_hugepage_info
rm -rf /run/.dpdk
securityContext:
privileged: true
- command:
- "bash"
- "-ec"
- |
until [ $(curl -s -o /dev/null -w "%{http_code}" http://localhost:9086/probe) -eq 200 ]; do echo waiting for packet-io-engine ready; sleep 2; done
/opt/tsg/mrzcpd/bin/monit_device --prometheus-client
image: "registry.gdnt-cloud.website/tsg/os/packet-io-engine:{{ .Chart.AppVersion }}"
imagePullPolicy: Never
name: mrapm-device
{{- if .Values.mrzcpd.debug.enable_liveness_probe}}
livenessProbe:
httpGet:
host: 127.0.0.1
httpHeaders:
- name: Custom-Header
value: Awesome
path: /probe
port: 9086
initialDelaySeconds: 20
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 1
startupProbe:
httpGet:
host: 127.0.0.1
httpHeaders:
- name: Custom-Header
value: Awesome
path: /probe
port: 9086
initialDelaySeconds: 20
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 1
{{- end }}
volumeMounts:
- mountPath: /var/run/mrzcpd
name: run-mrzcpd
readOnly: true
- mountPath: /var/run/dpdk
name: dpdk
readOnly: true
- mountPath: /etc/localtime
name: localtime-node
readOnly: true
- command:
- "bash"
- "-ec"
- |
until [ $(curl -s -o /dev/null -w "%{http_code}" http://localhost:9086/probe) -eq 200 ]; do echo waiting for packet-io-engine ready; sleep 2; done
/opt/tsg/mrzcpd/bin/monit_stream --prometheus-client
image: "registry.gdnt-cloud.website/tsg/os/packet-io-engine:{{ .Chart.AppVersion }}"
imagePullPolicy: Never
name: monit-stream
{{- if .Values.mrzcpd.debug.enable_liveness_probe }}
livenessProbe:
httpGet:
host: 127.0.0.1
httpHeaders:
- name: Custom-Header
value: Awesome
path: /probe
port: 9086
initialDelaySeconds: 20
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 1
startupProbe:
httpGet:
host: 127.0.0.1
httpHeaders:
- name: Custom-Header
value: Awesome
path: /probe
port: 9086
initialDelaySeconds: 20
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 1
{{- end }}
volumeMounts:
- mountPath: /var/run/mrzcpd
name: run-mrzcpd
readOnly: true
- mountPath: /var/run/dpdk
name: dpdk
readOnly: true
- mountPath: /etc/localtime
name: localtime-node
readOnly: true
- command:
- "bash"
- "-ec"
- |
until [ $(curl -s -o /dev/null -w "%{http_code}" http://localhost:9086/probe) -eq 200 ]; do echo waiting for packet-io-engine ready; sleep 2; done
/opt/tsg/mrzcpd/bin/monit_obp --prometheus-client
image: "registry.gdnt-cloud.website/tsg/os/packet-io-engine:{{ .Chart.AppVersion }}"
imagePullPolicy: Never
name: monit-obp
{{- if .Values.mrzcpd.debug.enable_liveness_probe }}
livenessProbe:
httpGet:
host: 127.0.0.1
httpHeaders:
- name: Custom-Header
value: Awesome
path: /probe
port: 9086
initialDelaySeconds: 20
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 1
startupProbe:
httpGet:
host: 127.0.0.1
httpHeaders:
- name: Custom-Header
value: Awesome
path: /probe
port: 9086
initialDelaySeconds: 20
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 1
{{- end }}
volumeMounts:
- mountPath: /var/run/mrzcpd
name: run-mrzcpd
readOnly: true
- mountPath: /var/run/dpdk
name: dpdk
readOnly: true
- mountPath: /etc/localtime
name: localtime-node
readOnly: true
- env:
- name: MERGER_URLS
value: {{ .Values.mergeExporter.mergeUrls }}
- name: MERGER_PORT
value: "{{ .Values.mergeExporter.mergePort }}"
image: "{{ .Values.mergeExporter.image.repository }}:{{ .Values.mergeExporter.image.tag }}"
imagePullPolicy: {{ .Values.mergeExporter.image.pullPolicy }}
name: exporter-merger
ports:
- name: ex-mrzcpd-port
containerPort: {{ .Values.mergeExporter.mergePort }}
protocol: TCP
volumes:
- name: journal-volume
hostPath:
path: /run/systemd/journal
type: Directory
- name: shared-dir
emptyDir: {}
- name: prestart-dir
hostPath:
path: /etc/tsg-os/{{ .Release.Name }}/
type: DirectoryOrCreate
- name: packet-io-engine-conf
configMap:
name: packet-io-engine-conf
- name: packet-io-engine-reload-conf
configMap:
name: packet-io-engine-reload-conf
- name: mrzcpd
hostPath:
path: /opt/tsg/mrzcpd
type: DirectoryOrCreate
- name: run-mrzcpd
hostPath:
path: /var/run/mrzcpd
type: DirectoryOrCreate
- name: hugepages
hostPath:
path: /var/run/mrzcpd/hugepages
type: DirectoryOrCreate
- name: etc-profile
hostPath:
path: /etc/profile.d
type: Directory
- name: dpdk
hostPath:
path: /var/run/dpdk
type: DirectoryOrCreate
- name: ldconfig-mrzcpd
hostPath:
path: /etc/ld.so.conf.d
type: Directory
- name: dev-vfio
hostPath:
path: /dev/vfio
- name: localtime-node
hostPath:
path: /etc/localtime
- name: host-root
hostPath:
path: /
{{ template "mrzcpd.volume.prestart" . }}
hostNetwork: true
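Note: a short smoke-test sketch for this deployment (kubectl only; container and file names are taken from the manifest above):
kubectl get pods -l app=packet-io-engine
kubectl logs deploy/packet-io-engine -c inotify-dynamic-conf
kubectl exec deploy/packet-io-engine -c inotify-dynamic-conf -- \
  cat /opt/tsg/config/shared/mrglobal.dynamic.conf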


@@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
labels:
app: packet-io-engine
name: packet-io-engine
annotations:
prometheus.io/scrape: "true"
spec:
ports:
- name: mrzcpd-exporter-port
{{- if .Values.service.nodePort }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
port: {{ .Values.service.port }}
targetPort: ex-mrzcpd-port
selector:
app: packet-io-engine
type: {{ .Values.service.type }}
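Note: with the default values (merge port 9005), the merged exporter metrics exposed through this service can be spot-checked via a port-forward:
kubectl port-forward svc/packet-io-engine 9005:9005 &
curl -s http://127.0.0.1:9005/metrics | head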


@@ -0,0 +1,124 @@
# Default values for mrzcpd.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: nginx
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
enable_interactive_startup: no
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources:
limits:
hugepages-1Gi: 32Gi
requests:
memory: 100Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
mergeExporter:
image:
repository: quay.io/rebuy/exporter-merger
pullPolicy: Never
tag: "v0.2.0"
mergePort: "9005"
mergeUrls: "http://127.0.0.1:8901/metrics http://127.0.0.1:8902/metrics http://127.0.0.1:8903/metrics"
service:
type: ClusterIP
# nodePort: "30081"
port: "9005"
annotations: {}
define_enable_val_yes: yes
mrzcpd:
interfaces: []
nf_count: 16
services: []
virtual_wires: []
service_lb: {}
service_chaining: {}
traffic_mirror: {}
distmode: 0
datapath_trace: {}
hugepages: 32Gi
linkdb: []
obp_devices: []
pktmbuf_max_size: 4096
pktmbuf_queue_size: 512
pktmbuf_pool_create_mode: 1
pktmbuf_pool_max_elements_count: 2097151
enable_check_pktmbuf_leak: 1
enable_poll_wait_throttle: 512
indirect_pktmbuf: 8192
debug:
enable_liveness_probe: yes
define_enable_val_yes: yes
enable_prestart_script: no
enable_interactive_startup: no
enable_mount_host_filesystem: no
# default: /etc/tsg-os/${service_function_name}/packet_io_engine_prestart_script.sh
prestart_script: ""

helmcharts/trex/build.mk (new file, 3 lines)

@@ -0,0 +1,3 @@
rule_target := trex
rule_prerequisites :=
rule_recipes := $(call build_helmchart_package,$(rule_target),$(VERSION),$(APP_VERSION))
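Note: build_helmchart_package is defined elsewhere in the build system; as a rough sketch it is assumed to boil down to something like the following (the variable names are assumptions):
helm package helmcharts/trex \
  --version "${VERSION}" \
  --app-version "${APP_VERSION}" \
  --destination "${BUILD_DIR}"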


@@ -0,0 +1,6 @@
apiVersion: v2
appVersion: "1.16.0"
description: A Helm chart for Kubernetes
name: trex
type: application
version: 0.1.0


@@ -0,0 +1,76 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: trex
spec:
replicas: 1
selector:
matchLabels:
app: trex
strategy:
type: Recreate
template:
metadata:
labels:
app: trex
spec:
hostNetwork: true
containers:
- name: trex
image: registry.gdnt-cloud.website/tsg/os/trex:{{ .Chart.AppVersion }}
command:
- "bash"
- "-ec"
- |
/usr/local/bin/entrypoint.sh \
|| echo "Failed to start."
securityContext:
privileged: true
resources:
limits:
hugepages-1Gi: {{ .Values.trex.hugepages }}
nic-uio/trex: 1
{{- if .Values.trex.interfaces }}
{{- range $interface := .Values.trex.interfaces }}
nic-uio/{{ $interface }}: 1
{{- end }}
{{- end }}
requests:
hugepages-1Gi: {{ .Values.trex.hugepages }}
memory: 100Mi
env:
- name: MASTER_IOCORE
value: {{.Values.trex.master |quote}}
- name: LATENCY_IOCORE
value: {{.Values.trex.latency |quote}}
- name: CPU_AFFINITY
value: {{ join "," .Values.trex.cpu_affinity }}
- name: TREX_PATH
value: /opt/tsg/trex/v3.02
- name: TREX_HELPER_PATH
value: /opt/tsg/trex-helper
- name: PYTHONPATH
value: /opt/tsg/trex/v3.02/automation/trex_control_plane/interactive/
volumeMounts:
- name: dev-vfio
mountPath: /dev/vfio
- name: modules
mountPath: /lib/modules
- name: mem
mountPath: /dev/mem
- name: hugepage-1gi
mountPath: /var/run/trex/hugepages
volumes:
- name: mem
hostPath:
path: /dev/mem
- name: modules
hostPath:
path: /lib/modules
- name: dev-vfio
hostPath:
path: /dev/vfio
- name: hugepage-1gi
emptyDir:
medium: HugePages-1Gi
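Note: a few pre-flight checks worth running on the target node before scheduling this pod (plain shell, no cluster assumptions):
grep -i hugepages /proc/meminfo      # 1Gi hugepages must be reserved on the node
ls /dev/vfio                         # vfio devices expected by the DPDK data path
lsmod | grep -E 'vfio|uio'           # drivers expected to be loaded by nic-uio-binder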


@@ -0,0 +1,4 @@
trex:
hugepages: 32Gi
cpu_affinity: []
interfaces: []
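Note: an install sketch with these values overridden; the interface names are placeholders and the chart path comes from helmcharts/trex above:
helm upgrade --install trex helmcharts/trex \
  --set trex.hugepages=8Gi \
  --set 'trex.cpu_affinity={4,5,6,7}' \
  --set 'trex.interfaces={trex0,trex1}'   # placeholder nic-uio resource names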