#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <MESA/MESA_prof_load.h>
#include "log.h"
#include "global_metrics.h"
// type, name, value
#define STAT_MAP(XX) \
/* device_nf */ \
XX(STAT_DEVICE_NF_RX_PKT, dev_nf_rx_P, nf_rx.n_pkts) \
XX(STAT_DEVICE_NF_RX_B, dev_nf_rx_B, nf_rx.n_bytes) \
XX(STAT_DEVICE_NF_TX_PKT, dev_nf_tx_P, nf_tx.n_pkts) \
XX(STAT_DEVICE_NF_TX_B, dev_nf_tx_B, nf_tx.n_bytes) \
XX(STAT_KEE_PKT_DOWN_RX_PKT, kee_d_rx_P, downlink_rx.n_pkts) \
XX(STAT_KEE_PKT_DOWN_RX_B, kee_d_rx_B, downlink_rx.n_bytes) \
XX(STAT_KEE_PKT_DOWN_TX_PKT, kee_d_tx_P, downlink_tx.n_pkts) \
XX(STAT_KEE_PKT_DOWN_TX_B, kee_d_tx_B, downlink_tx.n_bytes) \
/* raw traffic */ \
XX(STAT_RAW_TRAFFIC_RX_PKT, raw_rx_P, raw_rx.n_pkts) \
XX(STAT_RAW_TRAFFIC_RX_B, raw_rx_B, raw_rx.n_bytes) \
XX(STAT_RAW_TRAFFIC_TX_PKT, raw_tx_P, raw_tx.n_pkts) \
XX(STAT_RAW_TRAFFIC_TX_B, raw_tx_B, raw_tx.n_bytes) \
/* decrypted traffic */ \
XX(STAT_DECRYPTED_TRAFFIC_RX_PKT, dec_rx_P, dec_rx.n_pkts) \
XX(STAT_DECRYPTED_TRAFFIC_RX_B, dec_rx_B, dec_rx.n_bytes) \
XX(STAT_DECRYPTED_TRAFFIC_TX_PKT, dec_tx_P, dec_tx.n_pkts) \
XX(STAT_DECRYPTED_TRAFFIC_TX_B, dec_tx_B, dec_tx.n_bytes) \
/* device_endpoint_vxlan */ \
XX(STAT_DEVICE_ENDPOINT_VXLAN_RX_PKT, dev_l3_rx_P, endpoint_vxlan_rx.n_pkts) \
XX(STAT_DEVICE_ENDPOINT_VXLAN_RX_B, dev_l3_rx_B, endpoint_vxlan_rx.n_bytes) \
XX(STAT_DEVICE_ENDPOINT_VXLAN_TX_PKT, dev_l3_tx_P, endpoint_vxlan_tx.n_pkts) \
XX(STAT_DEVICE_ENDPOINT_VXLAN_TX_B, dev_l3_tx_B, endpoint_vxlan_tx.n_bytes) \
XX(STAT_DEVICE_ENDPOINT_VXLAN_DROP_PKT, dev_l3_dop_P, endpoint_vxlan_drop.n_pkts) \
XX(STAT_DEVICE_ENDPOINT_VXLAN_DROP_B, dev_l3_dop_B, endpoint_vxlan_drop.n_bytes) \
XX(STAT_KEE_PKT_UP_RX_PKT, kee_u_rx_P, uplink_rx.n_pkts) \
XX(STAT_KEE_PKT_UP_RX_B, kee_u_rx_B, uplink_rx.n_bytes) \
XX(STAT_KEE_PKT_UP_TX_DROP_PKT, kee_u_rxdop_P, uplink_tx_drop.n_pkts) \
XX(STAT_KEE_PKT_UP_TX_DROP_B, kee_u_rxdop_B, uplink_tx_drop.n_bytes) \
/* device_endpoint_vlan */ \
XX(STAT_DEVICE_ENDPOINT_VLAN_RX_PKT, dev_l2_rx_P, endpoint_vlan_rx.n_pkts) \
XX(STAT_DEVICE_ENDPOINT_VLAN_RX_B, dev_l2_rx_B, endpoint_vlan_rx.n_bytes) \
XX(STAT_DEVICE_ENDPOINT_VLAN_TX_PKT, dev_l2_tx_P, endpoint_vlan_tx.n_pkts) \
XX(STAT_DEVICE_ENDPOINT_VLAN_TX_B, dev_l2_tx_B, endpoint_vlan_tx.n_bytes) \
XX(STAT_DEVICE_ENDPOINT_VLAN_DROP_PKT, dev_l2_dop_P, endpoint_vlan_drop.n_pkts) \
XX(STAT_DEVICE_ENDPOINT_VLAN_DROP_B, dev_l2_dop_B, endpoint_vlan_drop.n_bytes) \
/* data_pkt_metrics */ \
XX(STAT_DATA_PKT_MIRR_BYPASS_PKT, mirr_bypass_P, mirr_bypass.n_pkts) \
XX(STAT_DATA_PKT_MIRR_BYPASS_B, mirr_bypass_B, mirr_bypass.n_bytes) \
XX(STAT_DATA_PKT_MIRR_BLOCK_PKT, mirr_block_P, mirr_block.n_pkts) \
XX(STAT_DATA_PKT_MIRR_BLOCK_B, mirr_block_B, mirr_block.n_bytes) \
XX(STAT_DATA_PKT_MIRR_RX_DROP_PKT, mirr_rxdop_P, mirr_rx_drop.n_pkts) \
XX(STAT_DATA_PKT_MIRR_RX_DROP_B, mirr_rxdop_B, mirr_rx_drop.n_bytes) \
XX(STAT_DATA_PKT_MIRR_TX_PKT, mirr_tx_P, mirr_tx.n_pkts) \
XX(STAT_DATA_PKT_MIRR_TX_B, mirr_tx_B, mirr_tx.n_bytes) \
XX(STAT_DATA_PKT_STEE_BYPASS_PKT, stee_bypass_P, stee_bypass.n_pkts) \
XX(STAT_DATA_PKT_STEE_BYPASS_B, stee_bypass_B, stee_bypass.n_bytes) \
XX(STAT_DATA_PKT_STEE_BLOCK_PKT, stee_block_P, stee_block.n_pkts) \
XX(STAT_DATA_PKT_STEE_BLOCK_B, stee_block_B, stee_block.n_bytes) \
XX(STAT_DATA_PKT_STEE_RX_PKT, stee_rx_P, stee_rx.n_pkts) \
XX(STAT_DATA_PKT_STEE_RX_B, stee_rx_B, stee_rx.n_bytes) \
XX(STAT_DATA_PKT_STEE_TX_PKT, stee_tx_P, stee_tx.n_pkts) \
XX(STAT_DATA_PKT_STEE_TX_B, stee_tx_B, stee_tx.n_bytes) \
XX(STAT_DATA_PKT_MISS_SESS_PKT, miss_sess_P, miss_sess.n_pkts) \
XX(STAT_DATA_PKT_MISS_SESS_B, miss_sess_B, miss_sess.n_bytes) \
XX(STAT_DATA_PKT_ERROR_BYPASS_PKT, err_bypass_P, error_bypass.n_pkts) \
XX(STAT_DATA_PKT_ERROR_BYPASS_B, err_bypass_B, error_bypass.n_bytes) \
XX(STAT_DATA_PKT_ERROR_BLOCK_PKT, err_block_P, error_block.n_pkts) \
XX(STAT_DATA_PKT_ERROR_BLOCK_B, err_block_B, error_block.n_bytes) \
/* sf_status_metrics */ \
XX(STAT_SF_STATUS_ACTIVE, sf_active, sf_active) \
XX(STAT_SF_STATUS_INACTIVE, sf_inactive, sf_inactive) \
/* ctrl_pkt_metrics */ \
XX(STAT_CTRL_PKT_RX_PKT, ctrl_rx_P, ctrl_rx.n_pkts) \
XX(STAT_CTRL_PKT_RX_B, ctrl_rx_B, ctrl_rx.n_bytes) \
XX(STAT_CTRL_PKT_TX_PKT, ctrl_tx_P, ctrl_tx.n_pkts) \
XX(STAT_CTRL_PKT_TX_B, ctrl_tx_B, ctrl_tx.n_bytes) \
XX(STAT_CTRL_PKT_OPENING, ctrl_opening, ctrl_opening) \
XX(STAT_CTRL_PKT_ACTIVE, ctrl_active, ctrl_active) \
XX(STAT_CTRL_PKT_CLOSING, ctrl_closing, ctrl_closing) \
XX(STAT_CTRL_PKT_RESETALL, ctrl_resetall, ctrl_resetall) \
XX(STAT_CTRL_PKT_ERROR, ctrl_error, ctrl_error) \
/* sf_session_metrics */ \
XX(STAT_SF_SESSION_NUM, curr_sessions, session_num) \
XX(STAT_SF_SESSION_LOG, session_logs, session_log) \
XX(STAT_SESSION_NEW, session_new, session_new) \
XX(STAT_SESSION_FREE, session_free, session_free) \
/* stateless inject */ \
XX(STAT_STATELESS_INJECT_PKT, stateless_inject_P, stateless_inject.n_pkts) \
XX(STAT_STATELESS_INJECT_B, stateless_inject_B, stateless_inject.n_bytes)
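// X-macro table: each XX(type, name, value) entry expands once per use below to
// generate, in lockstep, the stat_type enum, the exported metric-name strings,
// and the per-counter accessors. Illustrative expansion of the first entry:
//   enum:             STAT_DEVICE_NF_RX_PKT,
//   stat_str[]:       "dev_nf_rx_P",
//   get_value():      case STAT_DEVICE_NF_RX_PKT: return metrics->nf_rx.n_pkts;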
enum stat_type
{
#define XX(_type, _name, _value) _type,
    STAT_MAP(XX)
#undef XX
    STAT_MAX
};
static const char *stat_str[] =
{
#define XX(_type, _name, _value) #_name,
    STAT_MAP(XX)
#undef XX
};
static uint64_t thread_metrics_get_value(struct thread_metrics *metrics, enum stat_type type)
{
    switch (type)
    {
#define XX(_type, _name, _value) \
    case _type:                  \
        return metrics->_value;
        STAT_MAP(XX)
#undef XX
    default:
        return 0;
    }
}
static void thread_metrics_add_value(struct thread_metrics *metrics, enum stat_type type, uint64_t value)
{
    switch (type)
    {
#define XX(_type, _name, _value)  \
    case _type:                   \
        metrics->_value += value; \
        break;
        STAT_MAP(XX)
#undef XX
    default:
        break;
    }
}
static void global_metrics_parse_config(const char *profile, struct metrics_config *config)
{
    MESA_load_profile_string_def(profile, "STAT", "output_file", config->output_file, sizeof(config->output_file), "log/sce.fs4");
    MESA_load_profile_int_def(profile, "STAT", "statsd_cycle", &(config->statsd_cycle), 1);
    LOG_DEBUG("STAT->output_file : %s", config->output_file);
    LOG_DEBUG("STAT->statsd_cycle : %d", config->statsd_cycle);
}
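// Illustrative (hypothetical) profile snippet matching the keys read above,
// assuming the MESA profile loader consumes an INI-style [section]/key layout:
//   [STAT]
//   output_file  = log/sce.fs4   ; fieldstat auto-output path (default shown)
//   statsd_cycle = 1             ; output cycle passed to fieldstat (default shown)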
struct global_metrics *global_metrics_create(const char *profile, int thread_num)
{
    struct global_metrics *global_metrics = (struct global_metrics *)calloc(1, sizeof(struct global_metrics));
    assert(global_metrics != NULL);
    global_metrics->thread_num = thread_num;
    global_metrics->thread_metrics_flag = (int *)calloc(global_metrics->thread_num, sizeof(int));
    global_metrics->thread_metrics_cache = (struct thread_metrics *)calloc(global_metrics->thread_num, sizeof(struct thread_metrics));
    global_metrics_parse_config(profile, &global_metrics->config);
    global_metrics->fs_handle = fieldstat_easy_new(1, "SCE", NULL, 0);
    if (global_metrics->fs_handle == NULL)
    {
        LOG_ERROR("failed to create fieldstat_easy");
        global_metrics_destory(global_metrics);
        return NULL;
    }
    if (fieldstat_easy_enable_auto_output(global_metrics->fs_handle, global_metrics->config.output_file, global_metrics->config.statsd_cycle) != 0)
    {
        LOG_ERROR("failed to enable auto output for fieldstat_easy");
        global_metrics_destory(global_metrics);
        return NULL;
    }
    if (STAT_MAX > (sizeof(global_metrics->fs_id) / sizeof(global_metrics->fs_id[0])))
    {
        LOG_ERROR("insufficient space to store fs_id: at most %zu fs_ids are supported, but %d are needed", (sizeof(global_metrics->fs_id) / sizeof(global_metrics->fs_id[0])), STAT_MAX);
        global_metrics_destory(global_metrics);
        return NULL;
    }
    for (int i = 0; i < STAT_MAX; i++)
    {
        global_metrics->fs_id[i] = fieldstat_easy_register_counter(global_metrics->fs_handle, stat_str[i]);
    }
    return global_metrics;
}
void global_metrics_destory(struct global_metrics *global_metrics)
{
    if (global_metrics)
    {
        if (global_metrics->thread_metrics_flag)
        {
            free(global_metrics->thread_metrics_flag);
            global_metrics->thread_metrics_flag = NULL;
        }
        if (global_metrics->thread_metrics_cache)
        {
            free(global_metrics->thread_metrics_cache);
            global_metrics->thread_metrics_cache = NULL;
        }
        if (global_metrics->fs_handle)
        {
            fieldstat_easy_free(global_metrics->fs_handle);
            global_metrics->fs_handle = NULL;
        }
        free(global_metrics);
        global_metrics = NULL;
    }
}
#define THREAD_METRICS_CACHE_IS_FREE 0
#define THREAD_METRICS_CACHE_IS_BUSY 0xf
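// Per-thread handoff protocol: each worker thread owns one slot in
// thread_metrics_cache[], guarded by thread_metrics_flag[]. A worker publishes a
// snapshot only while its flag is FREE (global_metrics_sync) and then marks the
// slot BUSY; the flush side consumes BUSY slots, folds them into `sum`, and marks
// them FREE again (global_metrics_flush). The flag is the only state touched by
// both sides, via ATOMIC_READ/ATOMIC_SET.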
void global_metrics_sync(struct global_metrics *global_metrics, struct thread_metrics *thread_metrics, int thread_id)
{
    if (ATOMIC_READ(&(global_metrics->thread_metrics_flag[thread_id])) == THREAD_METRICS_CACHE_IS_FREE)
    {
        struct thread_metrics *ptr_metrics = &global_metrics->thread_metrics_cache[thread_id];
        memcpy(ptr_metrics, thread_metrics, sizeof(struct thread_metrics));
        memset(thread_metrics, 0, sizeof(struct thread_metrics));
        ATOMIC_SET(&(global_metrics->thread_metrics_flag[thread_id]), THREAD_METRICS_CACHE_IS_BUSY);
    }
}
void global_metrics_flush(struct global_metrics *global_metrics)
{
    struct thread_metrics *sum = &global_metrics->sum;
    struct thread_metrics last = *sum;
    for (int i = 0; i < global_metrics->thread_num; i++)
    {
        if (ATOMIC_READ(&(global_metrics->thread_metrics_flag[i])) == THREAD_METRICS_CACHE_IS_BUSY)
        {
            struct thread_metrics *thread = &global_metrics->thread_metrics_cache[i];
            for (int j = 0; j < STAT_MAX; j++)
            {
                uint64_t val = thread_metrics_get_value(thread, (enum stat_type)j);
                thread_metrics_add_value(sum, (enum stat_type)j, val);
            }
            memset(thread, 0, sizeof(struct thread_metrics));
            ATOMIC_SET(&(global_metrics->thread_metrics_flag[i]), THREAD_METRICS_CACHE_IS_FREE);
        }
    }
    for (int i = 0; i < STAT_MAX; i++)
    {
        uint64_t delta = thread_metrics_get_value(sum, (enum stat_type)i) - thread_metrics_get_value(&last, (enum stat_type)i);
        fieldstat_easy_counter_incrby(global_metrics->fs_handle, 0, global_metrics->fs_id[i], NULL, 0, delta);
    }
}
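/*
 * Minimal usage sketch (assumptions: a profile path accepted by the MESA loader,
 * one worker per thread_id, and a dedicated stats thread; the cadence shown is
 * illustrative, not taken from this file):
 *
 *   struct global_metrics *gm = global_metrics_create("sce.conf", n_threads);
 *
 *   // in each worker thread, with a thread-local `struct thread_metrics tm`:
 *   //   ... tm.nf_rx.n_pkts++; tm.nf_rx.n_bytes += len; ...
 *   //   global_metrics_sync(gm, &tm, thread_id);   // called periodically
 *
 *   // in the stats thread:
 *   //   global_metrics_flush(gm);                  // called periodically
 *
 *   global_metrics_destory(gm);
 */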