perf: Optimize the output of global metrics to reduce atomic operations
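The idea behind reducing atomic operations here is the usual per-thread counter pattern: each worker bumps its own thread_metrics with plain additions on the hot path, and the global view is produced by summing the per-thread values only when the metrics are output. Below is a minimal sketch of that general pattern; the names (per_thread_counter, counter_inc, aggregate_global, MAX_WORKER_THREADS) are hypothetical illustrations, not this project's actual throughput_metrics API.

#include <stdint.h>

#define MAX_WORKER_THREADS 64   /* hypothetical upper bound on worker threads */

/* Hypothetical per-thread counter: owned by exactly one worker, so the hot
 * path can use plain (non-atomic) additions. */
struct per_thread_counter {
    uint64_t pkts;
    uint64_t bytes;
};

static struct per_thread_counter tx_counters[MAX_WORKER_THREADS];

/* Hot path: called once per packet, only by the owning worker thread. */
static inline void counter_inc(int thread_index, uint64_t pkts, uint64_t bytes)
{
    tx_counters[thread_index].pkts  += pkts;
    tx_counters[thread_index].bytes += bytes;
}

/* Cold path: run only when the global metrics are exported. Summing the
 * per-thread values here replaces per-packet atomic updates of a shared
 * global counter. */
static void aggregate_global(int nr_threads, struct per_thread_counter *out)
{
    out->pkts = 0;
    out->bytes = 0;
    for (int i = 0; i < nr_threads; i++) {
        out->pkts  += tx_counters[i].pkts;
        out->bytes += tx_counters[i].bytes;
    }
}

In a real implementation the per-thread slots are usually padded to a cache line to avoid false sharing between workers.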
@@ -460,30 +460,30 @@ static int action_nf_inject(marsio_buff_t *rx_buff, struct metadata *meta, struc

static void action_err_bypass(marsio_buff_t *rx_buff, struct metadata *meta, struct selected_sf *sf, struct thread_ctx *thread_ctx)
{
    struct global_metrics *g_metrics = thread_ctx->ref_metrics;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;

    int nsend = action_nf_inject(rx_buff, meta, sf, thread_ctx);
    if (nsend > 0)
    {
        throughput_metrics_inc(&(g_metrics->raw_pkt.error_bypass), 1, nsend);
        throughput_metrics_inc(&(thread_metrics->raw_pkt.error_bypass), 1, nsend);
    }
}

static void action_err_block(marsio_buff_t *rx_buff, struct metadata *meta, struct selected_sf *sf, struct thread_ctx *thread_ctx)
{
    struct global_metrics *g_metrics = thread_ctx->ref_metrics;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;
    struct packet_io *packet_io = thread_ctx->ref_io;
    int thread_index = thread_ctx->thread_index;

    int raw_len = marsio_buff_datalen(rx_buff);
    throughput_metrics_inc(&(g_metrics->raw_pkt.error_block), 1, raw_len);
    throughput_metrics_inc(&(thread_metrics->raw_pkt.error_block), 1, raw_len);
    marsio_buff_free(packet_io->instance, &rx_buff, 1, 0, thread_index);
}

// return nsend
static int action_nf_inject(marsio_buff_t *rx_buff, struct metadata *meta, struct selected_sf *sf, struct thread_ctx *thread_ctx)
{
    struct global_metrics *g_metrics = thread_ctx->ref_metrics;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;
    struct packet_io *packet_io = thread_ctx->ref_io;
    int thread_index = thread_ctx->thread_index;

@@ -496,29 +496,29 @@ static int action_nf_inject(marsio_buff_t *rx_buff, struct metadata *meta, struc

    int raw_len = marsio_buff_datalen(rx_buff);
    marsio_send_burst(packet_io->dev_nf_interface.mr_path, thread_index, &rx_buff, 1);
    throughput_metrics_inc(&(g_metrics->device.nf_tx), 1, raw_len);
    throughput_metrics_inc(&(thread_metrics->device.nf_tx), 1, raw_len);
    return raw_len;
}

static void action_mirr_bypass(marsio_buff_t *rx_buff, struct metadata *meta, struct selected_sf *sf, struct thread_ctx *thread_ctx)
{
    struct global_metrics *g_metrics = thread_ctx->ref_metrics;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;

    int raw_len = marsio_buff_datalen(rx_buff);
    throughput_metrics_inc(&(g_metrics->raw_pkt.mirr_bypass), 1, raw_len);
    throughput_metrics_inc(&(thread_metrics->raw_pkt.mirr_bypass), 1, raw_len);
}

static void action_mirr_block(marsio_buff_t *rx_buff, struct metadata *meta, struct selected_sf *sf, struct thread_ctx *thread_ctx)
{
    struct global_metrics *g_metrics = thread_ctx->ref_metrics;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;

    int raw_len = marsio_buff_datalen(rx_buff);
    throughput_metrics_inc(&(g_metrics->raw_pkt.mirr_block), 1, raw_len);
    throughput_metrics_inc(&(thread_metrics->raw_pkt.mirr_block), 1, raw_len);
}

static void action_mirr_forward(marsio_buff_t *rx_buff, struct metadata *meta, struct selected_sf *sf, struct thread_ctx *thread_ctx)
{
    struct global_metrics *g_metrics = thread_ctx->ref_metrics;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;
    struct packet_io *packet_io = thread_ctx->ref_io;
    int thread_index = thread_ctx->thread_index;

@@ -536,39 +536,39 @@ static void action_mirr_forward(marsio_buff_t *rx_buff, struct metadata *meta, s
    memcpy(copy_ptr, raw_data, raw_len);

    int nsend = send_packet_to_sf(new_buff, meta, sf, thread_ctx);
    throughput_metrics_inc(&(g_metrics->device.endpoint_tx), 1, nsend);
    throughput_metrics_inc(&(g_metrics->raw_pkt.mirr_tx), 1, raw_len);
    throughput_metrics_inc(&(thread_metrics->device.endpoint_tx), 1, nsend);
    throughput_metrics_inc(&(thread_metrics->raw_pkt.mirr_tx), 1, raw_len);
    throughput_metrics_inc(&sf->tx, 1, nsend);
    sf_metrics_inc(thread_ctx->sf_metrics, sf->rule_vsys_id, sf->rule_id, sf->sff_profile_id, sf->sf_profile_id, 0, 0, 1, nsend);
}

static void action_stee_bypass(marsio_buff_t *rx_buff, struct metadata *meta, struct selected_sf *sf, struct thread_ctx *thread_ctx)
{
    struct global_metrics *g_metrics = thread_ctx->ref_metrics;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;

    int raw_len = marsio_buff_datalen(rx_buff);
    throughput_metrics_inc(&(g_metrics->raw_pkt.stee_bypass), 1, raw_len);
    throughput_metrics_inc(&(thread_metrics->raw_pkt.stee_bypass), 1, raw_len);
}

static void action_stee_block(marsio_buff_t *rx_buff, struct metadata *meta, struct selected_sf *sf, struct thread_ctx *thread_ctx)
{
    struct global_metrics *g_metrics = thread_ctx->ref_metrics;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;
    struct packet_io *packet_io = thread_ctx->ref_io;
    int thread_index = thread_ctx->thread_index;

    int raw_len = marsio_buff_datalen(rx_buff);
    throughput_metrics_inc(&(g_metrics->raw_pkt.stee_block), 1, raw_len);
    throughput_metrics_inc(&(thread_metrics->raw_pkt.stee_block), 1, raw_len);
    marsio_buff_free(packet_io->instance, &rx_buff, 1, 0, thread_index);
}

static void action_stee_forward(marsio_buff_t *rx_buff, struct metadata *meta, struct selected_sf *sf, struct thread_ctx *thread_ctx)
{
    struct global_metrics *g_metrics = thread_ctx->ref_metrics;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;

    int raw_len = marsio_buff_datalen(rx_buff);
    int nsend = send_packet_to_sf(rx_buff, meta, sf, thread_ctx);
    throughput_metrics_inc(&(g_metrics->device.endpoint_tx), 1, nsend);
    throughput_metrics_inc(&(g_metrics->raw_pkt.stee_tx), 1, raw_len);
    throughput_metrics_inc(&(thread_metrics->device.endpoint_tx), 1, nsend);
    throughput_metrics_inc(&(thread_metrics->raw_pkt.stee_tx), 1, raw_len);
    throughput_metrics_inc(&sf->tx, 1, nsend);
    sf_metrics_inc(thread_ctx->sf_metrics, sf->rule_vsys_id, sf->rule_id, sf->sff_profile_id, sf->sf_profile_id, 0, 0, 1, nsend);
}
@@ -748,7 +748,7 @@ static void send_event_log(struct session_ctx *session_ctx, struct thread_ctx *t
{
    int nsend = 0;
    struct sce_ctx *sce_ctx = thread_ctx->ref_sce_ctx;
    struct global_metrics *g_metrics = thread_ctx->ref_metrics;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;
    struct selected_chaining *chaining_raw = session_ctx->chainings.chaining_raw;
    struct selected_chaining *chaining_decrypted = session_ctx->chainings.chaining_decrypted;

@@ -757,9 +757,9 @@ static void send_event_log(struct session_ctx *session_ctx, struct thread_ctx *t
        nsend = send_ctrl_packet(session_ctx, chaining_raw, thread_ctx);
        if (nsend > 0)
        {
            ATOMIC_INC(&(g_metrics->sf_session.log));
            throughput_metrics_inc(&(g_metrics->ctrl_pkt.tx), 1, nsend);
            throughput_metrics_inc(&(g_metrics->device.nf_tx), 1, nsend);
            ATOMIC_INC(&(thread_metrics->sf_session.log));
            throughput_metrics_inc(&(thread_metrics->ctrl_pkt.tx), 1, nsend);
            throughput_metrics_inc(&(thread_metrics->device.nf_tx), 1, nsend);
        }
    }

@@ -768,9 +768,9 @@ static void send_event_log(struct session_ctx *session_ctx, struct thread_ctx *t
        nsend = send_ctrl_packet(session_ctx, chaining_decrypted, thread_ctx);
        if (nsend > 0)
        {
            ATOMIC_INC(&(g_metrics->sf_session.log));
            throughput_metrics_inc(&(g_metrics->ctrl_pkt.tx), 1, nsend);
            throughput_metrics_inc(&(g_metrics->device.nf_tx), 1, nsend);
            ATOMIC_INC(&(thread_metrics->sf_session.log));
            throughput_metrics_inc(&(thread_metrics->ctrl_pkt.tx), 1, nsend);
            throughput_metrics_inc(&(thread_metrics->device.nf_tx), 1, nsend);
        }
    }
}
@@ -819,7 +819,7 @@ static void handle_policy_mutil_hits(struct policy_enforcer *enforcer, struct se

static void handle_session_opening(struct metadata *meta, struct ctrl_pkt_parser *ctrl_parser, struct thread_ctx *thread_ctx)
{
    struct global_metrics *g_metrics = thread_ctx->ref_metrics;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;
    struct policy_enforcer *enforcer = thread_ctx->ref_enforcer;
    struct session_table *session_table = thread_ctx->session_table;
    int chaining_size = policy_enforce_chaining_size(enforcer);
@@ -858,12 +858,12 @@ static void handle_session_opening(struct metadata *meta, struct ctrl_pkt_parser
    send_event_log(session_ctx, thread_ctx);

    session_table_insert(session_table, session_ctx->session_id, &session_ctx->inner_tuple4, session_ctx, session_value_free_cb);
    ATOMIC_INC(&(g_metrics->sf_session.num));
    ATOMIC_INC(&(thread_metrics->sf_session.num));
}

static void handle_session_closing(struct metadata *meta, struct ctrl_pkt_parser *ctrl_parser, struct thread_ctx *thread_ctx)
{
    struct global_metrics *g_metrics = thread_ctx->ref_metrics;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;
    struct session_table *session_table = thread_ctx->session_table;

    struct session_node *node = session_table_search_by_id(session_table, meta->session_id);
@@ -879,7 +879,7 @@ static void handle_session_closing(struct metadata *meta, struct ctrl_pkt_parser
        dump_sf_metrics(s_ctx, chaining_decrypted, "decrypted_traffic");

        session_table_delete_by_id(session_table, meta->session_id);
        ATOMIC_DEC(&(g_metrics->sf_session.num));
        ATOMIC_DEC(&(thread_metrics->sf_session.num));
    }
}

@@ -914,11 +914,11 @@ static void handle_session_active(struct metadata *meta, struct ctrl_pkt_parser

static void handle_session_resetall(struct metadata *meta, struct ctrl_pkt_parser *ctrl_parser, struct thread_ctx *thread_ctx)
{
    struct global_metrics *g_metrics = thread_ctx->ref_metrics;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;
    struct sce_ctx *sce_ctx = thread_ctx->ref_sce_ctx;

    LOG_ERROR("%s: session %lu resetall: notification clears all session tables !!!", LOG_TAG_PKTIO, meta->session_id);
    ATOMIC_ZERO(&(g_metrics->sf_session.num));
    ATOMIC_ZERO(&(thread_metrics->sf_session.num));
    for (int i = 0; i < sce_ctx->nr_worker_threads; i++)
    {
        struct thread_ctx *temp_ctx = &sce_ctx->work_threads[i];
@@ -932,7 +932,7 @@ static void handle_session_resetall(struct metadata *meta, struct ctrl_pkt_parse

static void handle_control_packet(marsio_buff_t *rx_buff, struct thread_ctx *thread_ctx)
{
    struct global_metrics *g_metrics = thread_ctx->ref_metrics;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;
    struct metadata meta;
    struct ctrl_pkt_parser ctrl_parser;

@@ -958,20 +958,20 @@ static void handle_control_packet(marsio_buff_t *rx_buff, struct thread_ctx *thr
    switch (ctrl_parser.state)
    {
    case SESSION_STATE_OPENING:
        ATOMIC_INC(&(g_metrics->ctrl_pkt.opening));
        ATOMIC_INC(&(thread_metrics->ctrl_pkt.opening));
        // when the session is opening, the firewall does not send a policy id
        // return handle_session_opening(&meta, &ctrl_parser, ctx);
        break;
    case SESSION_STATE_CLOSING:
        ATOMIC_INC(&(g_metrics->ctrl_pkt.closing));
        ATOMIC_INC(&(thread_metrics->ctrl_pkt.closing));
        handle_session_closing(&meta, &ctrl_parser, thread_ctx);
        break;
    case SESSION_STATE_ACTIVE:
        ATOMIC_INC(&(g_metrics->ctrl_pkt.active));
        ATOMIC_INC(&(thread_metrics->ctrl_pkt.active));
        handle_session_active(&meta, &ctrl_parser, thread_ctx);
        break;
    case SESSION_STATE_RESETALL:
        ATOMIC_INC(&(g_metrics->ctrl_pkt.resetall));
        ATOMIC_INC(&(thread_metrics->ctrl_pkt.resetall));
        handle_session_resetall(&meta, &ctrl_parser, thread_ctx);
        break;
    default:
@@ -980,14 +980,14 @@ static void handle_control_packet(marsio_buff_t *rx_buff, struct thread_ctx *thr
    return;

error_ctrl_pkt:
    ATOMIC_INC(&(g_metrics->ctrl_pkt.error));
    ATOMIC_INC(&(thread_metrics->ctrl_pkt.error));
    return;
}

static void handle_raw_packet(marsio_buff_t *rx_buff, struct thread_ctx *thread_ctx)
{
    struct session_table *session_table = thread_ctx->session_table;
    struct global_metrics *g_metrics = thread_ctx->ref_metrics;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;

    struct metadata meta;
    struct session_ctx *session_ctx = NULL;
@@ -1018,7 +1018,7 @@ static void handle_raw_packet(marsio_buff_t *rx_buff, struct thread_ctx *thread_
    session_ctx = raw_packet_search_session(session_table, meta.raw_data, meta.raw_len, meta.session_id);
    if (session_ctx == NULL)
    {
        throughput_metrics_inc(&(g_metrics->raw_pkt.miss_sess), 1, meta.raw_len);
        throughput_metrics_inc(&(thread_metrics->raw_pkt.miss_sess), 1, meta.raw_len);
        goto error_bypass;
    }

@@ -1048,7 +1048,7 @@ error_bypass:
static void handle_inject_packet(marsio_buff_t *rx_buff, struct thread_ctx *thread_ctx)
{
    struct session_table *session_table = thread_ctx->session_table;
    struct global_metrics *g_metrics = thread_ctx->ref_metrics;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;

    struct metadata meta;
    struct g_vxlan *g_vxlan_hdr = NULL;
@@ -1061,7 +1061,7 @@ static void handle_inject_packet(marsio_buff_t *rx_buff, struct thread_ctx *thre
    char *raw_data = marsio_buff_mtod(rx_buff);
    if (g_vxlan_decode(&g_vxlan_hdr, raw_data, raw_len) == -1)
    {
        throughput_metrics_inc(&(g_metrics->device.endpoint_drop), 1, raw_len);
        throughput_metrics_inc(&(thread_metrics->device.endpoint_drop), 1, raw_len);
        action_err_block(rx_buff, &meta, NULL, thread_ctx);
        return;
    }
@@ -1102,14 +1102,14 @@ static void handle_inject_packet(marsio_buff_t *rx_buff, struct thread_ctx *thre
    {
        LOG_DEBUG("%s: unexpected inject packet, session %lu %s with sf_profile_id %d executes mirror and does not require reflow, drop !!!",
                  LOG_TAG_PKTIO, session_ctx->session_id, session_ctx->session_addr, chaining->chaining[sf_index].sf_profile_id);
        throughput_metrics_inc(&(g_metrics->raw_pkt.mirr_rx_drop), 1, meta.raw_len);
        throughput_metrics_inc(&(thread_metrics->raw_pkt.mirr_rx_drop), 1, meta.raw_len);
        goto error_block;
    }
    else
    {
        struct selected_sf *sf = &(chaining->chaining[sf_index]);
        throughput_metrics_inc(&sf->rx, 1, raw_len);
        throughput_metrics_inc(&(g_metrics->raw_pkt.stee_rx), 1, meta.raw_len);
        throughput_metrics_inc(&(thread_metrics->raw_pkt.stee_rx), 1, meta.raw_len);
        sf_metrics_inc(thread_ctx->sf_metrics, sf->rule_vsys_id, sf->rule_id, sf->sff_profile_id, sf->sf_profile_id, 1, raw_len, 0, 0);
    }

@@ -1118,7 +1118,7 @@ static void handle_inject_packet(marsio_buff_t *rx_buff, struct thread_ctx *thre
    return;

error_block:
    throughput_metrics_inc(&(g_metrics->device.endpoint_drop), 1, raw_len);
    throughput_metrics_inc(&(thread_metrics->device.endpoint_drop), 1, raw_len);
    marsio_buff_adj(rx_buff, raw_len - meta.raw_len);
    action_err_block(rx_buff, &meta, NULL, thread_ctx);
}
@@ -1323,7 +1323,7 @@ void packet_io_thread_wait(struct packet_io *handle, struct thread_ctx *thread_c

int packet_io_thread_polling_nf(struct packet_io *handle, struct thread_ctx *thread_ctx)
{
    struct global_metrics *g_metrics = thread_ctx->ref_metrics;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;
    int thread_index = thread_ctx->thread_index;

    marsio_buff_t *rx_buffs[RX_BURST_MAX];
@@ -1339,8 +1339,8 @@ int packet_io_thread_polling_nf(struct packet_io *handle, struct thread_ctx *thr
    {
        int raw_len = marsio_buff_datalen(rx_buffs[j]);

        throughput_metrics_inc(&(g_metrics->device.nf_rx), 1, raw_len);
        throughput_metrics_inc(&(g_metrics->device.nf_tx), 1, raw_len);
        throughput_metrics_inc(&(thread_metrics->device.nf_rx), 1, raw_len);
        throughput_metrics_inc(&(thread_metrics->device.nf_tx), 1, raw_len);
    }

    marsio_send_burst(handle->dev_nf_interface.mr_path, thread_index, rx_buffs, nr_recv);
@@ -1354,28 +1354,28 @@ int packet_io_thread_polling_nf(struct packet_io *handle, struct thread_ctx *thr

        if (is_downlink_keepalive_packet(rx_buff))
        {
            throughput_metrics_inc(&(g_metrics->device.nf_rx), 1, raw_len);
            throughput_metrics_inc(&(g_metrics->device.nf_tx), 1, raw_len);
            throughput_metrics_inc(&(thread_metrics->device.nf_rx), 1, raw_len);
            throughput_metrics_inc(&(thread_metrics->device.nf_tx), 1, raw_len);

            throughput_metrics_inc(&(g_metrics->kee_pkt.downlink_rx), 1, raw_len);
            throughput_metrics_inc(&(g_metrics->kee_pkt.downlink_tx), 1, raw_len);
            throughput_metrics_inc(&(thread_metrics->kee_pkt.downlink_rx), 1, raw_len);
            throughput_metrics_inc(&(thread_metrics->kee_pkt.downlink_tx), 1, raw_len);

            marsio_send_burst(handle->dev_nf_interface.mr_path, thread_index, &rx_buff, 1);
        }
        else if (marsio_buff_is_ctrlbuf(rx_buff))
        {
            throughput_metrics_inc(&(g_metrics->device.nf_rx), 1, raw_len);
            throughput_metrics_inc(&(g_metrics->device.nf_tx), 1, raw_len);
            throughput_metrics_inc(&(thread_metrics->device.nf_rx), 1, raw_len);
            throughput_metrics_inc(&(thread_metrics->device.nf_tx), 1, raw_len);

            throughput_metrics_inc(&(g_metrics->ctrl_pkt.rx), 1, raw_len);
            throughput_metrics_inc(&(g_metrics->ctrl_pkt.tx), 1, raw_len);
            throughput_metrics_inc(&(thread_metrics->ctrl_pkt.rx), 1, raw_len);
            throughput_metrics_inc(&(thread_metrics->ctrl_pkt.tx), 1, raw_len);

            handle_control_packet(rx_buff, thread_ctx);
            marsio_send_burst(handle->dev_nf_interface.mr_path, thread_index, &rx_buff, 1);
        }
        else
        {
            throughput_metrics_inc(&(g_metrics->device.nf_rx), 1, raw_len);
            throughput_metrics_inc(&(thread_metrics->device.nf_rx), 1, raw_len);

            handle_raw_packet(rx_buff, thread_ctx);
        }
@@ -1386,7 +1386,7 @@ int packet_io_thread_polling_nf(struct packet_io *handle, struct thread_ctx *thr

int packet_io_thread_polling_endpoint(struct packet_io *handle, struct thread_ctx *thread_ctx)
{
    struct global_metrics *g_metrics = thread_ctx->ref_metrics;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;
    int thread_index = thread_ctx->thread_index;

    marsio_buff_t *rx_buffs[RX_BURST_MAX];
@@ -1402,8 +1402,8 @@ int packet_io_thread_polling_endpoint(struct packet_io *handle, struct thread_ct
    {
        int raw_len = marsio_buff_datalen(rx_buffs[j]);

        throughput_metrics_inc(&(g_metrics->device.endpoint_rx), 1, raw_len);
        throughput_metrics_inc(&(g_metrics->device.endpoint_tx), 1, raw_len);
        throughput_metrics_inc(&(thread_metrics->device.endpoint_rx), 1, raw_len);
        throughput_metrics_inc(&(thread_metrics->device.endpoint_tx), 1, raw_len);
    }

    marsio_send_burst(handle->dev_endpoint.mr_path, thread_index, rx_buffs, nr_recv);
@@ -1417,15 +1417,15 @@ int packet_io_thread_polling_endpoint(struct packet_io *handle, struct thread_ct

        if (is_uplink_keepalive_packet(rx_buff))
        {
            throughput_metrics_inc(&(g_metrics->device.endpoint_rx), 1, raw_len);
            throughput_metrics_inc(&(g_metrics->kee_pkt.uplink_rx), 1, raw_len);
            throughput_metrics_inc(&(g_metrics->kee_pkt.uplink_tx_drop), 1, raw_len);
            throughput_metrics_inc(&(thread_metrics->device.endpoint_rx), 1, raw_len);
            throughput_metrics_inc(&(thread_metrics->kee_pkt.uplink_rx), 1, raw_len);
            throughput_metrics_inc(&(thread_metrics->kee_pkt.uplink_tx_drop), 1, raw_len);

            marsio_buff_free(handle->instance, &rx_buff, 1, 0, thread_index);
        }
        else
        {
            throughput_metrics_inc(&(g_metrics->device.endpoint_rx), 1, raw_len);
            throughput_metrics_inc(&(thread_metrics->device.endpoint_rx), 1, raw_len);

            handle_inject_packet(rx_buff, thread_ctx);
        }