perf: performance optimizations
* io_uring draws packet buffers from a buffer pool to avoid memory allocation and freeing (see the buffer-pool sketch below)
* the packet io thread and the worker thread access cmsg without taking a lock
* a default TTL is set on the fds carrying decrypted traffic (see the setsockopt sketch below)
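The buffer pool itself is not part of the hunks shown below, which only touch the worker thread. As a rough illustration of the first bullet, a per-thread free-list pool that hands out preallocated packet buffers could look like the following sketch; the names (pkt_buf_pool, pkt_buf_get, pkt_buf_put) and sizes are hypothetical and not taken from this commit.

#include <stdlib.h>

#define POOL_BUF_SIZE  2048   /* matches MAX_REBUFF_SIZE used in the diff below */
#define POOL_NUM_BUFS  1024   /* illustrative pool depth */

struct pkt_buf {
    struct pkt_buf *next;            /* free-list link */
    char            data[POOL_BUF_SIZE]; /* payload area handed to reads */
};

struct pkt_buf_pool {
    struct pkt_buf *free_head;       /* singly linked free list */
    struct pkt_buf *slab;            /* one up-front allocation for all buffers */
};

/* Allocate every buffer once; afterwards get/put are pointer operations only. */
static int pkt_buf_pool_init(struct pkt_buf_pool *pool)
{
    pool->slab = calloc(POOL_NUM_BUFS, sizeof(struct pkt_buf));
    if (pool->slab == NULL)
        return -1;
    pool->free_head = NULL;
    for (int i = 0; i < POOL_NUM_BUFS; i++) {
        pool->slab[i].next = pool->free_head;
        pool->free_head = &pool->slab[i];
    }
    return 0;
}

/* Pop a buffer; no malloc on the fast path. Returns NULL when the pool is empty. */
static struct pkt_buf *pkt_buf_get(struct pkt_buf_pool *pool)
{
    struct pkt_buf *buf = pool->free_head;
    if (buf != NULL)
        pool->free_head = buf->next;
    return buf;
}

/* Push a buffer back; no free on the fast path. */
static void pkt_buf_put(struct pkt_buf_pool *pool, struct pkt_buf *buf)
{
    buf->next = pool->free_head;
    pool->free_head = buf;
}

Keeping one pool per worker thread would keep get/put lock-free as well; whether the commit organizes it that way is not visible in these hunks.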
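The TTL change is likewise outside these hunks; tap_c/tap_s in the diff are tap device fds, so the default TTL is presumably applied to a socket fd elsewhere in the commit. Assuming an IPv4 socket, setting a default TTL is a single setsockopt() call; the helper name and the value 64 below are illustrative only.

#include <netinet/in.h>
#include <sys/socket.h>

/* Apply a default TTL to every packet sent on an IPv4 socket.
 * An IPv6 socket would use IPPROTO_IPV6 / IPV6_UNICAST_HOPS instead. */
static int set_default_ttl(int sock_fd, int ttl)
{
    return setsockopt(sock_fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl));
}

/* usage (hypothetical): set_default_ttl(decrypt_sock_fd, 64); */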
@@ -126,19 +126,29 @@ static void *worker_thread_cycle(void *arg)
 {
     struct packet_io_thread_ctx *thread_ctx = (struct packet_io_thread_ctx *)arg;
     struct packet_io *handle = thread_ctx->ref_io;
-    void * logger = thread_ctx->logger;
+    void *logger = thread_ctx->logger;
 
+#define MAX_REBUFF_SIZE 2048
+    char buffer[MAX_REBUFF_SIZE];
     int pkg_len = 0;
     char thread_name[16];
-    int n_pkt_recv_from_nf = 0;
-    int n_pkt_recv_from_tap = 0;
-    int n_pkt_recv_from_tap_c = 0;
-    int n_pkt_recv_from_tap_s = 0;
+    int n_pkt_recv = 0;
+    int thread_index = thread_ctx->thread_index;
+    int using_iouring_mode = is_enable_iouring(handle);
 
-    snprintf(thread_name, sizeof(thread_name), "pkt:worker-%d", thread_ctx->thread_index);
+    int fd_on_tap_0 = thread_ctx->tap_ctx->tap_fd;
+    int fd_on_tap_c = thread_ctx->tap_ctx->tap_c;
+    int fd_on_tap_s = thread_ctx->tap_ctx->tap_s;
+
+    struct io_uring_instance *io_uring_on_tap_0 = thread_ctx->tap_ctx->io_uring_fd;
+    struct io_uring_instance *io_uring_on_tap_c = thread_ctx->tap_ctx->io_uring_c;
+    struct io_uring_instance *io_uring_on_tap_s = thread_ctx->tap_ctx->io_uring_s;
+
+    snprintf(thread_name, sizeof(thread_name), "pkt:worker-%d", thread_index);
     prctl(PR_SET_NAME, (unsigned long long)thread_name, NULL, NULL, NULL);
 
-    while (!worker_thread_ready) {
+    while (!worker_thread_ready)
+    {
         sleep(1);
     }
 
@@ -147,56 +157,59 @@ static void *worker_thread_cycle(void *arg)
         goto error_out;
     }
 
-    if (is_enable_iouring(handle)) {
-        io_uring_register_read_callback(thread_ctx->tap_ctx->io_uring_fd, handle_raw_packet_from_tap, thread_ctx);
-        io_uring_register_read_callback(thread_ctx->tap_ctx->io_uring_c, handle_decryption_packet_from_tap, thread_ctx);
-        io_uring_register_read_callback(thread_ctx->tap_ctx->io_uring_s, handle_decryption_packet_from_tap, thread_ctx);
-    }
-    else {
-        thread_ctx->tap_ctx->buff_size = 3000;
-        thread_ctx->tap_ctx->buff = ALLOC(char, thread_ctx->tap_ctx->buff_size);
+    if (using_iouring_mode)
+    {
+        io_uring_set_read_cb(io_uring_on_tap_0, handle_raw_packet_from_tap, thread_ctx);
+        io_uring_set_read_cb(io_uring_on_tap_c, handle_decryption_packet_from_tap, thread_ctx);
+        io_uring_set_read_cb(io_uring_on_tap_s, handle_decryption_packet_from_tap, thread_ctx);
     }
 
-    TFE_LOG_INFO(logger, "%s: worker thread %d is running", "LOG_TAG_KNI", thread_ctx->thread_index);
+    TFE_LOG_INFO(logger, "%s: worker thread %d is running", "LOG_TAG_KNI", thread_index);
 
-    while(1) {
-        n_pkt_recv_from_nf = packet_io_polling_nf_interface(handle, thread_ctx->thread_index, thread_ctx);
-        if (is_enable_iouring(handle)) {
-            n_pkt_recv_from_tap = io_uring_peek_ready_entrys(thread_ctx->tap_ctx->io_uring_fd);
-            n_pkt_recv_from_tap_c = io_uring_peek_ready_entrys(thread_ctx->tap_ctx->io_uring_c);
-            n_pkt_recv_from_tap_s = io_uring_peek_ready_entrys(thread_ctx->tap_ctx->io_uring_s);
+    while (1)
+    {
+        n_pkt_recv = packet_io_polling_nf_interface(handle, thread_index, thread_ctx);
+        if (using_iouring_mode)
+        {
+            n_pkt_recv += io_uring_polling(io_uring_on_tap_0);
+            n_pkt_recv += io_uring_polling(io_uring_on_tap_c);
+            n_pkt_recv += io_uring_polling(io_uring_on_tap_s);
         }
-        else {
-            if ((pkg_len = tap_read(thread_ctx->tap_ctx->tap_fd, thread_ctx->tap_ctx->buff, thread_ctx->tap_ctx->buff_size, logger)) > 0)
+        else
+        {
+            if ((pkg_len = tap_read(fd_on_tap_0, buffer, MAX_REBUFF_SIZE, logger)) > 0)
             {
-                handle_raw_packet_from_tap(thread_ctx->tap_ctx->buff, pkg_len, thread_ctx);
+                n_pkt_recv++;
+                handle_raw_packet_from_tap(buffer, pkg_len, thread_ctx);
             }
 
-            if ((pkg_len = tap_read(thread_ctx->tap_ctx->tap_c, thread_ctx->tap_ctx->buff, thread_ctx->tap_ctx->buff_size, logger)) > 0)
+            if ((pkg_len = tap_read(fd_on_tap_c, buffer, MAX_REBUFF_SIZE, logger)) > 0)
             {
-                handle_decryption_packet_from_tap(thread_ctx->tap_ctx->buff, pkg_len, thread_ctx);
+                n_pkt_recv++;
+                handle_decryption_packet_from_tap(buffer, pkg_len, thread_ctx);
             }
 
-            if ((pkg_len = tap_read(thread_ctx->tap_ctx->tap_s, thread_ctx->tap_ctx->buff, thread_ctx->tap_ctx->buff_size, logger)) > 0)
+            if ((pkg_len = tap_read(fd_on_tap_s, buffer, MAX_REBUFF_SIZE, logger)) > 0)
             {
-                handle_decryption_packet_from_tap(thread_ctx->tap_ctx->buff, pkg_len, thread_ctx);
+                n_pkt_recv++;
+                handle_decryption_packet_from_tap(buffer, pkg_len, thread_ctx);
            }
        }
 
-        if (n_pkt_recv_from_nf == 0 && n_pkt_recv_from_tap == 0 && n_pkt_recv_from_tap_c == 0 && n_pkt_recv_from_tap_s == 0)
+        if (n_pkt_recv == 0)
        {
            packet_io_thread_wait(handle, thread_ctx, -1);
        }
 
-        if (__atomic_fetch_add(&thread_ctx->session_table_need_reset, 0, __ATOMIC_RELAXED) > 0)
+        if (ATOMIC_READ(&thread_ctx->session_table_need_reset) > 0)
        {
            session_table_reset(thread_ctx->session_table);
-            __atomic_fetch_and(&thread_ctx->session_table_need_reset, 0, __ATOMIC_RELAXED);
+            ATOMIC_ZERO(&thread_ctx->session_table_need_reset);
        }
    }
 
 error_out:
-    TFE_LOG_ERROR(logger, "%s: worker thread %d exiting", LOG_TAG_SCE, thread_ctx->thread_index);
+    TFE_LOG_ERROR(logger, "%s: worker thread %d exiting", LOG_TAG_SCE, thread_index);
     return (void *)NULL;
 }
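ATOMIC_READ and ATOMIC_ZERO are introduced by this diff but defined elsewhere in the commit. Judging from the __atomic builtins they replace (a relaxed fetch_add of 0 to read, a relaxed fetch_and with 0 to clear), plausible minimal definitions would be the following; these are an assumption, not the commit's actual macros.

/* Assumed definitions, consistent with the relaxed __atomic builtins the diff removes. */
#define ATOMIC_READ(ptr)   __atomic_load_n((ptr), __ATOMIC_RELAXED)
#define ATOMIC_ZERO(ptr)   __atomic_store_n((ptr), 0, __ATOMIC_RELAXED)

A plain relaxed load/store avoids the read-modify-write cost of the original builtins while keeping the same flag semantics for session_table_need_reset, which only this worker thread clears in the hunk above.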