tango-tfe/platform/src/acceptor_kni_v4.cpp
#include <sys/prctl.h>
#include <unistd.h>
// standard headers used directly below (assert, errno/strerror, pthread_create,
// CPU_ZERO/CPU_SET, snprintf, free); they may also arrive transitively
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <linux/tcp.h>
#include <linux/netfilter.h> // for NF_ACCEPT
#include <libnetfilter_queue/libnetfilter_queue.h>
#include <linux/if_tun.h>
#include <MESA/MESA_prof_load.h>
#include <bpf_obj.h>
#include <tfe_utils.h>
#include <tfe_cmsg.h>
#include <proxy.h>
#include "io_uring.h"
#include "tfe_packet_io_fs.h"
#include "tfe_tcp_restore.h"
#include "acceptor_kni_v4.h"
#include "tap.h"
#include "tfe_packet_io.h"
#include "tfe_session_table.h"
#include "tfe_fieldstat.h"
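
// Process-wide logger handle for the packet-io subsystem; it is created in
// acceptor_kni_v4_create() below and exported (non-static) so that, presumably,
// other translation units can log through it as well.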
void * g_packet_io_logger = NULL;
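
// Drain one packet from a TAP fd. The fds appear to be non-blocking (an
// assumption consistent with the EAGAIN/EWOULDBLOCK handling below), so
// "no data" stays silent and only unexpected errors are logged. Returns
// read()'s result unchanged.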
static int tap_read(int tap_fd, char *buff, int buff_size, void *logger)
{
    int ret = read(tap_fd, buff, buff_size);
    if (ret < 0)
    {
        if (errno != EWOULDBLOCK && errno != EAGAIN)
        {
            TFE_LOG_ERROR(logger, "%s: unable to read data from tapfd %d: %s", LOG_TAG_PKTIO, tap_fd, strerror(errno));
        }
    }
    return ret;
}
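
// Build a fieldstat metric exporter from the [section] block of the profile:
// telegraf_ip and telegraf_port (no defaults), app_name (default "metric") and
// cycle (default 1000; a reporting period, presumably in milliseconds).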
static struct tfe_fieldstat_metric_t *create_fieldstat_instance(const char *profile, const char *section, int max_thread, void *logger)
{
    int cycle = 0;
    unsigned short telegraf_port = 0;
    char telegraf_ip[TFE_STRING_MAX] = {0};
    char app_name[TFE_STRING_MAX] = {0};
    struct tfe_fieldstat_metric_t *dynamic_fieldstat = NULL;

    MESA_load_profile_short_nodef(profile, section, "telegraf_port", (short *)&(telegraf_port));
    MESA_load_profile_string_nodef(profile, section, "telegraf_ip", telegraf_ip, sizeof(telegraf_ip));
    MESA_load_profile_string_def(profile, section, "app_name", app_name, sizeof(app_name), "metric");
    MESA_load_profile_int_def(profile, section, "cycle", &cycle, 1000);

    dynamic_fieldstat = tfe_fieldstat_metric_create(telegraf_ip, telegraf_port, app_name, cycle, max_thread, logger);
    if (dynamic_fieldstat == NULL)
    {
        TFE_LOG_ERROR(logger, "tfe fieldstat init failed: unable to create fieldstat metric.");
        return NULL;
    }
    TFE_LOG_INFO(logger, "tfe fieldstat telegraf_ip : %s", telegraf_ip);
    TFE_LOG_INFO(logger, "tfe fieldstat telegraf_port : %d", telegraf_port);
    TFE_LOG_INFO(logger, "tfe fieldstat app_name : %s", app_name);
    TFE_LOG_INFO(logger, "tfe fieldstat cycle : %d", cycle);
    return dynamic_fieldstat;
}
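
// Release everything acceptor_ctx_create() allocated. The error path hands in
// partially-initialized contexts, so the per-member destroy routines are
// assumed to tolerate NULL arguments.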
void acceptor_kni_v4_destroy(struct acceptor_kni_v4 *ctx)
{
    if (ctx)
    {
        packet_io_destory(ctx->io);
        packet_io_fs_destory(ctx->packet_io_fs);
        tfe_fieldstat_metric_destroy(ctx->metric);
        free(ctx);
    }
    return;
}
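
// Allocate the acceptor context and load its tunables from the [PACKET_IO]
// profile section (defaults in parentheses): firewall_sids (1000), proxy_sids
// (1001), service_chaining_sids (1002), packet_io_debug (0), packet_io_threads
// (8, capped at TFE_THREAD_MAX) and packet_io_cpu_affinity_mask, which pins
// each worker thread to one CPU from the mask.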
struct acceptor_kni_v4 *acceptor_ctx_create(const char *profile, void *logger)
{
    struct acceptor_kni_v4 *ctx = ALLOC(struct acceptor_kni_v4, 1);
    MESA_load_profile_int_def(profile, "PACKET_IO", "firewall_sids", (int *)&(ctx->firewall_sids), 1000);
    MESA_load_profile_int_def(profile, "PACKET_IO", "proxy_sids", (int *)&(ctx->proxy_sids), 1001);
    MESA_load_profile_int_def(profile, "PACKET_IO", "service_chaining_sids", (int *)&(ctx->sce_sids), 1002);
    MESA_load_profile_int_def(profile, "PACKET_IO", "packet_io_debug", (int *)&(ctx->debug), 0);
    MESA_load_profile_int_def(profile, "PACKET_IO", "packet_io_threads", (int *)&(ctx->nr_worker_threads), 8);
    MESA_load_profile_uint_range(profile, "PACKET_IO", "packet_io_cpu_affinity_mask", TFE_THREAD_MAX, (unsigned int *)ctx->cpu_affinity_mask);

    ctx->nr_worker_threads = MIN(ctx->nr_worker_threads, TFE_THREAD_MAX);
    CPU_ZERO(&ctx->coremask);
    for (int i = 0; i < ctx->nr_worker_threads; i++)
    {
        int cpu_id = ctx->cpu_affinity_mask[i];
        CPU_SET(cpu_id, &ctx->coremask);
    }

    ctx->io = packet_io_create(profile, ctx->nr_worker_threads, &ctx->coremask, logger);
    if (ctx->io == NULL)
    {
        goto error_out;
    }

    ctx->packet_io_fs = packet_io_fs_create(profile);
    if (ctx->packet_io_fs == NULL)
    {
        goto error_out;
    }

    ctx->metric = create_fieldstat_instance(profile, "proxy_hits", ctx->nr_worker_threads, logger);
    if (ctx->metric == NULL)
    {
        goto error_out;
    }
    return ctx;

error_out:
    acceptor_kni_v4_destroy(ctx);
    return NULL;
}
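
// Per-worker thread body: name the thread, wait for the global ready flag,
// bind the per-thread packet-io state, then loop forever servicing the
// netfilter queue and the three TAP fds, via io_uring completions when that
// mode is enabled and plain read() polling otherwise.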
static void *worker_thread_cycle(void *arg)
{
    struct packet_io_thread_ctx *thread_ctx = (struct packet_io_thread_ctx *)arg;
    struct packet_io *handle = thread_ctx->ref_io;
    void *logger = thread_ctx->logger;

#define MAX_REBUFF_SIZE 2048
    char buffer[MAX_REBUFF_SIZE];
    int pkg_len = 0;
    char thread_name[16];
    int n_pkt_recv = 0;
    int thread_index = thread_ctx->thread_index;
    int using_iouring_mode = is_enable_iouring(handle);

    int fd_on_tap_0 = thread_ctx->tap_ctx->tap_fd;
    int fd_on_tap_c = thread_ctx->tap_ctx->tap_c;
    int fd_on_tap_s = thread_ctx->tap_ctx->tap_s;
    struct io_uring_instance *io_uring_on_tap_0 = thread_ctx->tap_ctx->io_uring_fd;
    struct io_uring_instance *io_uring_on_tap_c = thread_ctx->tap_ctx->io_uring_c;
    struct io_uring_instance *io_uring_on_tap_s = thread_ctx->tap_ctx->io_uring_s;

    snprintf(thread_name, sizeof(thread_name), "pkt:worker-%d", thread_index);
    prctl(PR_SET_NAME, (unsigned long long)thread_name, NULL, NULL, NULL);

    while (!worker_thread_ready)
    {
        sleep(1);
    }

    if (packet_io_thread_init(handle, thread_ctx, logger) != 0)
    {
        goto error_out;
    }
    if (using_iouring_mode)
    {
        io_uring_set_read_cb(io_uring_on_tap_0, handle_raw_packet_from_tap, thread_ctx);
        io_uring_set_read_cb(io_uring_on_tap_c, handle_decryption_packet_from_tap, thread_ctx);
        io_uring_set_read_cb(io_uring_on_tap_s, handle_decryption_packet_from_tap, thread_ctx);
    }
    TFE_LOG_INFO(logger, "%s: worker thread %d is running", "LOG_TAG_KNI", thread_index);
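
    // Main event loop. tap_0 carries raw packets, while tap_c and tap_s carry
    // decrypted traffic (client and server side respectively, judging by the
    // naming). When a full pass finds no packets, the thread parks in
    // packet_io_thread_wait() until new work arrives.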
    while (1)
    {
        n_pkt_recv = packet_io_polling_nf_interface(handle, thread_index, thread_ctx);
        if (using_iouring_mode)
        {
            n_pkt_recv += io_uring_polling(io_uring_on_tap_0);
            n_pkt_recv += io_uring_polling(io_uring_on_tap_c);
            n_pkt_recv += io_uring_polling(io_uring_on_tap_s);
        }
        else
        {
            if ((pkg_len = tap_read(fd_on_tap_0, buffer, MAX_REBUFF_SIZE, logger)) > 0)
            {
                n_pkt_recv++;
                handle_raw_packet_from_tap(buffer, pkg_len, thread_ctx);
            }
            if ((pkg_len = tap_read(fd_on_tap_c, buffer, MAX_REBUFF_SIZE, logger)) > 0)
            {
                n_pkt_recv++;
                handle_decryption_packet_from_tap(buffer, pkg_len, thread_ctx);
            }
            if ((pkg_len = tap_read(fd_on_tap_s, buffer, MAX_REBUFF_SIZE, logger)) > 0)
            {
                n_pkt_recv++;
                handle_decryption_packet_from_tap(buffer, pkg_len, thread_ctx);
            }
        }
        if (n_pkt_recv == 0)
        {
            packet_io_thread_wait(handle, thread_ctx, -1);
        }
        if (ATOMIC_READ(&thread_ctx->session_table_need_reset) > 0)
        {
            session_table_reset(thread_ctx->session_table);
            ATOMIC_ZERO(&thread_ctx->session_table_need_reset);
        }
    }

error_out:
    TFE_LOG_ERROR(logger, "%s: worker thread %d exiting", LOG_TAG_SCE, thread_index);
    return (void *)NULL;
}
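
// Public entry point: create the runtime logger, build the shared acceptor
// context, set up a tap context and session table per worker, and finally
// spawn the worker threads.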
struct acceptor_kni_v4 *acceptor_kni_v4_create(struct tfe_proxy *proxy, const char *profile)
{
    void *packet_io_logger = (void *)MESA_create_runtime_log_handle("packet_io", RLOG_LV_DEBUG);
    assert(packet_io_logger != NULL);
    g_packet_io_logger = packet_io_logger;

    struct acceptor_kni_v4 *acceptor_ctx = acceptor_ctx_create(profile, packet_io_logger);
    if (acceptor_ctx == NULL)
        return NULL;

    acceptor_ctx->ref_proxy = proxy;
    for (int i = 0; i < acceptor_ctx->nr_worker_threads; i++) {
        acceptor_ctx->work_threads[i].tid = 0;
        acceptor_ctx->work_threads[i].thread_index = i;
        acceptor_ctx->work_threads[i].ref_io = acceptor_ctx->io;
        acceptor_ctx->work_threads[i].ref_acceptor_ctx = acceptor_ctx;
        acceptor_ctx->work_threads[i].tap_ctx = tfe_tap_ctx_create(&acceptor_ctx->work_threads[i]);
        if (acceptor_ctx->work_threads[i].tap_ctx == NULL)
            goto error_out;
        acceptor_ctx->work_threads[i].session_table = session_table_create();
        acceptor_ctx->work_threads[i].ref_proxy = proxy;
        acceptor_ctx->work_threads[i].ret_fs_state = acceptor_ctx->packet_io_fs;
        acceptor_ctx->work_threads[i].logger = packet_io_logger;
        acceptor_ctx->work_threads[i].session_table_need_reset = 0;
    }
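
    // All thread contexts are fully initialized before the first worker starts,
    // so a running worker never observes a half-built sibling entry.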
    for (int i = 0; i < acceptor_ctx->nr_worker_threads; i++) {
        struct packet_io_thread_ctx *thread_ctx = &acceptor_ctx->work_threads[i];
        // pthread_create() returns 0 on success and a positive error number on
        // failure, so the original "< 0" test could never catch an error.
        if (pthread_create(&thread_ctx->tid, NULL, worker_thread_cycle, (void *)thread_ctx) != 0)
        {
            goto error_out;
        }
    }
    return acceptor_ctx;

error_out:
    for (int i = 0; i < acceptor_ctx->nr_worker_threads; i++) {
        tfe_tap_ctx_destory(acceptor_ctx->work_threads[i].tap_ctx);
        session_table_destory(acceptor_ctx->work_threads[i].session_table);
    }
    acceptor_kni_v4_destroy(acceptor_ctx);
    return NULL;
}
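
/*
 * Minimal usage sketch (hypothetical call site; "proxy" is an existing
 * struct tfe_proxy and the profile path is made up for illustration):
 *
 *     struct acceptor_kni_v4 *acc = acceptor_kni_v4_create(proxy, "/etc/tfe/tfe.conf");
 *     if (acc == NULL)
 *         return -1;
 *     // ... run until shutdown; stopping and joining the workers is
 *     // handled outside this file ...
 *     acceptor_kni_v4_destroy(acc);
 */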