#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <sched.h>
#include <sys/prctl.h>
#include <linux/netfilter.h> // for NF_ACCEPT

#include "io_uring.h"
#include "tfe_packet_io_fs.h"
#include "tfe_tcp_restore.h"
#include "acceptor_kni_v4.h"
#include "tap.h"
#include "tfe_packet_io.h"
#include "tfe_session_table.h"
#include "tfe_fieldstat.h"

void *g_packet_io_logger = NULL;

static int tap_read(int tap_fd, char *buff, int buff_size, void *logger)
{
    int ret = read(tap_fd, buff, buff_size);
    if (ret < 0) {
        if (errno != EWOULDBLOCK && errno != EAGAIN) {
            TFE_LOG_ERROR(logger, "%s: unable to read data from tapfd %d, aborting: %s",
                          LOG_TAG_PKTIO, tap_fd, strerror(errno));
        }
    }
    return ret;
}

static struct tfe_fieldstat_metric_t *create_fieldstat_instance(const char *profile,
                                                                const char *section,
                                                                int max_thread,
                                                                void *logger)
{
    int cycle = 0;
    unsigned short telegraf_port = 0;
    char telegraf_ip[TFE_STRING_MAX] = {0};
    char app_name[TFE_STRING_MAX] = {0};
    struct tfe_fieldstat_metric_t *dynamic_fieldstat = NULL;

    MESA_load_profile_short_nodef(profile, section, "telegraf_port", (short *)&telegraf_port);
    MESA_load_profile_string_nodef(profile, section, "telegraf_ip", telegraf_ip, sizeof(telegraf_ip));
    MESA_load_profile_string_def(profile, section, "app_name", app_name, sizeof(app_name), "metric");
    MESA_load_profile_int_def(profile, section, "cycle", &cycle, 1000);

    dynamic_fieldstat = tfe_fieldstat_metric_create(telegraf_ip, telegraf_port, app_name,
                                                    cycle, max_thread, logger);
    if (dynamic_fieldstat == NULL) {
        TFE_LOG_ERROR(logger, "tfe fieldstat init failed: unable to create fieldstat metric.");
        return NULL;
    }

    TFE_LOG_INFO(logger, "tfe fieldstat telegraf_ip  : %s", telegraf_ip);
    TFE_LOG_INFO(logger, "tfe fieldstat telegraf_port: %d", telegraf_port);
    TFE_LOG_INFO(logger, "tfe fieldstat app_name     : %s", app_name);
    TFE_LOG_INFO(logger, "tfe fieldstat cycle        : %d", cycle);
    return dynamic_fieldstat;
}

void acceptor_kni_v4_destroy(struct acceptor_kni_v4 *ctx)
{
    if (ctx) {
        packet_io_destory(ctx->io);
        packet_io_fs_destory(ctx->packet_io_fs);
        tfe_fieldstat_metric_destroy(ctx->metric);
        free(ctx);
    }
}

struct acceptor_kni_v4 *acceptor_ctx_create(const char *profile, void *logger)
{
    struct acceptor_kni_v4 *ctx = ALLOC(struct acceptor_kni_v4, 1);
    if (ctx == NULL) {
        return NULL;
    }

    MESA_load_profile_int_def(profile, "PACKET_IO", "firewall_sids", (int *)&ctx->firewall_sids, 1000);
    MESA_load_profile_int_def(profile, "PACKET_IO", "proxy_sids", (int *)&ctx->proxy_sids, 1001);
    MESA_load_profile_int_def(profile, "PACKET_IO", "service_chaining_sids", (int *)&ctx->sce_sids, 1002);
    MESA_load_profile_int_def(profile, "PACKET_IO", "packet_io_debug", (int *)&ctx->debug, 0);
    MESA_load_profile_int_def(profile, "PACKET_IO", "packet_io_threads", (int *)&ctx->nr_worker_threads, 8);
    MESA_load_profile_uint_range(profile, "PACKET_IO", "packet_io_cpu_affinity_mask",
                                 TFE_THREAD_MAX, (unsigned int *)ctx->cpu_affinity_mask);

    ctx->nr_worker_threads = MIN(ctx->nr_worker_threads, TFE_THREAD_MAX);

    /* Build the cpuset used to pin the worker threads. */
    CPU_ZERO(&ctx->coremask);
    for (int i = 0; i < ctx->nr_worker_threads; i++) {
        CPU_SET(ctx->cpu_affinity_mask[i], &ctx->coremask);
    }

    ctx->io = packet_io_create(profile, ctx->nr_worker_threads, &ctx->coremask, logger);
    if (ctx->io == NULL) {
        goto error_out;
    }

    ctx->packet_io_fs = packet_io_fs_create(profile);
    if (ctx->packet_io_fs == NULL) {
        goto error_out;
    }

    ctx->metric = create_fieldstat_instance(profile, "proxy_hits", ctx->nr_worker_threads, logger);
    if (ctx->metric == NULL) {
        goto error_out;
    }
    return ctx;

error_out:
    acceptor_kni_v4_destroy(ctx);
    return NULL;
}
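/*
 * Illustrative profile snippet covering the keys read above. The
 * [PACKET_IO] values shown are the compiled-in defaults; telegraf_ip
 * and telegraf_port have no defaults and must be supplied. The range
 * syntax for packet_io_cpu_affinity_mask and the port value are
 * assumptions; the exact MESA profile grammar is defined elsewhere.
 *
 *   [PACKET_IO]
 *   firewall_sids = 1000
 *   proxy_sids = 1001
 *   service_chaining_sids = 1002
 *   packet_io_debug = 0
 *   packet_io_threads = 8
 *   packet_io_cpu_affinity_mask = 0-7
 *
 *   [proxy_hits]
 *   telegraf_ip = 127.0.0.1
 *   telegraf_port = 8094
 *   app_name = metric
 *   cycle = 1000
 */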
static void *worker_thread_cycle(void *arg)
{
    struct packet_io_thread_ctx *thread_ctx = (struct packet_io_thread_ctx *)arg;
    struct packet_io *handle = thread_ctx->ref_io;
    void *logger = thread_ctx->logger;
    int pkg_len = 0;
    char thread_name[16];
    int n_pkt_recv_from_nf = 0;
    int n_pkt_recv_from_tap = 0;
    int n_pkt_recv_from_tap_c = 0;
    int n_pkt_recv_from_tap_s = 0;

    snprintf(thread_name, sizeof(thread_name), "pkt:worker-%d", thread_ctx->thread_index);
    prctl(PR_SET_NAME, (unsigned long)thread_name, 0, 0, 0);

    while (!worker_thread_ready) {
        sleep(1);
    }

    if (packet_io_thread_init(handle, thread_ctx, logger) != 0) {
        goto error_out;
    }

    if (is_enable_iouring(handle)) {
        io_uring_register_read_callback(thread_ctx->tap_ctx->io_uring_fd,
                                        handle_raw_packet_from_tap, thread_ctx);
        io_uring_register_read_callback(thread_ctx->tap_ctx->io_uring_c,
                                        handle_decryption_packet_from_tap, thread_ctx);
        io_uring_register_read_callback(thread_ctx->tap_ctx->io_uring_s,
                                        handle_decryption_packet_from_tap, thread_ctx);
    } else {
        /* Fallback path: one shared read buffer, sized above the usual MTU. */
        thread_ctx->tap_ctx->buff_size = 3000;
        thread_ctx->tap_ctx->buff = ALLOC(char, thread_ctx->tap_ctx->buff_size);
    }

    TFE_LOG_INFO(logger, "%s: worker thread %d is running", LOG_TAG_PKTIO, thread_ctx->thread_index);

    while (1) {
        n_pkt_recv_from_nf = packet_io_polling_nf_interface(handle, thread_ctx->thread_index, thread_ctx);

        if (is_enable_iouring(handle)) {
            n_pkt_recv_from_tap   = io_uring_peek_ready_entrys(thread_ctx->tap_ctx->io_uring_fd);
            n_pkt_recv_from_tap_c = io_uring_peek_ready_entrys(thread_ctx->tap_ctx->io_uring_c);
            n_pkt_recv_from_tap_s = io_uring_peek_ready_entrys(thread_ctx->tap_ctx->io_uring_s);
        } else {
            /* Track reads so a productive iteration does not fall into the idle wait. */
            n_pkt_recv_from_tap = n_pkt_recv_from_tap_c = n_pkt_recv_from_tap_s = 0;
            if ((pkg_len = tap_read(thread_ctx->tap_ctx->tap_fd, thread_ctx->tap_ctx->buff,
                                    thread_ctx->tap_ctx->buff_size, logger)) > 0) {
                handle_raw_packet_from_tap(thread_ctx->tap_ctx->buff, pkg_len, thread_ctx);
                n_pkt_recv_from_tap++;
            }
            if ((pkg_len = tap_read(thread_ctx->tap_ctx->tap_c, thread_ctx->tap_ctx->buff,
                                    thread_ctx->tap_ctx->buff_size, logger)) > 0) {
                handle_decryption_packet_from_tap(thread_ctx->tap_ctx->buff, pkg_len, thread_ctx);
                n_pkt_recv_from_tap_c++;
            }
            if ((pkg_len = tap_read(thread_ctx->tap_ctx->tap_s, thread_ctx->tap_ctx->buff,
                                    thread_ctx->tap_ctx->buff_size, logger)) > 0) {
                handle_decryption_packet_from_tap(thread_ctx->tap_ctx->buff, pkg_len, thread_ctx);
                n_pkt_recv_from_tap_s++;
            }
        }

        /* Nothing arrived on any source: block until the next event. */
        if (n_pkt_recv_from_nf == 0 && n_pkt_recv_from_tap == 0 &&
            n_pkt_recv_from_tap_c == 0 && n_pkt_recv_from_tap_s == 0) {
            packet_io_thread_wait(handle, thread_ctx, -1);
        }

        if (__atomic_load_n(&thread_ctx->session_table_need_reset, __ATOMIC_RELAXED) > 0) {
            session_table_reset(thread_ctx->session_table);
            __atomic_store_n(&thread_ctx->session_table_need_reset, 0, __ATOMIC_RELAXED);
        }
    }

error_out:
    TFE_LOG_ERROR(logger, "%s: worker thread %d exiting", LOG_TAG_PKTIO, thread_ctx->thread_index);
    return NULL;
}
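/*
 * Sketch of the reset protocol implied by session_table_need_reset: a
 * control-plane thread raises the per-worker flag, the worker notices it
 * after a poll cycle, flushes its table, and clears the flag. The helper
 * below is hypothetical (not part of the original module) and only
 * illustrates the producer side of that handshake.
 */
static inline void request_session_table_reset(struct acceptor_kni_v4 *acc, int worker_index)
{
    /* Relaxed ordering matches the worker's relaxed load/clear above. */
    __atomic_fetch_add(&acc->work_threads[worker_index].session_table_need_reset,
                       1, __ATOMIC_RELAXED);
}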
struct acceptor_kni_v4 *acceptor_kni_v4_create(struct tfe_proxy *proxy, const char *profile)
{
    void *packet_io_logger = (void *)MESA_create_runtime_log_handle("packet_io", RLOG_LV_DEBUG);
    assert(packet_io_logger != NULL);
    g_packet_io_logger = packet_io_logger;

    struct acceptor_kni_v4 *acceptor_ctx = acceptor_ctx_create(profile, packet_io_logger);
    if (acceptor_ctx == NULL) {
        return NULL;
    }
    acceptor_ctx->ref_proxy = proxy;

    for (int i = 0; i < acceptor_ctx->nr_worker_threads; i++) {
        acceptor_ctx->work_threads[i].tid = 0;
        acceptor_ctx->work_threads[i].thread_index = i;
        acceptor_ctx->work_threads[i].ref_io = acceptor_ctx->io;
        acceptor_ctx->work_threads[i].ref_acceptor_ctx = acceptor_ctx;
        acceptor_ctx->work_threads[i].tap_ctx = tfe_tap_ctx_create(&acceptor_ctx->work_threads[i]);
        if (acceptor_ctx->work_threads[i].tap_ctx == NULL) {
            goto error_out;
        }
        acceptor_ctx->work_threads[i].session_table = session_table_create();
        if (acceptor_ctx->work_threads[i].session_table == NULL) {
            goto error_out;
        }
        acceptor_ctx->work_threads[i].ref_proxy = proxy;
        acceptor_ctx->work_threads[i].ret_fs_state = acceptor_ctx->packet_io_fs;
        acceptor_ctx->work_threads[i].logger = packet_io_logger;
        acceptor_ctx->work_threads[i].session_table_need_reset = 0;
    }

    for (int i = 0; i < acceptor_ctx->nr_worker_threads; i++) {
        struct packet_io_thread_ctx *thread_ctx = &acceptor_ctx->work_threads[i];
        /* pthread_create() returns an error number, not -1, on failure. */
        if (pthread_create(&thread_ctx->tid, NULL, worker_thread_cycle, (void *)thread_ctx) != 0) {
            goto error_out;
        }
    }
    return acceptor_ctx;

error_out:
    for (int i = 0; i < acceptor_ctx->nr_worker_threads; i++) {
        tfe_tap_ctx_destory(acceptor_ctx->work_threads[i].tap_ctx);
        session_table_destory(acceptor_ctx->work_threads[i].session_table);
    }
    acceptor_kni_v4_destroy(acceptor_ctx);
    return NULL;
}
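/*
 * Usage sketch (hypothetical caller; the profile path is illustrative):
 *
 *   struct acceptor_kni_v4 *acc = acceptor_kni_v4_create(proxy, "/etc/tfe/tfe.conf");
 *   if (acc == NULL)
 *       abort();
 *   ...
 *   acceptor_kni_v4_destroy(acc);
 *
 * acceptor_kni_v4_create() spawns nr_worker_threads pollers pinned via
 * the [PACKET_IO] affinity mask; each spins until the module-level
 * worker_thread_ready flag (defined elsewhere) is raised, then services
 * the nf interface and its three tap descriptors. The destroy call
 * releases the packet-io handle, the fs state, and the fieldstat metric.
 */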