2023-02-10 14:22:40 +08:00
|
|
|
#include <errno.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/prctl.h>

#include "sce.h"
#include "log.h"
#include "utils.h"
#include "sf_metrics.h"
#include "health_check.h"
#include "global_metrics.h"
|
2023-02-10 14:22:40 +08:00
|
|
|
|
2023-02-20 11:16:34 +08:00
|
|
|
static void sig_handler(int signo)
|
|
|
|
|
{
|
|
|
|
|
if (signo == SIGHUP)
|
|
|
|
|
{
|
|
|
|
|
LOG_INFO("%s: recv SIGHUP, reload zlog.conf", LOG_TAG_SCE);
|
|
|
|
|
LOG_RELOAD();
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-03-02 16:13:02 +08:00
|
|
|
/*
 * Pin the calling thread to a single CPU core.
 *
 * core_id: zero-based index of the core to bind to.
 *
 * Returns 0 on success, EINVAL when core_id is out of range (or when the
 * number of online cores cannot be determined), or the error code from
 * pthread_setaffinity_np() on failure.
 */
static int thread_set_affinity(int core_id)
{
    /* sysconf() returns long and -1 on error; the original stored it in an
     * int and never checked for failure. */
    long num_cores = sysconf(_SC_NPROCESSORS_ONLN);
    if (num_cores < 0 || core_id < 0 || (long)core_id >= num_cores)
    {
        return EINVAL;
    }

    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(core_id, &cpuset);

    return pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);
}
|
|
|
|
|
|
2023-02-10 14:22:40 +08:00
|
|
|
static void *worker_thread_cycle(void *arg)
|
|
|
|
|
{
|
|
|
|
|
struct thread_ctx *thread_ctx = (struct thread_ctx *)arg;
|
|
|
|
|
struct packet_io *handle = thread_ctx->ref_io;
|
|
|
|
|
int n_packet_recv;
|
|
|
|
|
|
2023-03-02 16:13:02 +08:00
|
|
|
char thread_name[16];
|
|
|
|
|
snprintf(thread_name, sizeof(thread_name), "sce:worker-%d", thread_ctx->thread_index);
|
|
|
|
|
prctl(PR_SET_NAME, (unsigned long long)thread_name, NULL, NULL, NULL);
|
|
|
|
|
|
|
|
|
|
char affinity[32] = {0};
|
|
|
|
|
if (thread_ctx->cpu_mask >= 0)
|
|
|
|
|
{
|
|
|
|
|
thread_set_affinity(thread_ctx->cpu_mask);
|
|
|
|
|
snprintf(affinity, sizeof(affinity), "affinity cpu%d", thread_ctx->cpu_mask);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
LOG_INFO("%s: worker thread %d %s is running", LOG_TAG_SCE, thread_ctx->thread_index, thread_ctx->cpu_mask >= 0 ? affinity : "");
|
2023-02-10 14:22:40 +08:00
|
|
|
|
|
|
|
|
while (1)
|
|
|
|
|
{
|
|
|
|
|
n_packet_recv = packet_io_polling_nf_interface(handle, thread_ctx->thread_index, thread_ctx);
|
|
|
|
|
if (n_packet_recv)
|
|
|
|
|
{
|
2023-02-27 14:37:31 +08:00
|
|
|
// LOG_INFO("%s: worker thread %d recv %03d packets from nf_interface", LOG_TAG_SCE, thread_ctx->thread_index, n_packet_recv);
|
2023-02-10 14:22:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
n_packet_recv = packet_io_polling_endpoint(handle, thread_ctx->thread_index, thread_ctx);
|
|
|
|
|
if (n_packet_recv)
|
|
|
|
|
{
|
2023-02-27 14:37:31 +08:00
|
|
|
// LOG_INFO("%s: worker thread %d recv %03d packets from endpoint", LOG_TAG_SCE, thread_ctx->thread_index, n_packet_recv);
|
2023-02-10 14:22:40 +08:00
|
|
|
}
|
|
|
|
|
|
2023-02-17 17:45:39 +08:00
|
|
|
if (__atomic_fetch_add(&thread_ctx->session_table_need_reset, 0, __ATOMIC_RELAXED) > 0)
|
|
|
|
|
{
|
|
|
|
|
session_table_reset(thread_ctx->session_table);
|
|
|
|
|
__atomic_fetch_and(&thread_ctx->session_table_need_reset, 0, __ATOMIC_RELAXED);
|
|
|
|
|
}
|
2023-02-28 19:03:35 +08:00
|
|
|
|
|
|
|
|
if (__atomic_fetch_add(&thread_ctx->sf_metrics_need_send, 0, __ATOMIC_RELAXED) > 0)
|
|
|
|
|
{
|
|
|
|
|
sf_metrics_send(thread_ctx->sf_metrics);
|
|
|
|
|
sf_metrics_reset(thread_ctx->sf_metrics);
|
|
|
|
|
__atomic_fetch_and(&thread_ctx->sf_metrics_need_send, 0, __ATOMIC_RELAXED);
|
|
|
|
|
}
|
2023-02-10 14:22:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
LOG_ERROR("%s: worker thread %d exiting", LOG_TAG_SCE, thread_ctx->thread_index);
|
|
|
|
|
return (void *)NULL;
|
|
|
|
|
}
|
|
|
|
|
|
2023-01-10 09:57:29 +08:00
|
|
|
int main(int argc, char **argv)
|
|
|
|
|
{
|
2023-02-10 14:22:40 +08:00
|
|
|
const char *profile = "./conf/sce.conf";
|
|
|
|
|
|
2023-02-20 11:16:34 +08:00
|
|
|
if (LOG_INIT("./conf/zlog.conf") == -1)
|
|
|
|
|
{
|
|
|
|
|
return -1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (signal(SIGHUP, sig_handler) == SIG_ERR)
|
|
|
|
|
{
|
|
|
|
|
LOG_ERROR("%s: unable to register SIGHUP signal handler, error %d: %s", LOG_TAG_SCE, errno, strerror(errno));
|
|
|
|
|
LOG_CLOSE();
|
|
|
|
|
return -1;
|
|
|
|
|
}
|
|
|
|
|
|
2023-02-24 15:41:57 +08:00
|
|
|
health_check_session_init(profile);
|
|
|
|
|
|
2023-02-10 14:22:40 +08:00
|
|
|
struct sce_ctx *ctx = sce_ctx_create(profile);
|
|
|
|
|
if (ctx == NULL)
|
|
|
|
|
{
|
2023-02-20 11:16:34 +08:00
|
|
|
LOG_CLOSE();
|
2023-02-10 14:22:40 +08:00
|
|
|
return -1;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (int i = 0; i < ctx->nr_worker_threads; i++)
|
|
|
|
|
{
|
|
|
|
|
ctx->work_threads[i].tid = 0;
|
|
|
|
|
ctx->work_threads[i].thread_index = i;
|
|
|
|
|
ctx->work_threads[i].session_table = session_table_create();
|
2023-02-28 19:03:35 +08:00
|
|
|
ctx->work_threads[i].sf_metrics = sf_metrics_create(profile);
|
2023-02-10 14:22:40 +08:00
|
|
|
ctx->work_threads[i].ref_io = ctx->io;
|
|
|
|
|
ctx->work_threads[i].ref_metrics = ctx->metrics;
|
|
|
|
|
ctx->work_threads[i].ref_enforcer = ctx->enforcer;
|
2023-02-17 17:45:39 +08:00
|
|
|
ctx->work_threads[i].ref_sce_ctx = ctx;
|
|
|
|
|
ctx->work_threads[i].session_table_need_reset = 0;
|
2023-02-28 19:03:35 +08:00
|
|
|
ctx->work_threads[i].sf_metrics_need_send = 0;
|
2023-03-02 16:13:02 +08:00
|
|
|
ctx->work_threads[i].cpu_mask = ctx->enable_cpu_affinity ? ctx->cpu_affinity_mask[i] : -1;
|
2023-02-10 14:22:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for (int i = 0; i < ctx->nr_worker_threads; i++)
|
|
|
|
|
{
|
|
|
|
|
struct thread_ctx *thread_ctx = &ctx->work_threads[i];
|
|
|
|
|
if (pthread_create(&thread_ctx->tid, NULL, worker_thread_cycle, (void *)thread_ctx) < 0)
|
|
|
|
|
{
|
|
|
|
|
LOG_ERROR("%s: unable to create worker thread %d, error %d: %s", LOG_TAG_SCE, i, errno, strerror(errno));
|
|
|
|
|
goto error_out;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-02-28 19:03:35 +08:00
|
|
|
struct timespec current_time;
|
|
|
|
|
struct timespec g_metrics_last_send_time;
|
|
|
|
|
struct timespec sf_metrics_last_send_time;
|
|
|
|
|
|
|
|
|
|
clock_gettime(CLOCK_MONOTONIC, ¤t_time);
|
|
|
|
|
clock_gettime(CLOCK_MONOTONIC, &g_metrics_last_send_time);
|
|
|
|
|
clock_gettime(CLOCK_MONOTONIC, &sf_metrics_last_send_time);
|
|
|
|
|
|
2023-02-10 14:22:40 +08:00
|
|
|
while (1)
|
|
|
|
|
{
|
2023-02-28 19:03:35 +08:00
|
|
|
if (current_time.tv_sec - g_metrics_last_send_time.tv_sec >= ctx->metrics->config.statsd_cycle)
|
|
|
|
|
{
|
|
|
|
|
clock_gettime(CLOCK_MONOTONIC, &g_metrics_last_send_time);
|
|
|
|
|
global_metrics_dump(ctx->metrics);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (current_time.tv_sec - sf_metrics_last_send_time.tv_sec >= sf_metrics_get_interval(ctx->work_threads[0].sf_metrics))
|
|
|
|
|
{
|
|
|
|
|
clock_gettime(CLOCK_MONOTONIC, &sf_metrics_last_send_time);
|
|
|
|
|
for (int i = 0; i < ctx->nr_worker_threads; i++)
|
|
|
|
|
{
|
|
|
|
|
struct thread_ctx *thread_ctx = &ctx->work_threads[i];
|
|
|
|
|
__atomic_fetch_add(&thread_ctx->sf_metrics_need_send, 1, __ATOMIC_RELAXED);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
sleep(MIN(ctx->metrics->config.statsd_cycle, sf_metrics_get_interval(ctx->work_threads[0].sf_metrics)));
|
|
|
|
|
clock_gettime(CLOCK_MONOTONIC, ¤t_time);
|
2023-02-10 14:22:40 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
error_out:
|
|
|
|
|
for (int i = 0; i < ctx->nr_worker_threads; i++)
|
|
|
|
|
{
|
|
|
|
|
struct thread_ctx *thread_ctx = &ctx->work_threads[i];
|
|
|
|
|
session_table_destory(thread_ctx->session_table);
|
2023-02-28 19:03:35 +08:00
|
|
|
sf_metrics_destory(thread_ctx->sf_metrics);
|
2023-02-10 14:22:40 +08:00
|
|
|
}
|
|
|
|
|
sce_ctx_destory(ctx);
|
|
|
|
|
|
2023-02-20 11:16:34 +08:00
|
|
|
LOG_CLOSE();
|
|
|
|
|
|
2023-01-10 09:57:29 +08:00
|
|
|
return 0;
|
2023-02-10 14:22:40 +08:00
|
|
|
}
|