Add load_balance config option: TFE supports round_robin/least_conn load-balancing algorithms

luwenpeng
2020-07-28 17:41:54 +08:00
parent c82429c9d9
commit f1b2d63816
3 changed files with 37 additions and 9 deletions

View File

@@ -8,6 +8,8 @@ enable_cpu_affinity=0
 # the first mask for acceptor thread
 # the others mask for worker thread
 cpu_affinity_mask=1-9,10-12
+# LEAST_CONN = 0; ROUND_ROBIN = 1, default 1
+load_balance=1
 [kni]
 ip=192.168.100.1
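The new key defaults to ROUND_ROBIN (1). Following the mapping in the comment above, switching the proxy to least-connection scheduling only requires changing the value; a minimal sketch, assuming the same [system] section that the loader calls below read from:

[system]
# LEAST_CONN = 0; ROUND_ROBIN = 1
load_balance=0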

View File

@@ -80,6 +80,12 @@ struct tfe_proxy_accept_para
     unsigned int keyring_id;
 };
+enum tfe_load_balance_algo
+{
+    LEAST_CONN = 0,
+    ROUND_ROBIN = 1,
+};
 struct tfe_proxy
 {
     char name[TFE_SYMBOL_MAX];
@@ -128,6 +134,9 @@ struct tfe_proxy
     /* cpu affinity */
     unsigned int enable_cpu_affinity;
     unsigned int cpu_affinity_mask[TFE_THREAD_MAX];
+    /* load balancing */
+    enum tfe_load_balance_algo load_balance;
 };
 extern struct tfe_proxy * g_default_proxy;

View File

@@ -93,19 +93,34 @@ static __attribute__((__used__)) const char * tfe_version = "Unknown";
 struct tfe_thread_ctx * tfe_proxy_thread_ctx_acquire(struct tfe_proxy * ctx)
 {
     unsigned int min_thread_id = 0;
+    unsigned int min_load;
     static unsigned int counter=0;
     counter++;
-    /*
+    // least_conn
+    if (ctx->load_balance == LEAST_CONN)
+    {
         for (unsigned int tid = 0; tid < ctx->nr_work_threads; tid++)
         {
             struct tfe_thread_ctx * thread_ctx = ctx->work_threads[tid];
             unsigned int thread_load = ATOMIC_READ(&thread_ctx->load);
+            if (tid == 0)
+            {
+                min_thread_id = tid;
+                min_load = thread_load;
+                continue;
+            }
             min_thread_id = min_load > thread_load ? tid : min_thread_id;
             min_load = min_load > thread_load ? thread_load : min_load;
         }
-    */
-    min_thread_id=counter%ctx->nr_work_threads;
+    }
+    // round_robin
+    else
+    {
+        min_thread_id = counter % ctx->nr_work_threads;
+    }
     ATOMIC_INC(&ctx->work_threads[min_thread_id]->load);
     return ctx->work_threads[min_thread_id];
 }
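For readers outside the TFE tree, here is a self-contained sketch of the same selection logic: least_conn scans per-thread load counters and keeps the minimum, while round_robin simply cycles a counter. The names (lb_algo, pick_thread, loads, NR_THREADS) are hypothetical, and plain integers stand in for TFE's ATOMIC_* counters.

#include <stdio.h>

enum lb_algo { LB_LEAST_CONN = 0, LB_ROUND_ROBIN = 1 };

#define NR_THREADS 4

/* Hypothetical stand-in for tfe_proxy_thread_ctx_acquire: returns the index
 * of the worker thread that should take the next connection and bumps its load. */
static unsigned int pick_thread(enum lb_algo algo, unsigned int loads[NR_THREADS])
{
    static unsigned int counter = 0;
    unsigned int pick = 0;

    counter++;
    if (algo == LB_LEAST_CONN)
    {
        /* least_conn: keep the worker with the smallest current load */
        unsigned int min_load = loads[0];
        for (unsigned int tid = 1; tid < NR_THREADS; tid++)
        {
            if (loads[tid] < min_load)
            {
                min_load = loads[tid];
                pick = tid;
            }
        }
    }
    else
    {
        /* round_robin: rotate through the workers */
        pick = counter % NR_THREADS;
    }
    loads[pick]++; /* mirrors the ATOMIC_INC on the chosen thread's load */
    return pick;
}

int main(void)
{
    unsigned int loads[NR_THREADS] = { 3, 0, 5, 2 };
    printf("least_conn  -> thread %u\n", pick_thread(LB_LEAST_CONN, loads));  /* picks thread 1 */
    printf("round_robin -> thread %u\n", pick_thread(LB_ROUND_ROBIN, loads));
    return 0;
}

Compared with the committed version, seeding min_load from loads[0] avoids the tid == 0 special case inside the loop; the behavior is otherwise the same.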
@@ -308,6 +323,8 @@ int tfe_proxy_config(struct tfe_proxy * proxy, const char * profile)
     MESA_load_profile_uint_def(profile, "system", "buffer_output_limit", &proxy->buffer_output_limit, 0);
     MESA_load_profile_uint_def(profile, "system", "enable_cpu_affinity", &proxy->enable_cpu_affinity, 0);
     MESA_load_profile_uint_range(profile, "system", "cpu_affinity_mask", TFE_THREAD_MAX, proxy->cpu_affinity_mask);
+    // LEAST_CONN = 0; ROUND_ROBIN = 1
+    MESA_load_profile_uint_def(profile, "system", "load_balance", (unsigned int *)&proxy->load_balance, ROUND_ROBIN);
     if (proxy->nr_work_threads < 1 || proxy->nr_work_threads > TFE_THREAD_MAX)
     {