Add TCP passthrough support and get the plaintext forwarding path working end to end.
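For context, the caller-facing shape of the new passthrough path is sketched below. This is only an illustration: the exact layout of struct tfe_proxy_accept_para and the origin of the two file descriptors are assumptions; the field names (downstream_fd, upstream_fd, session_type, passthrough) and the entry point tfe_proxy_fds_accept are the ones that appear in this diff.

    /* Hypothetical caller-side helper, not part of this commit. */
    #include <stdbool.h>
    #include <string.h>

    static int accept_plain_passthrough(struct tfe_proxy * proxy, int client_fd, int server_fd)
    {
        struct tfe_proxy_accept_para para;
        memset(&para, 0, sizeof(para));           /* layout of the struct is assumed */

        para.downstream_fd = client_fd;           /* accepted client socket          */
        para.upstream_fd   = server_fd;           /* connected origin-server socket  */
        para.session_type  = SESSION_PROTO_PLAIN;
        para.passthrough   = true;                /* relay bytes without inspection  */

        /* Hands the fd pair to the least-loaded worker thread. */
        return tfe_proxy_fds_accept(proxy, &para);
    }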
@@ -28,47 +28,80 @@
#include <proxy.h>
#include <kni_acceptor.h>
#include <tcp_stream.h>
#include <MESA/MESA_prof_load.h>

static int signals[] = {SIGTERM, SIGQUIT, SIGHUP, SIGINT, SIGPIPE, SIGUSR1};
static int signals[] = {SIGTERM, SIGQUIT, SIGHUP, SIGPIPE, SIGUSR1};

/* Global resources */
void * g_default_logger = NULL;
struct tfe_proxy * g_default_proxy = NULL;

/* Per-thread resources */
thread_local unsigned int __currect_thread_id = 0;
thread_local void * __currect_default_logger = NULL;

const char * module_name_pxy = "TFE_PXY";
extern struct tfe_instance * g_tfe_instance;

/* Pick the worker thread with the fewest active streams and account one more stream on it. */
struct tfe_thread_ctx * tfe_proxy_thread_ctx_acquire(struct tfe_proxy * ctx)
{
    unsigned int min_thread_id = 0;
    unsigned int min_load = ctx->work_threads[0]->load;

    for (unsigned int tid = 1; tid < ctx->nr_work_threads; tid++)
    {
        struct tfe_thread_ctx * thread_ctx = ctx->work_threads[tid];
        min_thread_id = thread_ctx->load < min_load ? tid : min_thread_id;
        min_load      = thread_ctx->load < min_load ? thread_ctx->load : min_load;
    }

    ctx->work_threads[min_thread_id]->load++;
    return ctx->work_threads[min_thread_id];
}

void tfe_proxy_thread_ctx_release(struct tfe_thread_ctx * thread_ctx)
{
    thread_ctx->load--;
}
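The acquire/release pair above is plain load accounting: acquire picks the worker with the fewest active streams and bumps its counter, release gives the slot back. Only the acquire side is visible in this diff (in tfe_proxy_fds_accept below); assuming release is meant to be called from the stream teardown path, the pairing would look roughly like this:

    /* Sketch only; the teardown hook name is hypothetical. */
    static void on_stream_closed(struct tfe_stream * stream, struct tfe_thread_ctx * thread_ctx)
    {
        /* ... per-stream cleanup ... */
        tfe_proxy_thread_ctx_release(thread_ctx);   /* undo the acquire-time load++ */
    }

Note that acquire runs on the accepting thread while release would then run on a worker, so the load counter would eventually need atomic updates or single-thread ownership.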
int tfe_proxy_fds_accept(struct tfe_proxy * ctx, const struct tfe_proxy_accept_para * para)
{
    tfe_thread_ctx * worker_thread_ctx = tfe_proxy_thread_ctx_acquire(ctx);

    struct tfe_stream * stream = tfe_stream_create(ctx, worker_thread_ctx);
    tfe_stream_option_set(stream, TFE_STREAM_OPT_SESSION_TYPE, &para->session_type, sizeof(para->session_type));

    /* FOR DEBUG: force plaintext passthrough, either per connection or for all TCP streams. */
    if (para->passthrough || ctx->tcp_all_passthrough)
    {
        bool __true = true;
        enum tfe_session_proto __session_type = SESSION_PROTO_PLAIN;

        tfe_stream_option_set(stream, TFE_STREAM_OPT_PASSTHROUGH, &__true, sizeof(__true));
        tfe_stream_option_set(stream, TFE_STREAM_OPT_SESSION_TYPE, &__session_type, sizeof(__session_type));
    }

    tfe_stream_init_by_fds(stream, para->downstream_fd, para->upstream_fd);

    TFE_LOG_DEBUG(ctx->logger, "%p, Fds(downstream = %d, upstream = %d, type = %d) accepted",
                  stream, para->downstream_fd, para->upstream_fd, para->session_type);

    return 0;
}
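The passthrough case ultimately just shuttles bytes between the two descriptors. The forwarding itself lives in tcp_stream.c, which is not part of this hunk, so purely as an illustration of the idea (not TFE code), a plaintext relay between two already-connected sockets can be built from stock libevent bufferevents:

    #include <event2/bufferevent.h>
    #include <event2/buffer.h>
    #include <event2/event.h>

    /* Copy whatever arrives on one side to its peer. */
    static void relay_read_cb(struct bufferevent * bev, void * arg)
    {
        struct bufferevent * peer = (struct bufferevent *) arg;
        bufferevent_write_buffer(peer, bufferevent_get_input(bev));
    }

    /* Wire two connected sockets together on one event base. */
    static void relay_fds(struct event_base * base, evutil_socket_t down_fd, evutil_socket_t up_fd)
    {
        struct bufferevent * down = bufferevent_socket_new(base, down_fd, BEV_OPT_CLOSE_ON_FREE);
        struct bufferevent * up   = bufferevent_socket_new(base, up_fd,   BEV_OPT_CLOSE_ON_FREE);

        bufferevent_setcb(down, relay_read_cb, NULL, NULL, up);
        bufferevent_setcb(up,   relay_read_cb, NULL, NULL, down);

        bufferevent_enable(down, EV_READ | EV_WRITE);
        bufferevent_enable(up,   EV_READ | EV_WRITE);
    }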
void tfe_proxy_loopbreak(tfe_proxy * ctx)
{
    event_base_loopbreak(ctx->evbase);
}

void tfe_proxy_free(tfe_proxy * ctx)
{
    return;
}

static void __dummy_event_handler(evutil_socket_t fd, short what, void * arg)
{
    return;
}

/*
 * Thread entry point; runs the event loop of the event base.
 * Does not exit until the libevent loop is broken explicitly.
 */
static void * __tfe_thrmgr_thread_entry(void * arg)
{
    struct tfe_thread_ctx * ctx = (struct tfe_thread_ctx *) arg;
    struct timeval timer_delay = {60, 0};

    struct event * ev;
    ev = event_new(ctx->evbase, -1, EV_PERSIST, __dummy_event_handler, NULL);

    if (!ev) return (void *)NULL;

    evtimer_add(ev, &timer_delay);
    ctx->running = 1;

    __currect_thread_id = ctx->thread_id;
    event_base_dispatch(ctx->evbase);
    event_free(ev);

    return (void *)NULL;
}
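The dummy persistent timer above is a standard libevent idiom: event_base_dispatch() returns as soon as the base has no pending events, so a worker that starts with an empty base needs at least one registered event to keep its loop alive until streams are attached to it. A minimal standalone illustration (plain libevent 2, link with -levent; not TFE code):

    #include <event2/event.h>
    #include <stdio.h>

    static void noop_cb(evutil_socket_t fd, short what, void * arg)
    {
        (void)fd; (void)what; (void)arg;
    }

    int main(void)
    {
        struct event_base * base = event_base_new();
        struct timeval one_minute = {60, 0};

        /* Without this event, dispatch() would return immediately. */
        struct event * keepalive = event_new(base, -1, EV_PERSIST, noop_cb, NULL);
        evtimer_add(keepalive, &one_minute);

        printf("dispatching; the loop now blocks until event_base_loopbreak()\n");
        event_base_dispatch(base);

        event_free(keepalive);
        event_base_free(base);
        return 0;
    }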
static void proxy_signal_cb(evutil_socket_t fd, short what, void * arg)
static void __signal_handler_cb(evutil_socket_t fd, short what, void * arg)
{
    tfe_proxy * ctx = (tfe_proxy *) arg;
    switch (fd)
@@ -81,178 +114,124 @@ static void proxy_signal_cb(evutil_socket_t fd, short what, void * arg)
        case SIGUSR1:
            break;
        case SIGPIPE:
            TFE_LOG_ERROR(ctx->main_logger, "Warning: Received SIGPIPE; ignoring.\n");
            TFE_LOG_ERROR(ctx->logger, "Warning: Received SIGPIPE; ignoring.\n");
            break;
        default:
            TFE_LOG_ERROR(ctx->main_logger, "Warning: Received unexpected signal %i\n", fd);
            TFE_LOG_ERROR(ctx->logger, "Warning: Received unexpected signal %i\n", fd);
            break;
    }
}

static void proxy_gc_cb(evutil_socket_t fd, short what, void * arg)
static void __gc_handler_cb(evutil_socket_t fd, short what, void * arg)
{
    tfe_proxy * ctx = (tfe_proxy *) arg;
    (void)fd;
    (void)what;
}
unsigned int select_work_thread(struct tfe_proxy * pxy)
{
    unsigned int min_thread_id = 0;
    size_t min_load = pxy->work_threads[min_thread_id].load;

    for (unsigned thread_id = 1; thread_id < pxy->nr_work_threads; thread_id++)
    {
        if (min_load > pxy->work_threads[thread_id].load)
        {
            min_load = pxy->work_threads[thread_id].load;
            min_thread_id = thread_id;
        }
    }

    pxy->work_threads[min_thread_id].load++;
    return min_thread_id;
}

/*
 * Callback for accept events on the socket listener bufferevent.
 */
int tfe_proxy_fds_accept(struct tfe_proxy * ctx, const struct tfe_proxy_accept_para * para)
{
    unsigned int worker_tid = select_work_thread(ctx);
    tfe_thread_ctx * worker_thread_ctx = &ctx->work_threads[worker_tid];

    struct tfe_stream * stream = tfe_stream_create(ctx, worker_thread_ctx);
    tfe_stream_init_by_fds(stream, para->session_type, para->downstream_fd, para->upstream_fd);

    return 0;
}

static void * __thread_ctx_entry(void * arg)
{
    struct tfe_thread_ctx * ctx = (struct tfe_thread_ctx *) arg;
    struct timeval timer_delay = {60, 0};

    struct event * ev = event_new(ctx->evbase, -1, EV_PERSIST, __dummy_event_handler, NULL);
    if (unlikely(ev == NULL))
    {
        TFE_LOG_ERROR(g_default_logger, "Failed at creating dummy event for thread %u", ctx->thread_id);
        exit(EXIT_FAILURE);
    }

    evtimer_add(ev, &timer_delay);
    ctx->running = 1;
    __currect_thread_id = ctx->thread_id;

    TFE_LOG_INFO(g_default_logger, "Thread %u is running...", ctx->thread_id);
    event_base_dispatch(ctx->evbase);
    event_free(ev);

    return (void *)NULL;
}
/*
 * Set up the core event loop.
 * Socket clisock is the privsep client socket used for binding to ports.
 * Returns ctx on success, or NULL on error.
 */
struct tfe_proxy * tfe_proxy_new(const char * profile)
{
    struct tfe_proxy * proxy = ALLOC(struct tfe_proxy, 1);
    assert(proxy != NULL);

    struct timeval gc_delay = {60, 0};

    /* adds locking, only required if accessed from separate threads */
    evthread_use_pthreads();
    event_enable_debug_mode();

    proxy->evbase = event_base_new();
    proxy->nr_modules = 2;
    proxy->modules = ALLOC(struct tfe_plugin, proxy->nr_modules);

    proxy->modules[0].proto = APP_PROTO_HTTP1;
    proxy->modules[1].proto = APP_PROTO_HTTP2;

    proxy->work_threads = ALLOC(struct tfe_thread_ctx, proxy->nr_work_threads);
    proxy->io_mod = kni_acceptor_init(proxy, profile, NULL);

    for (unsigned int i = 0; i < proxy->nr_work_threads; i++)
    {
        proxy->work_threads[i].thread_id = i;
        proxy->work_threads[i].evbase = event_base_new();
        proxy->work_threads[i].nr_modules = proxy->nr_modules;
        proxy->work_threads[i].modules = proxy->modules;
    }

    //TODO: Do not install signal handlers here if there are multiple proxy instances.
    for (size_t i = 0; i < (sizeof(signals) / sizeof(int)); i++)
    {
        proxy->sev[i] = evsignal_new(proxy->evbase, signals[i], proxy_signal_cb, proxy);
        if (!proxy->sev[i]) goto error_out;
        evsignal_add(proxy->sev[i], NULL);
    }

    proxy->gcev = event_new(proxy->evbase, -1, EV_PERSIST, proxy_gc_cb, proxy);
    if (!proxy->gcev) goto error_out;

    evtimer_add(proxy->gcev, &gc_delay);
    return proxy;

error_out:
    if (proxy->gcev)
    {
        event_free(proxy->gcev);
    }

    for (size_t i = 0; i < (sizeof(proxy->sev) / sizeof(proxy->sev[0])); i++)
    {
        if (proxy->sev[i])
        {
            event_free(proxy->sev[i]);
        }
    }

    for (typeof(proxy->nr_work_threads) i = 0; i < proxy->nr_work_threads; i++)
    {
        proxy->work_threads[i].thread_id = i;
        event_base_free(proxy->work_threads[i].evbase);
    }

    event_base_free(proxy->evbase);

    free(proxy);
    return NULL;
}

struct tfe_thread_ctx * __thread_ctx_create(struct tfe_proxy * proxy, unsigned int thread_id)
{
    struct tfe_thread_ctx * __thread_ctx = ALLOC(struct tfe_thread_ctx, 1);
    assert(__thread_ctx != NULL);

    __thread_ctx->thread_id = thread_id;
    __thread_ctx->evbase = event_base_new();

    int ret = pthread_create(&__thread_ctx->thr, NULL, __thread_ctx_entry, (void *)__thread_ctx);
    if (unlikely(ret != 0))
    {
        TFE_LOG_ERROR(proxy->logger, "Failed at pthread_create() for thread %u: %s", thread_id, strerror(ret));
        goto __errout;
    }

    return __thread_ctx;

__errout:
    if (__thread_ctx != NULL && __thread_ctx->evbase != NULL) event_base_free(__thread_ctx->evbase);
    if (__thread_ctx != NULL) free(__thread_ctx);
    return NULL;
}
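A side note on the error path above: pthread_create() reports failure through its non-zero return value and does not set errno, which is why the check reads ret != 0 and the message uses strerror(ret). A minimal standalone example (compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static void * worker(void * arg) { (void)arg; return NULL; }

    int main(void)
    {
        pthread_t thr;
        int ret = pthread_create(&thr, NULL, worker, NULL);
        if (ret != 0)
        {
            /* ret, not errno, carries the error code. */
            fprintf(stderr, "pthread_create: %s\n", strerror(ret));
            return 1;
        }
        pthread_join(thr, NULL);
        return 0;
    }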
/*
 * Run the event loop. Returns when the event loop is cancelled by a signal
 * or on failure.
 */
void tfe_proxy_run(struct tfe_proxy * proxy)
{
    unsigned int thread_id;
    for (thread_id = 0; thread_id < proxy->nr_work_threads; thread_id++)
    {
        if (pthread_create(&(proxy->work_threads[thread_id].thr), NULL,
                           __tfe_thrmgr_thread_entry, &(proxy->work_threads[thread_id])))
        {
            MESA_handle_runtime_log(proxy->main_logger, RLOG_LV_FATAL, proxy->name, "pthread_create failed.");
        }

        while (!proxy->work_threads[thread_id].running)
        {
            sched_yield();
        }
    }

    event_base_dispatch(proxy->evbase);
}

int tfe_proxy_config(struct tfe_proxy * proxy, const char * profile)
{
    MESA_load_profile_uint_def(profile, "main", "nr_worker_threads", &proxy->nr_work_threads, 1);
    MESA_load_profile_uint_def(profile, "debug", "passthrough_all_tcp", &proxy->tcp_all_passthrough, 0);
    return 0;
}
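tfe_proxy_config() pulls two unsigned integers out of the profile via MESA_load_profile_uint_def(profile, section, key, &value, default). Assuming the profile uses the INI-style section/key layout that this signature suggests (the exact MESA profile syntax is not shown in this commit), a minimal ./conf/tfe.conf enabling four workers and global TCP passthrough might look like:

    [main]
    nr_worker_threads = 4

    [debug]
    passthrough_all_tcp = 1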
/*
 * Break the loop of the proxy, causing tfe_proxy_run to return.
 */
void proxy_loopbreak(tfe_proxy * ctx)
{
    event_base_loopbreak(ctx->evbase);
}

/*
 * Free the proxy data structures.
 */
void proxy_free(tfe_proxy * ctx)
{

}

#define CHECK_OR_EXIT(condition, fmt, ...) \
    do { if (!(condition)) { TFE_LOG_ERROR(g_default_logger, fmt, ##__VA_ARGS__); exit(EXIT_FAILURE); } } while (0)
int main(int argc, char *argv[])
{
    const char* main_profile = "./conf/tfe_main.conf";
    const char* main_profile = "./conf/tfe.conf";

    tfe_proxy *proxy = NULL;
    void* wcfg_handle = NULL;
    g_default_logger = MESA_create_runtime_log_handle("log/tfe.log", RLOG_LV_DEBUG);
    if (unlikely(g_default_logger == NULL))
    {
        fprintf(stderr, "Failed at creating default logger: %s\n", "log/tfe.log");
        exit(EXIT_FAILURE);
    }

    //TODO: Initiate Local Cert Cache, Decryption Mirror, Field Stat, Logger, etc.
    //NOTICE: Maat, Cert Store and Tango Cache are initialized in the business plugin.
    /* PROXY INSTANCE */
    g_default_proxy = ALLOC(struct tfe_proxy, 1);
    assert(g_default_proxy);

    proxy = tfe_proxy_new(main_profile);
    tfe_proxy_run(proxy);
    proxy_free(proxy);
    /* CONFIG */
    int ret = tfe_proxy_config(g_default_proxy, main_profile);
    CHECK_OR_EXIT(ret == 0, "Failed at loading profile %s, Exit.", main_profile);

    /* LOGGER */
    g_default_proxy->logger = g_default_logger;

    /* MAIN THREAD EVBASE */
    g_default_proxy->evbase = event_base_new();
    CHECK_OR_EXIT(g_default_proxy->evbase, "Failed at creating evbase for main thread. Exit.");

    /* GC EVENT */
    g_default_proxy->gcev = event_new(g_default_proxy->evbase, -1, EV_PERSIST, __gc_handler_cb, g_default_proxy);
    CHECK_OR_EXIT(g_default_proxy->gcev, "Failed at creating GC event. Exit.");

    /* MODULE INIT */
    g_default_proxy->kni_acceptor_handler = kni_acceptor_init(g_default_proxy, main_profile, g_default_logger);
    CHECK_OR_EXIT(g_default_proxy->kni_acceptor_handler, "Failed at init KNI acceptor. Exit.");

    struct timeval gc_delay = {60, 0};
    evtimer_add(g_default_proxy->gcev, &gc_delay);

    /* WORKER THREAD */
    for (unsigned tid = 0; tid < g_default_proxy->nr_work_threads; tid++)
    {
        g_default_proxy->work_threads[tid] = __thread_ctx_create(g_default_proxy, tid);
        CHECK_OR_EXIT(g_default_proxy->work_threads[tid], "Failed at creating thread %u", tid);
    }

    TFE_LOG_ERROR(g_default_logger, "Tango Frontend Engine initialized.");
    event_base_dispatch(g_default_proxy->evbase);

    return 0;
}