Add the common_data_center field to control policy logs

Adjust control policy statistics counting
Remove the fatal log from the user-defined check for decrypted traffic
fengweihao
2020-10-23 19:03:08 +08:00
committed by 卢文朋
parent 99731ae689
commit b1c3ba754a
8 changed files with 89 additions and 5 deletions

View File

@@ -5,6 +5,7 @@ enum RESOURCE_TYPE
STATIC_MAAT,
KAFKA_LOGGER,
DEVICE_ID,
DATA_CENTER,
};
enum TABLE_TYPE

View File

@@ -14,9 +14,11 @@ struct maat_table_info
int id;
const char *name;
};
static Maat_feather_t static_maat = NULL;
static tfe_kafka_logger_t *kafka_logger = NULL;
static char *device_id = NULL;
static char *data_center=NULL;
static Maat_feather_t create_maat_feather(const char *instance_name, const char *profile, const char *section, int max_thread, void *logger)
{
@@ -231,6 +233,61 @@ finish:
return (char *)device_def_id;
}
static char* create_data_center(const char *profile, const char *section, void *logger)
{
int i = 0;
char *data_center = NULL;
char accept_tag_key[TFE_PATH_MAX] = {0};
char accept_path[TFE_PATH_MAX] = {0}, accept_tags[TFE_STRING_MAX] = {0};
MESA_load_profile_string_def(profile, section, "accept_path", accept_path, sizeof(accept_path), "");
if(strlen(accept_path) > 0)
{
MESA_load_profile_string_def(accept_path, "maat", "ACCEPT_TAGS", accept_tags, sizeof(accept_tags), "");
}
if(strlen(accept_tags) <= 0)
{
return NULL;
}
MESA_load_profile_string_def(profile, section, "accept_tag_key", accept_tag_key, sizeof(accept_tag_key), "data_center");
cJSON *object=cJSON_Parse(accept_tags);
if(object == NULL)
{
return NULL;
}
cJSON *array=cJSON_GetObjectItem(object, "tags");
if(array==NULL || array->type!=cJSON_Array)
{
TFE_LOG_ERROR(logger, "Invalid tags parameter: %s invalid json format", accept_tags);
goto finish;
}
for(i=0; i<cJSON_GetArraySize(array); i++)
{
cJSON *item=cJSON_GetArrayItem(array, i);
if(!item)
{
continue;
}
cJSON *tag_item=cJSON_GetObjectItem(item, "tag");
if(tag_item && tag_item->valuestring!=NULL &&
(memcmp(accept_tag_key, tag_item->valuestring, strlen(accept_tag_key)))==0)
{
cJSON *sub_item=cJSON_GetObjectItem(item, "value");
if(sub_item && sub_item->valuestring!=NULL)
{
data_center = tfe_strdup(sub_item->valuestring);
TFE_LOG_INFO(logger, "tfe data center : %s", data_center);
}
}
}
finish:
cJSON_Delete(object);
return data_center;
}
static struct maat_table_info maat_pub_tables[TABLE_TYPE_MAX] = {
{0, "TSG_SECURITY_SOURCE_ASN"},
{0, "TSG_SECURITY_DESTINATION_ASN"},
@@ -271,6 +328,8 @@ int tfe_bussiness_resouce_init()
device_id = cerate_device_id(profile_path, "kafka", g_default_logger);
data_center = create_data_center(profile_path, "MAAT", g_default_logger);
if (register_maat_table())
{
return -1;
@@ -289,6 +348,8 @@ void *tfe_bussiness_resouce_get(enum RESOURCE_TYPE type)
return kafka_logger;
case DEVICE_ID:
return device_id;
case DATA_CENTER:
return data_center;
default:
return NULL;
}
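
For reference, create_data_center() above only defines the JSON shape it can consume: a top-level "tags" array of objects carrying "tag" and "value" strings, from which it keeps the value whose tag equals accept_tag_key (default "data_center"). A minimal sketch of an ACCEPT_TAGS payload that would satisfy this parser; the concrete values are invented placeholders, not taken from a real tsg_device_tag.json:

{
    "tags": [
        { "tag": "device_id",   "value": "tsg-device-01" },
        { "tag": "data_center", "value": "dc-example-01" }
    ]
}

If no matching tag is found, create_data_center() returns NULL and the common_data_center field is simply omitted from the logs.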

View File

@@ -155,6 +155,7 @@ stat_switch=1
perf_switch=1
table_info=resource/pangu/table_info.conf
accept_path=/opt/tsg/etc/tsg_device_tag.json
accept_tag_key=device_id
stat_file=log/pangu_scan.fs2
effect_interval_s=1
deferred_load_on=0

View File

@@ -286,6 +286,7 @@ int doh_kafka_init(const char *profile, struct doh_conf *conf)
return 0;
}
conf->device_id = (const char *)tfe_bussiness_resouce_get(DEVICE_ID);
conf->data_center = (const char *)tfe_bussiness_resouce_get(DATA_CENTER);
conf->kafka_logger = (tfe_kafka_logger_t *)tfe_bussiness_resouce_get(KAFKA_LOGGER);
if (conf->kafka_logger && !conf->kafka_logger->enable)
{
@@ -401,6 +402,11 @@ int doh_send_log(struct doh_conf *handle, const struct tfe_http_session *http, c
cJSON_AddNumberToObject(common_obj, "common_s2c_byte_num", s2c_byte_num);
cJSON_AddStringToObject(common_obj, "doh_url", http->req->req_spec.url);
cJSON_AddStringToObject(common_obj, "doh_host", http->req->req_spec.host);
if(handle->data_center)
{
cJSON_AddStringToObject(common_obj, "common_data_center", handle->data_center);
}
for (size_t i = 0; i < sizeof(req_fields) / sizeof(struct json_spec); i++)
{
tmp_val = tfe_http_std_field_read(http->req, req_fields[i].field_id);
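
The net effect on the emitted DoH log: when a data center string was resolved at startup, the common object carries one extra field. An illustrative fragment only, with field names taken from the code above and placeholder values:

{
    "common_s2c_byte_num": 2048,
    "doh_url": "/dns-query",
    "doh_host": "dns.example.com",
    "common_data_center": "dc-example-01"
}

The same guarded cJSON_AddStringToObject() pattern is applied to the control policy log in pangu_send_log() further down.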

View File

@@ -59,6 +59,7 @@ struct doh_conf
int entry_id;
int en_sendlog;
const char *device_id;
const char *data_center;
tfe_kafka_logger_t *kafka_logger;
int fs_id[DOH_STAT_MAX];

View File

@@ -2071,7 +2071,6 @@ void enforce_control_policy(const struct tfe_stream * stream, const struct tfe_h
case PG_ACTION_NONE:
break;
case PG_ACTION_MONIT:
ATOMIC_INC(&(g_pangu_rt->stat_val[STAT_ACTION_MONIT]));
//send log on close.
break;
case PG_ACTION_REJECT:
@@ -2407,6 +2406,7 @@ void pangu_on_http_begin(const struct tfe_stream * stream,
}
if (ctx->action == PG_ACTION_WHITELIST)
{
ATOMIC_INC(&(g_pangu_rt->stat_val[STAT_ACTION_WHITELSIT]));
TFE_LOG_INFO(g_pangu_rt->local_logger, "Bypass rules matched on http begin: url=%s policy id=%d.",
session->req->req_spec.url, ctx->enforce_rules[0].config_id);
tfe_http_session_detach(session);
@@ -2422,7 +2422,7 @@ static inline int ctx_actually_replaced(struct pangu_http_ctx * ctx)
if(ctx->action == PG_ACTION_MANIPULATE &&
ctx->param->action == MA_ACTION_REPLACE &&
- ctx->n_enforce==1 && ctx->rep_ctx->actually_replaced==1)
+ ctx->rep_ctx->actually_replaced==1)
{
return 1;
}
@@ -2437,7 +2437,7 @@ static inline int ctx_actually_inserted(struct pangu_http_ctx * ctx)
if(ctx->action == PG_ACTION_MANIPULATE &&
ctx->param->action == MA_ACTION_INSERT &&
- ctx->n_enforce==1 && ctx->ins_ctx->actually_inserted==1)
+ ctx->ins_ctx->actually_inserted==1)
{
return 1;
}
@@ -2452,7 +2452,7 @@ static inline int ctx_actually_manipulate(struct pangu_http_ctx * ctx)
if(ctx->action == PG_ACTION_MANIPULATE &&
(ctx->param->action == MA_ACTION_REDIRECT ||
ctx->param->action == MA_ACTION_HIJACK)&&
- ctx->n_enforce==1 && ctx->manipulate_replaced==1)
+ ctx->manipulate_replaced==1)
{
return 1;
}
@@ -2514,6 +2514,14 @@ void pangu_on_http_end(const struct tfe_stream * stream,
{
ret=pangu_send_log(g_pangu_rt->send_logger, &log_msg);
ATOMIC_ADD(&(g_pangu_rt->stat_val[STAT_LOG_NUM]), ret);
for(i=0; i< ctx->n_enforce; i++)
{
if(ctx->enforce_rules[i].action == PG_ACTION_MONIT)
{
ATOMIC_INC(&(g_pangu_rt->stat_val[STAT_ACTION_MONIT]));
}
}
}
if(ctx->rep_ctx && ctx->rep_ctx->actually_replaced==1 && ctx->enforce_rules[0].do_log ==1)

View File

@@ -17,6 +17,7 @@ struct pangu_logger
int entry_id;
unsigned int en_sendlog;
const char *device_id;
const char *data_center;
void* local_logger;
unsigned long long send_cnt;
@@ -58,6 +59,7 @@ struct pangu_logger* pangu_log_handle_create(const char* profile, const char* s
}
instance->device_id = (const char *)tfe_bussiness_resouce_get(DEVICE_ID);
instance->data_center = (const char *)tfe_bussiness_resouce_get(DATA_CENTER);
instance->kafka_logger = (tfe_kafka_logger_t *)tfe_bussiness_resouce_get(KAFKA_LOGGER);
if (instance->kafka_logger && !instance->kafka_logger->enable)
{
@@ -186,6 +188,11 @@ int pangu_send_log(struct pangu_logger* handle, const struct pangu_log* log_msg)
cJSON_AddNumberToObject(common_obj, "common_s2c_byte_num", s2c_byte_num);
cJSON_AddStringToObject(common_obj, "http_url", http->req->req_spec.url);
cJSON_AddStringToObject(common_obj, "http_host", http->req->req_spec.host);
if(handle->data_center)
{
cJSON_AddStringToObject(common_obj, "common_data_center", handle->data_center);
}
for(size_t i=0;i<sizeof(req_fields)/sizeof(struct json_spec);i++)
{
tmp_val=tfe_http_std_field_read(http->req, req_fields[i].field_id);

View File

@@ -596,7 +596,6 @@ int traffic_mirror_on_open_cb(const struct tfe_stream * stream, unsigned int thr
if (!policy_ex_data)
{
TFE_LOG_ERROR(instance->logger, "failed at getting policy %s's EXDATA, detach the stream", str_policy_id);
goto detach;
}