/*
 * pangu_logger.cpp — Pangu HTTP plugin event logger.
 *
 * Serializes per-hit HTTP session data to JSON (cJSON) and produces one
 * Kafka message (librdkafka) per policy result flagged for logging.
 * Request/response bodies are uploaded through the cache_evbase client and
 * referenced by upload path inside the log record.
 */
#include <cjson/cJSON.h>
#include <librdkafka/rdkafka.h>
#include <MESA/MESA_handle_logger.h>
#include <MESA/MESA_prof_load.h>

#include <arpa/inet.h>
#include <assert.h>
#include <errno.h>
#include <net/if.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <time.h>
#include <unistd.h>

#include <tfe_utils.h>
#include <cache_evbase_client.h>
#include "pangu_logger.h"
2018-09-09 15:21:26 +08:00
struct json_spec
{
const char *log_filed_name;
enum tfe_http_std_field field_id;
2018-09-09 15:21:26 +08:00
};
struct pangu_logger
{
char local_ip_str[TFE_SYMBOL_MAX];
int entry_id;
2019-05-15 16:09:27 +08:00
unsigned int en_sendlog;
unsigned int en_sendlog_meta;
unsigned int en_sendlog_body;
2018-09-09 15:21:26 +08:00
unsigned int local_ip_nr;
void* global_logger;
rd_kafka_t *kafka_handle;
rd_kafka_topic_t* kafka_topic;
pthread_mutex_t mutex;
2018-09-09 15:21:26 +08:00
char brokerlist[TFE_STRING_MAX];
char topic_name[TFE_STRING_MAX];
2018-09-09 15:21:26 +08:00
void* local_logger;
unsigned long long send_cnt;
unsigned long long random_drop;
unsigned long long user_abort;
char local_log_path[TFE_STRING_MAX];
2018-12-24 22:47:26 +06:00
struct cache_evbase_instance * log_file_upload_instance;
2018-09-09 15:21:26 +08:00
};
static unsigned int get_ip_by_eth_name(const char *ifname)
2018-09-09 15:21:26 +08:00
{
int sockfd;
struct ifreq ifr;
unsigned int ip;
sockfd = socket(AF_INET, SOCK_DGRAM, 0);
if (-1 == sockfd)
2018-09-09 15:21:26 +08:00
{
goto error;
}
strcpy(ifr.ifr_name,ifname);
if (ioctl(sockfd, SIOCGIFADDR, &ifr) < 0)
{
goto error;
}
ip = ((struct sockaddr_in*)&(ifr.ifr_addr))->sin_addr.s_addr;
close(sockfd);
return ip;
error:
close(sockfd);
return INADDR_NONE;
}
static rd_kafka_t * create_kafka_handle(const char* brokerlist)
2018-09-09 15:21:26 +08:00
{
char kafka_errstr[1024];
rd_kafka_t *handle=NULL;
2018-09-09 15:21:26 +08:00
rd_kafka_conf_t *rdkafka_conf = NULL;
rdkafka_conf = rd_kafka_conf_new();
rd_kafka_conf_set(rdkafka_conf, "queue.buffering.max.messages", "1000000", kafka_errstr, sizeof(kafka_errstr));
rd_kafka_conf_set(rdkafka_conf, "topic.metadata.refresh.interval.ms", "600000",kafka_errstr, sizeof(kafka_errstr));
rd_kafka_conf_set(rdkafka_conf, "security.protocol", "MG", kafka_errstr, sizeof(kafka_errstr));
//The conf object is freed by this function and must not be used or destroyed by the application sub-sequently.
handle = rd_kafka_new(RD_KAFKA_PRODUCER, rdkafka_conf, kafka_errstr, sizeof(kafka_errstr));
rdkafka_conf=NULL;
if (handle==NULL)
{
return NULL;
}
if (rd_kafka_brokers_add(handle, brokerlist) == 0)
{
rd_kafka_destroy(handle);
return NULL;
}
return handle;
}
2018-09-14 16:33:36 +08:00
struct pangu_logger* pangu_log_handle_create(const char* profile, const char* section, void* local_logger)
2018-09-09 15:21:26 +08:00
{
int ret=-1;
2018-09-09 15:21:26 +08:00
char nic_name[64]={0};
2018-12-24 22:47:26 +06:00
struct tango_cache_parameter *log_file_upload_para=NULL;
2018-09-09 15:21:26 +08:00
struct pangu_logger* instance=ALLOC(struct pangu_logger,1);
instance->local_logger=local_logger;
2018-09-09 15:21:26 +08:00
2019-05-15 16:09:27 +08:00
TFE_LOG_INFO(local_logger,"Pangu log is inititating from %s section %s.", profile, section);
MESA_load_profile_uint_def(profile, section, "en_sendlog", &instance->en_sendlog, 1);
MESA_load_profile_uint_def(profile, section, "en_sendlog_meta", &instance->en_sendlog_meta, 1);
MESA_load_profile_uint_def(profile, section, "en_sendlog_body", &instance->en_sendlog_body, 1);
if (!instance->en_sendlog)
{
instance->en_sendlog_body = 0;
instance->en_sendlog_meta = 0;
}
TFE_LOG_INFO(local_logger, "Pangu sendlog : %s", instance->en_sendlog ? "ENABLE" : "DISABLE");
TFE_LOG_INFO(local_logger, "Pangu sendlog meta : %s", instance->en_sendlog_meta ? "ENABLE" : "DISABLE");
TFE_LOG_INFO(local_logger, "Pangu sendlog body : %s", instance->en_sendlog_body ? "ENABLE" : "DISABLE");
if (!instance->en_sendlog)
{
return instance;
}
2018-09-09 15:21:26 +08:00
MESA_load_profile_string_def(profile, section, "NIC_NAME",nic_name,sizeof(nic_name),"eth0");
instance->local_ip_nr=get_ip_by_eth_name(nic_name);
if(instance->local_ip_nr==INADDR_NONE)
{
2018-09-14 16:33:36 +08:00
TFE_LOG_ERROR(local_logger, "%s get NIC_NAME: %s error.", __FUNCTION__, nic_name);
2018-09-09 15:21:26 +08:00
goto error_out;
}
2019-05-15 16:09:27 +08:00
2018-09-09 15:21:26 +08:00
inet_ntop(AF_INET,&(instance->local_ip_nr),instance->local_ip_str,sizeof(instance->local_ip_str));
2018-09-09 15:21:26 +08:00
MESA_load_profile_int_def(profile, section, "ENTRANCE_ID",&(instance->entry_id),0);
ret=MESA_load_profile_string_def(profile, section,"KAFKA_BROKERLIST", instance->brokerlist, sizeof(instance->brokerlist), NULL);
if(ret<0)
{
2018-09-14 16:33:36 +08:00
TFE_LOG_ERROR(local_logger,"Pangu log init failed, no brokerlist in profile %s section %s.", profile, section);
2018-09-09 15:21:26 +08:00
goto error_out;
}
2019-05-15 16:09:27 +08:00
2018-09-09 15:21:26 +08:00
instance->kafka_handle=create_kafka_handle(instance->brokerlist);
if(instance->kafka_handle==NULL)
{
TFE_LOG_ERROR(local_logger,"Pangu log init failed. Cannot create lafka handle with brokerlist: %s.", instance->brokerlist);
2018-09-09 15:21:26 +08:00
goto error_out;
}
2019-05-15 16:09:27 +08:00
MESA_load_profile_string_def(profile, section,"KAFKA_TOPIC", instance->topic_name, sizeof(instance->topic_name), "POLICY-EVENT-LOG");
TFE_LOG_INFO(local_logger, "Pangu kafka brokerlist : %s", instance->brokerlist);
TFE_LOG_INFO(local_logger, "Pangu kafka topic : %s", instance->topic_name);
2018-09-09 15:21:26 +08:00
instance->kafka_topic = rd_kafka_topic_new(instance->kafka_handle,instance->topic_name, NULL);
2018-12-24 22:47:26 +06:00
log_file_upload_para=cache_evbase_parameter_new(profile, section, local_logger);
instance->log_file_upload_instance=cache_evbase_instance_new(log_file_upload_para, local_logger);
pthread_mutex_init(&(instance->mutex), NULL);
2018-09-09 15:21:26 +08:00
return instance;
2018-09-09 15:21:26 +08:00
error_out:
free(instance);
return NULL;
}
int pangu_send_log(struct pangu_logger* handle, const struct pangu_log* log_msg)
2018-09-09 15:21:26 +08:00
{
const struct tfe_http_session* http=log_msg->http;
const struct tfe_stream_addr* addr=log_msg->stream->addr;
const char* tmp_val=NULL;
cJSON *common_obj=NULL, *per_hit_obj=NULL;
char* log_payload=NULL;
int kafka_status=0;
int send_cnt=0;
2018-12-24 22:47:26 +06:00
int tmp=0;
time_t cur_time;
char src_ip_str[MAX(INET6_ADDRSTRLEN,INET_ADDRSTRLEN)] = {0};
char dst_ip_str[MAX(INET6_ADDRSTRLEN,INET_ADDRSTRLEN)] = {0};
const char *app_proto[]= {"unkonw","http1", "http2"};
struct json_spec req_fields[]={ {"http_cookie", TFE_HTTP_COOKIE},
{"http_referer", TFE_HTTP_REFERER},
{"http_user_agent", TFE_HTTP_USER_AGENT} };
struct json_spec resp_fields[]={ {"http_content_type", TFE_HTTP_CONT_TYPE},
{"http_content_length", TFE_HTTP_CONT_LENGTH},
{"http_set_cookie", TFE_HTTP_SET_COOKIE}};
2019-05-15 16:09:27 +08:00
if (!handle->en_sendlog)
{
return 0;
}
common_obj=cJSON_CreateObject();
cur_time = time(NULL);
cJSON_AddNumberToObject(common_obj, "common_start_time", cur_time);
cJSON_AddNumberToObject(common_obj, "common_end_time", cur_time);
cJSON_AddStringToObject(common_obj, "http_version", app_proto[http->major_version]);
uint64_t opt_val;
uint16_t opt_out_size;
struct tfe_cmsg * cmsg = tfe_stream_get0_cmsg(log_msg->stream);
if (cmsg!=NULL)
{
int ret = tfe_cmsg_get_value(cmsg, TFE_CMSG_STREAM_TRACE_ID, (unsigned char *) &opt_val, sizeof(opt_val), &opt_out_size);
if (ret==0)
{
cJSON_AddNumberToObject(common_obj, "common_stream_trace_id", opt_val);
}
}
2018-09-09 15:21:26 +08:00
switch(addr->addrtype)
{
case TFE_ADDR_STREAM_TUPLE4_V4:
cJSON_AddNumberToObject(common_obj, "common_address_type", 4);
inet_ntop(AF_INET, &addr->tuple4_v4->saddr, src_ip_str, sizeof(src_ip_str));
inet_ntop(AF_INET, &addr->tuple4_v4->daddr, dst_ip_str, sizeof(dst_ip_str));
cJSON_AddStringToObject(common_obj, "common_client_ip", src_ip_str);
cJSON_AddStringToObject(common_obj, "common_server_ip", dst_ip_str);
cJSON_AddNumberToObject(common_obj, "common_client_port", ntohs(addr->tuple4_v4->source));
cJSON_AddNumberToObject(common_obj, "common_server_port", ntohs(addr->tuple4_v4->dest));
cJSON_AddStringToObject(common_obj, "common_l4_protocol", "IPv4_TCP");
break;
case TFE_ADDR_STREAM_TUPLE4_V6:
cJSON_AddNumberToObject(common_obj, "common_address_type", 6);
inet_ntop(AF_INET6, &addr->tuple4_v6->saddr, src_ip_str, sizeof(src_ip_str));
inet_ntop(AF_INET6, &addr->tuple4_v6->daddr, dst_ip_str, sizeof(dst_ip_str));
cJSON_AddStringToObject(common_obj, "common_client_ip", src_ip_str);
cJSON_AddStringToObject(common_obj, "common_server_ip", dst_ip_str);
cJSON_AddNumberToObject(common_obj, "common_client_port", ntohs(addr->tuple4_v6->source));
cJSON_AddNumberToObject(common_obj, "common_server_port", ntohs(addr->tuple4_v6->dest));
cJSON_AddStringToObject(common_obj, "common_l4_protocol", "IPv6_TCP");
break;
default:
break;
}
cJSON_AddNumberToObject(common_obj, "common_direction", 0); //0域内->域外1域外->域内描述的是CLIENT_IP信息
cJSON_AddNumberToObject(common_obj, "common_link_id", 0);
cJSON_AddNumberToObject(common_obj, "common_stream_dir", 3); //1:c2s, 2:s2c, 3:double
cJSON_AddStringToObject(common_obj, "common_sled_ip", handle->local_ip_str);
cJSON_AddNumberToObject(common_obj, "common_entrance_id", handle->entry_id);
cJSON_AddNumberToObject(common_obj, "common_device_id", 0);
cJSON_AddStringToObject(common_obj, "http_url", http->req->req_spec.url);
cJSON_AddStringToObject(common_obj, "http_host", http->req->req_spec.host);
for(size_t i=0;i<sizeof(req_fields)/sizeof(struct json_spec);i++)
{
tmp_val=tfe_http_std_field_read(http->req, req_fields[i].field_id);
if(tmp_val!=NULL)
{
cJSON_AddStringToObject(common_obj,req_fields[i].log_filed_name, tmp_val);
}
}
2018-10-18 16:20:22 +08:00
for(size_t i=0;i<sizeof(resp_fields)/sizeof(struct json_spec) && http->resp!=NULL;i++)
{
tmp_val=tfe_http_std_field_read(http->resp, resp_fields[i].field_id);
if(tmp_val!=NULL)
{
cJSON_AddStringToObject(common_obj,resp_fields[i].log_filed_name, tmp_val);
}
}
2018-12-24 22:47:26 +06:00
char log_file_upload_path[TFE_STRING_MAX]={0}, cont_type_whole[TFE_STRING_MAX]={0};
struct tango_cache_meta_put meta;
char* log_file_key=NULL;;
const char* cont_type_val;
if(log_msg->req_body!=NULL)
{
memset(&meta, 0, sizeof(meta));
asprintf(&log_file_key, "%s.reqbody", http->req->req_spec.url);
meta.url=log_file_key;
cont_type_val=tfe_http_std_field_read(http->req, TFE_HTTP_CONT_TYPE);
if(cont_type_val!=NULL)
{
snprintf(cont_type_whole, sizeof(cont_type_whole), "Content-Type:%s", cont_type_val);
meta.std_hdr[0]=cont_type_whole;
}
tmp=cache_evbase_upload_once_evbuf(handle->log_file_upload_instance, NULL,
log_msg->req_body,
&meta,
2018-12-24 22:47:26 +06:00
log_file_upload_path, sizeof(log_file_upload_path));
if(tmp==0)
{
cJSON_AddStringToObject(common_obj, "http_request_body", log_file_upload_path);
2018-12-24 22:47:26 +06:00
}
else
{
TFE_LOG_ERROR(handle->local_logger, "Upload req_body failed.");
}
free(log_file_key);
}
if(log_msg->resp_body!=NULL)
{
memset(&meta, 0, sizeof(meta));
asprintf(&log_file_key, "%s.respbody", http->req->req_spec.url);
meta.url=log_file_key;
cont_type_val=tfe_http_std_field_read(http->resp, TFE_HTTP_CONT_TYPE);
if(cont_type_val!=NULL)
{
snprintf(cont_type_whole, sizeof(cont_type_whole), "Content-Type:%s", cont_type_val);
meta.std_hdr[0]=cont_type_whole;
}
tmp=cache_evbase_upload_once_evbuf(handle->log_file_upload_instance, NULL,
log_msg->resp_body,
&meta,
2018-12-24 22:47:26 +06:00
log_file_upload_path, sizeof(log_file_upload_path));
if(tmp==0)
{
cJSON_AddStringToObject(common_obj, "http_response_body", log_file_upload_path);
2018-12-24 22:47:26 +06:00
}
else
{
TFE_LOG_ERROR(handle->local_logger, "Upload resp_body failed.");
}
free(log_file_key);
}
for(size_t i=0; i<log_msg->result_num; i++)
{
TFE_LOG_DEBUG(handle->local_logger, "URL: %s, policy_id: %d, service: %d, do_log:%d",
2018-12-17 16:40:28 +06:00
http->req->req_spec.url,
log_msg->result[i].config_id,
log_msg->result[i].service_id,
log_msg->result[i].do_log);
2018-12-17 16:40:28 +06:00
if(log_msg->result[i].do_log==0)
{
continue;
}
per_hit_obj=cJSON_Duplicate(common_obj, 1);
cJSON_AddNumberToObject(per_hit_obj, "common_policy_id", log_msg->result[i].config_id);
cJSON_AddNumberToObject(per_hit_obj, "common_service", log_msg->result[i].service_id);
cJSON_AddNumberToObject(per_hit_obj, "common_action", (unsigned char)log_msg->result[i].action);
log_payload = cJSON_PrintUnformatted(per_hit_obj);
TFE_LOG_DEBUG(handle->local_logger, "%s", log_payload);
kafka_status = rd_kafka_produce(handle->kafka_topic, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
log_payload, strlen(log_payload), NULL, 0, NULL);
free(log_payload);
cJSON_Delete(per_hit_obj);
if(kafka_status<0)
{
TFE_LOG_ERROR(handle->local_logger, "Kafka produce failed: %s", rd_kafka_err2name(rd_kafka_last_error()));
}
send_cnt++;
}
cJSON_Delete(common_obj);
return send_cnt;
2018-09-09 15:21:26 +08:00
}