完成pangu http发送业务日志功能开发。(Completed development of the Pangu HTTP business-log sending feature.)
This commit is contained in:
@@ -12,11 +12,13 @@
|
||||
#include <unistd.h>
|
||||
#include <sys/ioctl.h>
|
||||
#include <net/if.h>
|
||||
#include <pthread.h>
|
||||
#include <errno.h>
|
||||
|
||||
/*
 * Describes how one logged HTTP field maps to a key in the emitted JSON
 * record: the cJSON value type, the source/field name, the output key,
 * and the standard-field id used to read the value from an HTTP half.
 *
 * NOTE(review): call sites are inconsistent about which member carries
 * the JSON key — the opt2json tables initialize {json_type, name}
 * positionally, while pangu_send_log uses log_filed_name — confirm the
 * intended contract. "filed" is a historical typo ("field"); it is kept
 * because other code references this member by name.
 */
struct json_spec
{
    int json_type;                     /* cJSON value type (cJSON_Number / cJSON_String) */
    const char *name;                  /* JSON key as used by the opt2json tables — TODO confirm */
    const char *log_filed_name;        /* JSON key as used by pangu_send_log (sic: "filed") */
    enum tfe_http_std_field field_id;  /* id passed to tfe_http_field_read() */
};
|
||||
|
||||
struct pangu_logger
|
||||
@@ -28,9 +30,8 @@ struct pangu_logger
|
||||
void* global_logger;
|
||||
rd_kafka_t *kafka_handle;
|
||||
rd_kafka_topic_t* kafka_topic;
|
||||
|
||||
pthread_mutex_t mutex;
|
||||
char brokerlist[TFE_STRING_MAX];
|
||||
struct json_spec opt2json[LOG_OPT_MAX];
|
||||
const char* topic_name;
|
||||
|
||||
void* local_logger;
|
||||
@@ -74,7 +75,6 @@ error:
|
||||
|
||||
static rd_kafka_t * create_kafka_handle(const char* brokerlist)
|
||||
{
|
||||
int i = 0;
|
||||
char kafka_errstr[1024];
|
||||
rd_kafka_t *handle=NULL;
|
||||
rd_kafka_conf_t *rdkafka_conf = NULL;
|
||||
@@ -103,25 +103,11 @@ static rd_kafka_t * create_kafka_handle(const char* brokerlist)
|
||||
|
||||
struct pangu_logger* pangu_log_handle_create(const char* profile, const char* section, void* local_logger)
|
||||
{
|
||||
int ret=-1,i=0;
|
||||
char addr_string[TFE_SYMBOL_MAX]={0},local_msg_dir[TFE_STRING_MAX]={0};
|
||||
int ret=-1;
|
||||
char nic_name[64]={0};
|
||||
unsigned int ip_buff[TFE_SYMBOL_MAX];
|
||||
|
||||
struct pangu_logger* instance=ALLOC(struct pangu_logger,1);
|
||||
instance->global_logger=local_logger;
|
||||
|
||||
instance->opt2json[LOG_OPT_HTTP_C2S_ISN] = {cJSON_Number,"isn"};
|
||||
instance->opt2json[LOG_OPT_HTTP_PROXY_FLAG] = {cJSON_Number,"proxy_flag"};
|
||||
instance->opt2json[LOG_OPT_HTTP_URL] = {cJSON_String,"url"};
|
||||
instance->opt2json[LOG_OPT_HTTP_COOKIE] = {cJSON_String,"cookie"};
|
||||
instance->opt2json[LOG_OPT_HTTP_REFERER] = {cJSON_String,"referer"};
|
||||
instance->opt2json[LOG_OPT_HTTP_UA] = {cJSON_String,"user_agent"};
|
||||
instance->opt2json[LOG_OPT_HTTP_REQ_LINE] = {cJSON_String,"req_line"};
|
||||
instance->opt2json[LOG_OPT_HTTP_RES_LINE] = {cJSON_String,"res_line"};
|
||||
instance->opt2json[LOG_OPT_HTTP_SET_COOKIE] = {cJSON_String,"set_cookie"};
|
||||
instance->opt2json[LOG_OPT_HTTP_CONTENT_TYPE] = {cJSON_String,"content_type"};
|
||||
instance->opt2json[LOG_OPT_HTTP_CONTENT_LEN] = {cJSON_String,"content_len"};
|
||||
instance->local_logger=local_logger;
|
||||
|
||||
TFE_LOG_ERROR(local_logger,"Pangu log is inititating from %s section %s.", profile, section);
|
||||
|
||||
@@ -150,16 +136,116 @@ struct pangu_logger* pangu_log_handle_create(const char* profile, const char* s
|
||||
}
|
||||
instance->topic_name="PXY_HTTP_LOG";
|
||||
instance->kafka_topic = rd_kafka_topic_new(instance->kafka_handle,instance->topic_name, NULL);
|
||||
|
||||
pthread_mutex_init(&(instance->mutex), NULL);
|
||||
return instance;
|
||||
|
||||
error_out:
|
||||
free(instance);
|
||||
return NULL;
|
||||
}
|
||||
int pangu_send_log(struct pangu_logger* logger, const struct pangu_log* log_msg,struct opt_unit* log_opt,int opt_num)
|
||||
static const char * _wrap_std_field_read(struct tfe_http_half * half, enum tfe_http_std_field field_id)
|
||||
{
|
||||
struct http_field_name tmp_name;
|
||||
tmp_name.field_id=field_id;
|
||||
tmp_name.field_name=NULL;
|
||||
return tfe_http_field_read(half, &tmp_name);
|
||||
}
|
||||
int pangu_send_log(struct pangu_logger* handle, const struct pangu_log* log_msg)
|
||||
{
|
||||
const struct tfe_http_session* http=log_msg->http;
|
||||
const struct tfe_stream_addr* addr=log_msg->stream->addr;
|
||||
const char* tmp_val=NULL;
|
||||
cJSON *common_obj=NULL, *per_hit_obj=NULL;
|
||||
char* log_payload=NULL;
|
||||
int kafka_status=0;
|
||||
time_t cur_time;
|
||||
char src_ip_str[MAX(INET6_ADDRSTRLEN,INET_ADDRSTRLEN)] = {0};
|
||||
char dst_ip_str[MAX(INET6_ADDRSTRLEN,INET_ADDRSTRLEN)] = {0};
|
||||
|
||||
|
||||
struct json_spec req_fields[]={ {"cookie", TFE_HTTP_COOKIE},
|
||||
{"referer", TFE_HTTP_REFERER},
|
||||
{"user_agent", TFE_HTTP_USER_AGENT} };
|
||||
|
||||
struct json_spec resp_fields[]={ {"content_type", TFE_HTTP_CONT_TYPE},
|
||||
{"content_len", TFE_HTTP_CONT_LENGTH} };
|
||||
|
||||
common_obj=cJSON_CreateObject();
|
||||
cur_time = time(NULL);
|
||||
|
||||
cJSON_AddNumberToObject(common_obj, "found_time", cur_time);
|
||||
cJSON_AddNumberToObject(common_obj, "recv_time", cur_time);
|
||||
|
||||
switch(addr->addrtype)
|
||||
{
|
||||
case TFE_ADDR_IPV4:
|
||||
cJSON_AddNumberToObject(common_obj, "addr_type", 4);
|
||||
inet_ntop(AF_INET, &addr->ipv4->saddr, src_ip_str, sizeof(src_ip_str));
|
||||
inet_ntop(AF_INET, &addr->ipv4->daddr, dst_ip_str, sizeof(dst_ip_str));
|
||||
cJSON_AddStringToObject(common_obj, "s_ip", src_ip_str);
|
||||
cJSON_AddStringToObject(common_obj, "d_ip", dst_ip_str);
|
||||
cJSON_AddNumberToObject(common_obj, "s_port", ntohs(addr->ipv4->source));
|
||||
cJSON_AddNumberToObject(common_obj, "d_port", ntohs(addr->ipv4->dest));
|
||||
cJSON_AddStringToObject(common_obj, "trans_proto", "IPv4_TCP");
|
||||
break;
|
||||
case TFE_ADDR_IPV6:
|
||||
cJSON_AddNumberToObject(common_obj, "addr_type", 6);
|
||||
inet_ntop(AF_INET6, &addr->ipv6->saddr, src_ip_str, sizeof(src_ip_str));
|
||||
inet_ntop(AF_INET6, &addr->ipv6->daddr, dst_ip_str, sizeof(dst_ip_str));
|
||||
cJSON_AddStringToObject(common_obj, "s_ip", src_ip_str);
|
||||
cJSON_AddStringToObject(common_obj, "d_ip", dst_ip_str);
|
||||
cJSON_AddNumberToObject(common_obj, "s_port", ntohs(addr->ipv6->source));
|
||||
cJSON_AddNumberToObject(common_obj, "d_port", ntohs(addr->ipv6->dest));
|
||||
cJSON_AddStringToObject(common_obj, "trans_proto", "IPv6_TCP");
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
cJSON_AddNumberToObject(common_obj, "direction", 0); //0:域内->域外,1:域外->域内,描述的是CLIENT_IP信息
|
||||
cJSON_AddNumberToObject(common_obj, "stream_dir", 3); //1:c2s, 2:s2c, 3:double
|
||||
cJSON_AddStringToObject(common_obj, "cap_ip", handle->local_ip_str);
|
||||
cJSON_AddNumberToObject(common_obj, "entrance_id", handle->entry_id);
|
||||
cJSON_AddNumberToObject(common_obj, "device_id", 0);
|
||||
cJSON_AddStringToObject(common_obj, "user_region", "null");
|
||||
cJSON_AddStringToObject(common_obj, "url", http->req->req_spec.url);
|
||||
for(size_t i=0;i<sizeof(req_fields)/sizeof(struct json_spec);i++)
|
||||
{
|
||||
tmp_val=_wrap_std_field_read(http->req, req_fields[i].field_id);
|
||||
if(tmp_val!=NULL)
|
||||
{
|
||||
cJSON_AddStringToObject(common_obj,req_fields[i].log_filed_name, tmp_val);
|
||||
}
|
||||
}
|
||||
for(size_t i=0;i<sizeof(resp_fields)/sizeof(struct json_spec);i++)
|
||||
{
|
||||
tmp_val=_wrap_std_field_read(http->req, resp_fields[i].field_id);
|
||||
if(tmp_val!=NULL)
|
||||
{
|
||||
cJSON_AddStringToObject(common_obj,resp_fields[i].log_filed_name, tmp_val);
|
||||
}
|
||||
}
|
||||
for(int i=0; i<log_msg->result_num; i++)
|
||||
{
|
||||
if(log_msg->result[i].do_log==0)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
per_hit_obj=cJSON_Duplicate(common_obj, 1);
|
||||
cJSON_AddNumberToObject(per_hit_obj, "cfg_id", log_msg->result[i].config_id);
|
||||
cJSON_AddNumberToObject(per_hit_obj, "service", log_msg->result[i].service_id);
|
||||
log_payload = cJSON_Print(per_hit_obj);
|
||||
kafka_status = rd_kafka_produce(handle->kafka_topic, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
|
||||
log_payload, strlen(log_payload), NULL, 0, NULL);
|
||||
free(log_payload);
|
||||
cJSON_free(per_hit_obj);
|
||||
if(kafka_status<0)
|
||||
{
|
||||
TFE_LOG_ERROR(handle->local_logger, "Kafka produce failed: %s", rd_kafka_err2name(rd_kafka_last_error()));
|
||||
}
|
||||
}
|
||||
|
||||
cJSON_free(common_obj);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user