TSG-148 Change the format of the data written to Kafka; fix code formatting
@@ -8,6 +8,6 @@
 // return 0 for success, return -1 for failed
 int ssl_mid_cert_kafka_logger_create(const char *profile, const char *section);
 void ssl_mid_cert_kafka_logger_destory(void);
-void ssl_fetch_trusted_cert_from_chain(STACK_OF(X509) * cert_chain, X509_STORE *trusted_store);
+void ssl_fetch_trusted_cert_from_chain(STACK_OF(X509) *cert_chain, X509_STORE *trusted_store, const char *hostname);
 
 #endif //TFE_SSL_FETCH_CERT_H
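For orientation, a hedged usage sketch of the revised interface: the logger is created once from a profile, the fetch routine now also receives the connection's hostname, and teardown goes through ssl_mid_cert_kafka_logger_destory. The header file name is only inferred from the include guard; the profile path, section name and hostname below are placeholders, not values from this commit.

/* Hypothetical call sequence against the revised declarations; not part of the commit. */
#include <openssl/x509.h>
#include "tfe_ssl_fetch_cert.h"   /* assumed header name, guessed from the TFE_SSL_FETCH_CERT_H guard */

void report_untrusted_intermediates(STACK_OF(X509) *chain, X509_STORE *trusted_store) {
    /* returns 0 for success, -1 for failure, per the comment in the header */
    if (ssl_mid_cert_kafka_logger_create("/etc/tfe/tfe.conf", "ssl") != 0) {
        return;
    }
    ssl_fetch_trusted_cert_from_chain(chain, trusted_store, "www.example.com");
    ssl_mid_cert_kafka_logger_destory();
}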
@@ -6,50 +6,82 @@
 #include "tfe_utils.h"
 
 #include <assert.h>
+#include <cjson/cJSON.h>
 #include <librdkafka/rdkafka.h>
 #include <MESA/MESA_prof_load.h>
 
 typedef struct x509_object_st {
     int type;
     union {
         char *ptr;
         X509 *x509;
         X509_CRL *crl;
         EVP_PKEY *pkey;
     } data;
 } X509_OBJECT;
 
 typedef struct ssl_kafka_logger_s {
     int enable;
-    char brokerlist[TFE_STRING_MAX];
-    char topicname[TFE_STRING_MAX];
-
-    rd_kafka_t *handle;
+    char tfe_ip[TFE_SYMBOL_MAX];
+    char topic_name[TFE_STRING_MAX];
+    char broker_list[TFE_STRING_MAX];
+
+    rd_kafka_t *handle;
     rd_kafka_topic_t *topic;
 } ssl_kafka_logger_t;
 
 static ssl_kafka_logger_t *g_kafka_logger = NULL;
 
-static rd_kafka_t *create_kafka_handle(const char *brokerlist) {
-    char kafka_errstr[1024];
-    rd_kafka_t *handle = NULL;
-    rd_kafka_conf_t *rdkafka_conf = NULL;
-
-    rdkafka_conf = rd_kafka_conf_new();
-    rd_kafka_conf_set(rdkafka_conf, "queue.buffering.max.messages", "1000000", kafka_errstr, sizeof(kafka_errstr));
-    rd_kafka_conf_set(rdkafka_conf, "topic.metadata.refresh.interval.ms", "600000", kafka_errstr, sizeof(kafka_errstr));
-    rd_kafka_conf_set(rdkafka_conf, "security.protocol", "MG", kafka_errstr, sizeof(kafka_errstr));
-
-    //The conf object is freed by this function and must not be used or destroyed by the application sub-sequently.
-    handle = rd_kafka_new(RD_KAFKA_PRODUCER, rdkafka_conf, kafka_errstr, sizeof(kafka_errstr));
-    rdkafka_conf = NULL;
+static unsigned int get_ip_by_eth(const char *eth) {
+    int sockfd = -1;
+    unsigned int ip;
+    struct ifreq ifr;
+
+    sockfd = socket(AF_INET, SOCK_DGRAM, 0);
+    if (-1 == sockfd) {
+        goto error;
+    }
+
+    memset(&ifr, 0, sizeof(ifr));
+    strcpy(ifr.ifr_name, eth);
+    if (ioctl(sockfd, SIOCGIFADDR, &ifr) < 0) {
+        goto error;
+    }
+
+    ip = ((struct sockaddr_in *)&(ifr.ifr_addr))->sin_addr.s_addr;
+
+    close(sockfd);
+    return ip;
+
+error:
+    if (sockfd > 0)
+        close(sockfd);
+    return INADDR_NONE;
+}
+
+static rd_kafka_t *create_kafka_handle(const char *broker_list) {
+    char errstr[1024];
+    rd_kafka_t *handle = NULL;
+    rd_kafka_conf_t *conf = NULL;
+
+    conf = rd_kafka_conf_new();
+    rd_kafka_conf_set(conf, "queue.buffering.max.messages", "1000000", errstr, sizeof(errstr));
+    rd_kafka_conf_set(conf, "topic.metadata.refresh.interval.ms", "600000", errstr, sizeof(errstr));
+    rd_kafka_conf_set(conf, "security.protocol", "MG", errstr, sizeof(errstr));
+
+    // The conf object is freed by this function and must not be used or destroyed by the application sub-sequently.
+    handle = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+    conf = NULL;
     if (handle == NULL) {
         return NULL;
     }
-    if (rd_kafka_brokers_add(handle, brokerlist) == 0) {
+
+    if (rd_kafka_brokers_add(handle, broker_list) == 0) {
         rd_kafka_destroy(handle);
         return NULL;
     }
 
     return handle;
 }
 
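The new get_ip_by_eth helper resolves the IPv4 address bound to a configured interface with the SIOCGIFADDR ioctl and returns the raw s_addr, which the create path later renders into the tfe_ip field with inet_ntop. Below is a minimal standalone sketch of that same pattern, assuming a Linux host; the interface name "eth0", the use of main(), and the buffer sizing are illustrative choices, not values taken from the commit.

/* Standalone illustration of the SIOCGIFADDR + inet_ntop lookup used above. */
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void) {
    struct ifreq ifr;
    char addr[INET_ADDRSTRLEN] = {0};
    int fd = socket(AF_INET, SOCK_DGRAM, 0); /* any AF_INET datagram socket works for this ioctl */

    if (fd < 0) {
        return 1;
    }
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
    if (ioctl(fd, SIOCGIFADDR, &ifr) == 0) {
        /* for AF_INET interfaces ifr_addr holds a sockaddr_in */
        struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
        inet_ntop(AF_INET, &sin->sin_addr, addr, sizeof(addr));
        printf("%s -> %s\n", ifr.ifr_name, addr);
    }
    close(fd);
    return 0;
}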
@@ -58,108 +90,132 @@ void ssl_mid_cert_kafka_logger_destory(void) {
         if (g_kafka_logger->handle) {
             free(g_kafka_logger->handle);
         }
+        if (g_kafka_logger->topic) {
+            free(g_kafka_logger->topic);
+        }
         free(g_kafka_logger);
     }
 }
 
-int ssl_mid_cert_kafka_logger_create(const char *profile, const char *section) {
-    const char *errstr = "ssl mid cert cache kafka logger create failed";
+int ssl_mid_cert_kafka_logger_create(const char *profile, const char *section)
+{
+    unsigned int ip;
+    char eth[64] = {0};
+    const char *errstr = "SSL mid cert cache occer error, ";
 
     g_kafka_logger = ALLOC(ssl_kafka_logger_t, 1);
     assert(g_kafka_logger);
 
-    MESA_load_profile_int_def(profile, section, "mid_cert_cache_kafka_enable", &(g_kafka_logger->enable), 0);
+    MESA_load_profile_int_def(profile, section, "mc_cache_enable", &(g_kafka_logger->enable), 0);
     if (!g_kafka_logger->enable) {
         return 0;
     }
 
-    if (MESA_load_profile_string_def(profile, section, "mid_cert_cache_kafka_brokerlist", g_kafka_logger->brokerlist,
-                                     sizeof(g_kafka_logger->brokerlist), NULL) < 0) {
-        TFE_LOG_ERROR(g_default_logger, "%s, No brokerlist in profile %s section %s.", errstr, profile, section);
+    MESA_load_profile_string_def(profile, section, "mc_cache_eth", eth, sizeof(eth), "eth0");
+    ip = get_ip_by_eth(eth);
+    if (ip == INADDR_NONE) {
+        TFE_LOG_ERROR(g_default_logger, "%s, Fail to get ip by %s.", errstr, eth);
         goto error;
     }
-    g_kafka_logger->handle = create_kafka_handle(g_kafka_logger->brokerlist);
+    inet_ntop(AF_INET, &ip, g_kafka_logger->tfe_ip, sizeof(g_kafka_logger->tfe_ip));
+
+    if (MESA_load_profile_string_def(profile, section, "mc_cache_broker_list", g_kafka_logger->broker_list, sizeof(g_kafka_logger->broker_list), NULL) < 0) {
+        TFE_LOG_ERROR(g_default_logger, "%s, Fail to get mc_cache_broker_list in profile %s section %s.", errstr, profile, section);
+        goto error;
+    }
+
+    g_kafka_logger->handle = create_kafka_handle(g_kafka_logger->broker_list);
     if (g_kafka_logger->handle == NULL) {
-        TFE_LOG_ERROR(g_default_logger, "%s, Cannot create kafka handle with brokerlist: %s.", errstr,
-                      g_kafka_logger->brokerlist);
+        TFE_LOG_ERROR(g_default_logger, "%s, Fail to create kafka handle with broker list: %s.", errstr, g_kafka_logger->broker_list);
         goto error;
     }
 
-    MESA_load_profile_string_def(profile, section, "mid_cert_cache_kafka_topic", g_kafka_logger->topicname,
-                                 sizeof(g_kafka_logger->topicname), "MID-CERT-CACHE-LOG");
-    g_kafka_logger->topic = rd_kafka_topic_new(g_kafka_logger->handle, g_kafka_logger->topicname, NULL);
+    MESA_load_profile_string_def(profile, section, "mc_cache_topic", g_kafka_logger->topic_name, sizeof(g_kafka_logger->topic_name), "MID-CERT-CACHE-LOG");
+    g_kafka_logger->topic = rd_kafka_topic_new(g_kafka_logger->handle, g_kafka_logger->topic_name, NULL);
     if (g_kafka_logger->topic == NULL) {
-        TFE_LOG_ERROR(g_default_logger, "%s, Cannot create kafka handle with brokerlist: %s.", errstr,
-                      g_kafka_logger->brokerlist);
+        TFE_LOG_ERROR(g_default_logger, "%s, Fail to create kafka topic with broker list: %s.", errstr, g_kafka_logger->broker_list);
         goto error;
     }
 
     return 0;
 
 error:
     ssl_mid_cert_kafka_logger_destory();
     return -1;
 }
 
-void ssl_mid_cert_kafka_logger_send(char *msg) {
-    if (g_kafka_logger == NULL || g_kafka_logger->enable == 0) {
+void ssl_mid_cert_kafka_logger_send(const char *sni, const char *fingerprint, const char *cert)
+{
+    if (g_kafka_logger == NULL || g_kafka_logger->enable == 0)
+    {
         return;
     }
-    rd_kafka_produce(g_kafka_logger->topic, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY, msg, strlen(msg), NULL, 0,
-                     NULL);
-}
-
-int ssl_mid_cert_kafka_logger_enable() {
-    if (g_kafka_logger && g_kafka_logger->enable) {
-        return 1;
-    } else {
-        return 0;
-    }
+    cJSON *obj = NULL;
+    cJSON *dup = NULL;
+    char *msg = NULL;
+
+    obj = cJSON_CreateObject();
+    cJSON_AddStringToObject(obj, "sni", sni);
+    cJSON_AddStringToObject(obj, "fingerprint", fingerprint);
+    cJSON_AddStringToObject(obj, "cert", cert);
+    cJSON_AddStringToObject(obj, "tfe_ip", g_kafka_logger->tfe_ip);
+    dup = cJSON_Duplicate(obj, 1);
+    msg = cJSON_PrintUnformatted(dup);
+    TFE_LOG_DEBUG(g_default_logger, "log to [%s] msg:%s", g_kafka_logger->topic_name, msg);
+    rd_kafka_produce(g_kafka_logger->topic, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY, msg, strlen(msg), NULL, 0, NULL);
+
+    free(msg);
+    cJSON_Delete(dup);
+    cJSON_Delete(obj);
 }
 
 // test use http://www.360.cn/
-void ssl_fetch_trusted_cert_from_chain(STACK_OF(X509) *cert_chain, X509_STORE *trusted_store) {
-    if (!ssl_mid_cert_kafka_logger_enable()) {
+void ssl_fetch_trusted_cert_from_chain(STACK_OF(X509) * cert_chain, X509_STORE *trusted_store, const char *hostname) {
+    int ret;
+    int deep;
+    char *subj = NULL;
+    char *issuer = NULL;
+    char *fingerprint = NULL;
+    X509 *cert = NULL;
+    X509_LOOKUP *lookup = NULL;
+    X509_OBJECT stmp;
+
+    if (!g_kafka_logger || !g_kafka_logger->enable) {
         return;
     }
 
-    // range for [0, count - 1]
-    int count = sk_X509_num(cert_chain);
-
     // don`t need call X509_LOOKUP_free(lookup)
-    X509_LOOKUP *lookup = X509_STORE_add_lookup(trusted_store, X509_LOOKUP_hash_dir());
+    lookup = X509_STORE_add_lookup(trusted_store, X509_LOOKUP_hash_dir());
    if (lookup == NULL) {
        return;
    }
 
-    for (int i = 1; i < count; i++) {
-        // don1t need call X509_FREE(cert)
-        X509 *cert = sk_X509_value(cert_chain, i);
+    deep = sk_X509_num(cert_chain);
+    for (int i = 1; i < deep; i++) {
+        // needn't call X509_FREE(cert)
+        cert = sk_X509_value(cert_chain, i);
         assert(cert);
 
-        X509_OBJECT stmp;
         stmp.type = X509_LU_NONE;
         stmp.data.ptr = NULL;
-        int result = X509_LOOKUP_by_subject(lookup, X509_LU_X509, X509_get_issuer_name(cert), &stmp);
-        char *subj = ssl_x509_subject(cert);
-        char *issuer = ssl_x509_issuer(cert);
-        if (result) {
-            TFE_LOG_ERROR(g_default_logger, "[dep:%d/%d] subject:%s; issure:%s; in_trusted_store:1\n", i, count, subj,
-                          issuer);
-            // not use continue, case the intermediate certificate is exist and the root certificate is not exist.
-            /* continue; */
-        } else {
-            TFE_LOG_ERROR(g_default_logger, "[dep:%d/%d] subject:%s; issure:%s; in_trusted_store:0\n", i, count, subj,
-                          issuer);
-            char *string = ssl_x509_to_str(cert);
-            if (string) {
-                // printf("%s\n", string);
-                ssl_mid_cert_kafka_logger_send(string);
-                free(string);
-            }
+        ret = X509_LOOKUP_by_subject(lookup, X509_LU_X509, X509_get_issuer_name(cert), &stmp);
+        subj = ssl_x509_subject(cert);
+        issuer = ssl_x509_issuer(cert);
+        fingerprint = ssl_x509_fingerprint(cert, 0);
+        TFE_LOG_DEBUG(g_default_logger, "[dep:%d/%d] subject:%s; issuer:%s; fingerprint:%s; in_trusted_store:%d", i, deep,
+                      subj ? subj : "NULL", issuer ? issuer : "NULL", fingerprint ? fingerprint : "NULL", ret);
+        if (!ret) {
+            char *pem = ssl_x509_to_pem(cert);
+            if (pem) {
+                ssl_mid_cert_kafka_logger_send(hostname, fingerprint, pem);
+                free(pem);
             }
         }
-        free(subj);
-        free(issuer);
+        if (subj)
+            free(subj);
+        if (issuer)
+            free(issuer);
+        if (fingerprint)
+            free(fingerprint);
     }
 }
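With this change, ssl_mid_cert_kafka_logger_send no longer receives a pre-rendered string: it assembles a JSON record from the SNI/hostname, the certificate fingerprint, the PEM-encoded certificate and the local tfe_ip, and produces the unformatted JSON to the configured topic. The sketch below only illustrates the resulting record shape with the same cJSON calls; the field values are placeholders, and the intermediate cJSON_Duplicate step and the Kafka produce call are omitted.

/* Illustrative only: shows the shape of the message sent to Kafka. */
#include <cjson/cJSON.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    cJSON *obj = cJSON_CreateObject();
    cJSON_AddStringToObject(obj, "sni", "www.example.com");
    cJSON_AddStringToObject(obj, "fingerprint", "AB:CD:EF:01:23:45");
    cJSON_AddStringToObject(obj, "cert", "-----BEGIN CERTIFICATE-----\n(base64 body)\n-----END CERTIFICATE-----\n");
    cJSON_AddStringToObject(obj, "tfe_ip", "192.0.2.10");

    char *msg = cJSON_PrintUnformatted(obj);
    /* prints e.g. {"sni":"www.example.com","fingerprint":"AB:CD:EF:01:23:45","cert":"...","tfe_ip":"192.0.2.10"} */
    printf("%s\n", msg);

    free(msg);          /* cJSON_PrintUnformatted allocates with the library's default malloc hooks */
    cJSON_Delete(obj);
    return 0;
}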
@@ -404,7 +404,7 @@ int ssl_trusted_cert_storage_verify_conn(struct ssl_trusted_cert_storage* storag
 
     // case cert verify success
     if (ret == 1) {
-        ssl_fetch_trusted_cert_from_chain(cert_chain, storage->effective_store);
+        ssl_fetch_trusted_cert_from_chain(cert_chain, storage->effective_store, hostname);
     }
 
     X509_STORE_CTX_free(ctx);