feature: adding Kafka infrastructure

This commit is contained in:
luwenpeng
2024-07-17 15:47:07 +08:00
parent b59b736e4d
commit cc5a537940
14 changed files with 336 additions and 17 deletions

View File

@@ -1,6 +1,6 @@
add_library(common src/session_table.cpp src/packet.cpp src/control_packet.cpp src/bfd.cpp src/utils.cpp src/vxlan.cpp src/log.cpp src/timestamp.cpp src/mpack.cpp)
add_library(common src/session_table.cpp src/packet.cpp src/control_packet.cpp src/bfd.cpp src/utils.cpp src/vxlan.cpp src/log.cpp src/timestamp.cpp src/mpack.cpp src/kafka.cpp)
target_link_libraries(common PUBLIC cjson)
target_link_libraries(common PUBLIC MESA_handle_logger)
target_link_libraries(common PUBLIC MESA_handle_logger rdkafka)
target_include_directories(common PUBLIC ${CMAKE_CURRENT_LIST_DIR}/include)

28
common/include/kafka.h Normal file
View File

@@ -0,0 +1,28 @@
// Minimal asynchronous Kafka producer wrapper around librdkafka.
// One producer/topic pair is created per topic index (see kafka.cpp).
// Guard renamed from _KAFKA_H: identifiers starting with an underscore
// followed by an uppercase letter are reserved for the implementation.
#ifndef KAFKA_H
#define KAFKA_H
#ifdef __cplusplus
extern "C"
{
#endif
// Indices of the topics this module can publish to.
enum topic_idx
{
    TOPIC_RULE_HITS,
    // add more topic here
    MAX_TOPIC_NUM,
};
// Create a handle from the given profile (config file) path.
// Returns NULL on failure. The caller owns the handle and must
// release it with kafka_destroy().
struct kafka *kafka_create(const char *profile);
// Destroy a handle created by kafka_create(); NULL is a safe no-op.
void kafka_destroy(struct kafka *handle);
// Asynchronously publish `len` bytes of `data` to topic `idx`.
// The payload is copied; the caller keeps ownership of `data`.
// Parameter type is const void * to match the definition in kafka.cpp
// (the previous const char * declaration conflicted with it).
// return 0: if success
// return -1: if failed
int kafka_send(struct kafka *handle, enum topic_idx idx, const void *data, int len);
#ifdef __cplusplus
}
#endif
#endif

View File

@@ -18,6 +18,7 @@ extern "C"
#define LOG_TAG_UTILS "UTILS"
#define LOG_TAG_HEALTH_CHECK "HEALTH_CHECK"
#define LOG_TAG_TIMESTAMP "TIMESTAMP"
#define LOG_TAG_KAFKA "KAFKA"
#define ATOMIC_INC(x) __atomic_fetch_add(x, 1, __ATOMIC_RELAXED)
#define ATOMIC_DEC(x) __atomic_fetch_sub(x, 1, __ATOMIC_RELAXED)

245
common/src/kafka.cpp Normal file
View File

@@ -0,0 +1,245 @@
#include <stdlib.h>
#include <string.h>
#include "log.h"
#include "utils.h"
#include "kafka.h"
#include <MESA/MESA_prof_load.h>
#include <librdkafka/rdkafka.h>
#define MAX_SYMBOL_LEN 128
#define KAFKA_LOG_ERROR(fmt, ...) LOG_ERROR("%s: " fmt, LOG_TAG_KAFKA, ##__VA_ARGS__)
#define KAFKA_LOG_DEBUG(fmt, ...) LOG_DEBUG("%s: " fmt, LOG_TAG_KAFKA, ##__VA_ARGS__)
// Settings loaded from the profile's [kafka] section (see kafka_create).
struct config
{
char brokerlist[MAX_SYMBOL_LEN];
// SASL/PLAIN credentials; both empty means plaintext (no auth).
char sasl_username[MAX_SYMBOL_LEN];
char sasl_passwd[MAX_SYMBOL_LEN];
char topic_name[MAX_TOPIC_NUM][MAX_SYMBOL_LEN];
};
// One dedicated producer per topic: client.id is set to the topic name
// and is a per-producer property, so topics cannot share a producer.
struct per_producer_per_topic
{
rd_kafka_t *producer;
rd_kafka_topic_t *topic;
};
// Opaque handle returned by kafka_create(): the loaded configuration
// plus one producer/topic pair per topic index.
struct kafka
{
struct config cfg;
struct per_producer_per_topic *pppt[MAX_TOPIC_NUM];
};
/******************************************************************************
* Private API
******************************************************************************/
static void per_producer_per_topic_free(struct per_producer_per_topic *pppt)
{
if (pppt)
{
if (pppt->topic)
{
rd_kafka_topic_destroy(pppt->topic);
pppt->topic = NULL;
}
if (pppt->producer)
{
rd_kafka_destroy(pppt->producer);
pppt->producer = NULL;
}
free(pppt);
pppt = NULL;
}
}
// Build one producer+topic pair connected to `brokerlist`, publishing to
// `topic_name`. client.id is set to the topic name, which is why each
// topic needs its own producer. SASL/PLAIN auth is enabled only when both
// `sasl_username` and `sasl_passwd` are non-empty; otherwise plaintext.
// Returns a heap-allocated pair (caller frees via
// per_producer_per_topic_free) or NULL on failure, with everything
// allocated so far released.
static struct per_producer_per_topic *per_producer_per_topic_new(const char *brokerlist, const char *sasl_username, const char *sasl_passwd, const char *topic_name)
{
char err_str[1024] = {0};
struct per_producer_per_topic *pppt = (struct per_producer_per_topic *)calloc(1, sizeof(struct per_producer_per_topic));
if (!pppt)
{
return NULL;
}
rd_kafka_conf_t *conf = rd_kafka_conf_new();
if (!conf)
{
KAFKA_LOG_ERROR("failed to create kafka conf");
goto error_out;
}
// Allow up to 1M messages to sit in the local producer queue.
if (rd_kafka_conf_set(conf, "queue.buffering.max.messages", "1000000", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
{
KAFKA_LOG_ERROR("failed to set kafka queue.buffering.max.messages, %s", err_str);
goto error_out;
}
// Refresh topic metadata every 10 minutes.
if (rd_kafka_conf_set(conf, "topic.metadata.refresh.interval.ms", "600000", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
{
KAFKA_LOG_ERROR("failed to set kafka topic.metadata.refresh.interval.ms, %s", err_str);
goto error_out;
}
// client.id is per-producer; using the topic name here is the reason
// for the per-producer-per-topic design.
if (rd_kafka_conf_set(conf, "client.id", topic_name, err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
{
KAFKA_LOG_ERROR("failed to set kafka client.id, %s", err_str);
goto error_out;
}
if (strlen(sasl_username) > 0 && strlen(sasl_passwd) > 0)
{
// Credentials provided: authenticate with SASL/PLAIN over plaintext.
if (rd_kafka_conf_set(conf, "security.protocol", "sasl_plaintext", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
{
KAFKA_LOG_ERROR("failed to set kafka security.protocol, %s", err_str);
goto error_out;
}
if (rd_kafka_conf_set(conf, "sasl.mechanisms", "PLAIN", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
{
KAFKA_LOG_ERROR("failed to set kafka sasl.mechanisms, %s", err_str);
goto error_out;
}
if (rd_kafka_conf_set(conf, "sasl.username", sasl_username, err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
{
KAFKA_LOG_ERROR("failed to set kafka sasl.username, %s", err_str);
goto error_out;
}
if (rd_kafka_conf_set(conf, "sasl.password", sasl_passwd, err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
{
KAFKA_LOG_ERROR("failed to set kafka sasl.password, %s", err_str);
goto error_out;
}
}
else
{
// No credentials: unauthenticated plaintext connection.
if (rd_kafka_conf_set(conf, "security.protocol", "plaintext", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
{
KAFKA_LOG_ERROR("failed to set kafka security.protocol, %s", err_str);
goto error_out;
}
}
// On success rd_kafka_new() takes ownership of conf and frees it; the
// application must not use or destroy conf afterwards. conf is nulled
// here so the error path below cannot double-free it.
pppt->producer = rd_kafka_new(RD_KAFKA_PRODUCER, conf, err_str, sizeof(err_str));
conf = NULL;
if (pppt->producer == NULL)
{
KAFKA_LOG_ERROR("failed to create kafka producer, %s", err_str);
goto error_out;
}
// Returns the number of brokers added; 0 means none were usable.
if (rd_kafka_brokers_add(pppt->producer, brokerlist) == 0)
{
KAFKA_LOG_ERROR("failed to add kafka brokers");
goto error_out;
}
pppt->topic = rd_kafka_topic_new(pppt->producer, topic_name, NULL);
if (pppt->topic == NULL)
{
KAFKA_LOG_ERROR("failed to create kafka topic: %s", topic_name);
goto error_out;
}
return pppt;
error_out:
// conf is only non-NULL if rd_kafka_new() was never reached.
if (conf)
{
rd_kafka_conf_destroy(conf);
}
per_producer_per_topic_free(pppt);
return NULL;
}
/******************************************************************************
* Public API -- Kafka
******************************************************************************/
// due to limit by client.id, need per producer per topic
struct kafka *kafka_create(const char *profile)
{
struct kafka *handle = (struct kafka *)calloc(1, sizeof(struct kafka));
if (!handle)
{
return NULL;
}
MESA_load_profile_string_def(profile, "kafka", "brokerlist", handle->cfg.brokerlist, sizeof(handle->cfg.brokerlist), "");
MESA_load_profile_string_def(profile, "kafka", "sasl_username", handle->cfg.sasl_username, sizeof(handle->cfg.sasl_username), "");
MESA_load_profile_string_def(profile, "kafka", "sasl_passwd", handle->cfg.sasl_passwd, sizeof(handle->cfg.sasl_passwd), "");
MESA_load_profile_string_def(profile, "kafka", "topic_name", handle->cfg.topic_name[TOPIC_RULE_HITS], sizeof(handle->cfg.topic_name[TOPIC_RULE_HITS]), "");
if (strlen(handle->cfg.brokerlist) == 0)
{
KAFKA_LOG_ERROR("brokerlist is empty");
goto error_out;
}
for (int i = 0; i < MAX_TOPIC_NUM; i++)
{
if (strlen(handle->cfg.topic_name[i]) == 0)
{
KAFKA_LOG_ERROR("topic_name[%d] is empty", i);
goto error_out;
}
}
for (int i = 0; i < MAX_TOPIC_NUM; i++)
{
handle->pppt[i] = per_producer_per_topic_new(handle->cfg.brokerlist, handle->cfg.sasl_username, handle->cfg.sasl_passwd, handle->cfg.topic_name[i]);
if (!handle->pppt[i])
{
goto error_out;
}
}
return handle;
error_out:
kafka_destroy(handle);
return NULL;
}
// Tear down a handle created by kafka_create(), releasing every
// producer/topic pair. Passing NULL is a safe no-op.
void kafka_destroy(struct kafka *handle)
{
if (handle == NULL)
{
return;
}
for (int i = 0; i < MAX_TOPIC_NUM; ++i)
{
per_producer_per_topic_free(handle->pppt[i]);
handle->pppt[i] = NULL;
}
free(handle);
}
int kafka_send(struct kafka *handle, enum topic_idx idx, const void *data, int len)
{
if (idx < 0 || idx >= MAX_TOPIC_NUM)
{
KAFKA_LOG_ERROR("invalid topic index: %d", idx);
return -1;
}
if (handle->pppt[idx])
{
if (rd_kafka_produce(handle->pppt[idx]->topic, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY, (void *)data, len, NULL, 0, NULL) == -1)
{
KAFKA_LOG_ERROR("failed to produce message with topic [%d], %s", idx, rd_kafka_err2str(rd_kafka_last_error()));
return -1;
}
else
{
return 0;
}
}
else
{
KAFKA_LOG_ERROR("topic %d not initialized", idx);
return -1;
}
}