feature: add Kafka infrastructure
This commit is contained in:
@@ -42,6 +42,7 @@ yum install -y libMESA_field_stat2-devel
|
||||
yum install -y numactl-libs # required by mrzcpd
|
||||
yum install -y libibverbs # required by mrzcpd
|
||||
yum install -y libbreakpad_mini-devel
|
||||
yum install -y librdkafka-devel-1.2.2.1218b3c
|
||||
|
||||
if [ $ASAN_OPTION ] && [ -f "/opt/rh/devtoolset-7/enable" ]; then
|
||||
source /opt/rh/devtoolset-7/enable
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
add_library(common src/session_table.cpp src/packet.cpp src/control_packet.cpp src/bfd.cpp src/utils.cpp src/vxlan.cpp src/log.cpp src/timestamp.cpp src/mpack.cpp)
|
||||
add_library(common src/session_table.cpp src/packet.cpp src/control_packet.cpp src/bfd.cpp src/utils.cpp src/vxlan.cpp src/log.cpp src/timestamp.cpp src/mpack.cpp src/kafka.cpp)
|
||||
target_link_libraries(common PUBLIC cjson)
|
||||
target_link_libraries(common PUBLIC MESA_handle_logger)
|
||||
target_link_libraries(common PUBLIC MESA_handle_logger rdkafka)
|
||||
|
||||
target_include_directories(common PUBLIC ${CMAKE_CURRENT_LIST_DIR}/include)
|
||||
|
||||
|
||||
28
common/include/kafka.h
Normal file
28
common/include/kafka.h
Normal file
@@ -0,0 +1,28 @@
|
||||
#ifndef _KAFKA_H
#define _KAFKA_H

#ifdef __cplusplus
extern "C"
{
#endif

// Index of a Kafka topic managed by one kafka handle.
// One topic_name entry is loaded from the [kafka] profile section per index.
enum topic_idx
{
    TOPIC_RULE_HITS,

    // add more topic here

    MAX_TOPIC_NUM,
};

// Create a Kafka handle from the [kafka] section of `profile`
// (brokerlist, sasl_username, sasl_passwd, topic_name).
// Returns NULL on failure.
struct kafka *kafka_create(const char *profile);
// Release all producers/topics and the handle itself. NULL-safe.
void kafka_destroy(struct kafka *handle);
// Enqueue `len` bytes of `data` for the topic selected by `idx`.
// `data` is `const void *` to match the definition in kafka.cpp; the
// previous `const char *` declaration did not match the definition and
// breaks C++ linkage (callers passing char* are unaffected).
// return 0: if success
// return -1: if failed
int kafka_send(struct kafka *handle, enum topic_idx idx, const void *data, int len);

#ifdef __cplusplus
}
#endif

#endif
|
||||
@@ -18,6 +18,7 @@ extern "C"
|
||||
#define LOG_TAG_UTILS "UTILS"
|
||||
#define LOG_TAG_HEALTH_CHECK "HEALTH_CHECK"
|
||||
#define LOG_TAG_TIMESTAMP "TIMESTAMP"
|
||||
#define LOG_TAG_KAFKA "KAFKA"
|
||||
|
||||
#define ATOMIC_INC(x) __atomic_fetch_add(x, 1, __ATOMIC_RELAXED)
|
||||
#define ATOMIC_DEC(x) __atomic_fetch_sub(x, 1, __ATOMIC_RELAXED)
|
||||
|
||||
245
common/src/kafka.cpp
Normal file
245
common/src/kafka.cpp
Normal file
@@ -0,0 +1,245 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "log.h"
|
||||
#include "utils.h"
|
||||
#include "kafka.h"
|
||||
#include <MESA/MESA_prof_load.h>
|
||||
#include <librdkafka/rdkafka.h>
|
||||
|
||||
#define MAX_SYMBOL_LEN 128
|
||||
|
||||
#define KAFKA_LOG_ERROR(fmt, ...) LOG_ERROR("%s: " fmt, LOG_TAG_KAFKA, ##__VA_ARGS__)
|
||||
#define KAFKA_LOG_DEBUG(fmt, ...) LOG_DEBUG("%s: " fmt, LOG_TAG_KAFKA, ##__VA_ARGS__)
|
||||
|
||||
struct config
|
||||
{
|
||||
char brokerlist[MAX_SYMBOL_LEN];
|
||||
char sasl_username[MAX_SYMBOL_LEN];
|
||||
char sasl_passwd[MAX_SYMBOL_LEN];
|
||||
char topic_name[MAX_TOPIC_NUM][MAX_SYMBOL_LEN];
|
||||
};
|
||||
|
||||
struct per_producer_per_topic
|
||||
{
|
||||
rd_kafka_t *producer;
|
||||
rd_kafka_topic_t *topic;
|
||||
};
|
||||
|
||||
struct kafka
|
||||
{
|
||||
struct config cfg;
|
||||
struct per_producer_per_topic *pppt[MAX_TOPIC_NUM];
|
||||
};
|
||||
|
||||
/******************************************************************************
|
||||
* Private API
|
||||
******************************************************************************/
|
||||
|
||||
static void per_producer_per_topic_free(struct per_producer_per_topic *pppt)
|
||||
{
|
||||
if (pppt)
|
||||
{
|
||||
if (pppt->topic)
|
||||
{
|
||||
rd_kafka_topic_destroy(pppt->topic);
|
||||
pppt->topic = NULL;
|
||||
}
|
||||
|
||||
if (pppt->producer)
|
||||
{
|
||||
rd_kafka_destroy(pppt->producer);
|
||||
pppt->producer = NULL;
|
||||
}
|
||||
|
||||
free(pppt);
|
||||
pppt = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static struct per_producer_per_topic *per_producer_per_topic_new(const char *brokerlist, const char *sasl_username, const char *sasl_passwd, const char *topic_name)
|
||||
{
|
||||
char err_str[1024] = {0};
|
||||
struct per_producer_per_topic *pppt = (struct per_producer_per_topic *)calloc(1, sizeof(struct per_producer_per_topic));
|
||||
if (!pppt)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
rd_kafka_conf_t *conf = rd_kafka_conf_new();
|
||||
if (!conf)
|
||||
{
|
||||
KAFKA_LOG_ERROR("failed to create kafka conf");
|
||||
goto error_out;
|
||||
}
|
||||
if (rd_kafka_conf_set(conf, "queue.buffering.max.messages", "1000000", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
|
||||
{
|
||||
KAFKA_LOG_ERROR("failed to set kafka queue.buffering.max.messages, %s", err_str);
|
||||
goto error_out;
|
||||
}
|
||||
if (rd_kafka_conf_set(conf, "topic.metadata.refresh.interval.ms", "600000", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
|
||||
{
|
||||
KAFKA_LOG_ERROR("failed to set kafka topic.metadata.refresh.interval.ms, %s", err_str);
|
||||
goto error_out;
|
||||
}
|
||||
if (rd_kafka_conf_set(conf, "client.id", topic_name, err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
|
||||
{
|
||||
KAFKA_LOG_ERROR("failed to set kafka client.id, %s", err_str);
|
||||
goto error_out;
|
||||
}
|
||||
if (strlen(sasl_username) > 0 && strlen(sasl_passwd) > 0)
|
||||
{
|
||||
if (rd_kafka_conf_set(conf, "security.protocol", "sasl_plaintext", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
|
||||
{
|
||||
KAFKA_LOG_ERROR("failed to set kafka security.protocol, %s", err_str);
|
||||
goto error_out;
|
||||
}
|
||||
if (rd_kafka_conf_set(conf, "sasl.mechanisms", "PLAIN", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
|
||||
{
|
||||
KAFKA_LOG_ERROR("failed to set kafka sasl.mechanisms, %s", err_str);
|
||||
goto error_out;
|
||||
}
|
||||
if (rd_kafka_conf_set(conf, "sasl.username", sasl_username, err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
|
||||
{
|
||||
KAFKA_LOG_ERROR("failed to set kafka sasl.username, %s", err_str);
|
||||
goto error_out;
|
||||
}
|
||||
if (rd_kafka_conf_set(conf, "sasl.password", sasl_passwd, err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
|
||||
{
|
||||
KAFKA_LOG_ERROR("failed to set kafka sasl.password, %s", err_str);
|
||||
goto error_out;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (rd_kafka_conf_set(conf, "security.protocol", "plaintext", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
|
||||
{
|
||||
KAFKA_LOG_ERROR("failed to set kafka security.protocol, %s", err_str);
|
||||
goto error_out;
|
||||
}
|
||||
}
|
||||
|
||||
// The conf object is freed by this function and must not be used or destroyed by the application sub-sequently.
|
||||
pppt->producer = rd_kafka_new(RD_KAFKA_PRODUCER, conf, err_str, sizeof(err_str));
|
||||
conf = NULL;
|
||||
if (pppt->producer == NULL)
|
||||
{
|
||||
KAFKA_LOG_ERROR("failed to create kafka producer, %s", err_str);
|
||||
goto error_out;
|
||||
}
|
||||
|
||||
if (rd_kafka_brokers_add(pppt->producer, brokerlist) == 0)
|
||||
{
|
||||
KAFKA_LOG_ERROR("failed to add kafka brokers");
|
||||
goto error_out;
|
||||
}
|
||||
|
||||
pppt->topic = rd_kafka_topic_new(pppt->producer, topic_name, NULL);
|
||||
if (pppt->topic == NULL)
|
||||
{
|
||||
KAFKA_LOG_ERROR("failed to create kafka topic: %s", topic_name);
|
||||
goto error_out;
|
||||
}
|
||||
|
||||
return pppt;
|
||||
|
||||
error_out:
|
||||
if (conf)
|
||||
{
|
||||
rd_kafka_conf_destroy(conf);
|
||||
}
|
||||
|
||||
per_producer_per_topic_free(pppt);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
* Public API -- Kafka
|
||||
******************************************************************************/
|
||||
|
||||
// due to limit by client.id, need per producer per topic
|
||||
struct kafka *kafka_create(const char *profile)
|
||||
{
|
||||
struct kafka *handle = (struct kafka *)calloc(1, sizeof(struct kafka));
|
||||
if (!handle)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
MESA_load_profile_string_def(profile, "kafka", "brokerlist", handle->cfg.brokerlist, sizeof(handle->cfg.brokerlist), "");
|
||||
MESA_load_profile_string_def(profile, "kafka", "sasl_username", handle->cfg.sasl_username, sizeof(handle->cfg.sasl_username), "");
|
||||
MESA_load_profile_string_def(profile, "kafka", "sasl_passwd", handle->cfg.sasl_passwd, sizeof(handle->cfg.sasl_passwd), "");
|
||||
MESA_load_profile_string_def(profile, "kafka", "topic_name", handle->cfg.topic_name[TOPIC_RULE_HITS], sizeof(handle->cfg.topic_name[TOPIC_RULE_HITS]), "");
|
||||
|
||||
if (strlen(handle->cfg.brokerlist) == 0)
|
||||
{
|
||||
KAFKA_LOG_ERROR("brokerlist is empty");
|
||||
goto error_out;
|
||||
}
|
||||
|
||||
for (int i = 0; i < MAX_TOPIC_NUM; i++)
|
||||
{
|
||||
if (strlen(handle->cfg.topic_name[i]) == 0)
|
||||
{
|
||||
KAFKA_LOG_ERROR("topic_name[%d] is empty", i);
|
||||
goto error_out;
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = 0; i < MAX_TOPIC_NUM; i++)
|
||||
{
|
||||
handle->pppt[i] = per_producer_per_topic_new(handle->cfg.brokerlist, handle->cfg.sasl_username, handle->cfg.sasl_passwd, handle->cfg.topic_name[i]);
|
||||
if (!handle->pppt[i])
|
||||
{
|
||||
goto error_out;
|
||||
}
|
||||
}
|
||||
|
||||
return handle;
|
||||
|
||||
error_out:
|
||||
kafka_destroy(handle);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void kafka_destroy(struct kafka *handle)
|
||||
{
|
||||
if (handle)
|
||||
{
|
||||
for (int i = 0; i < MAX_TOPIC_NUM; i++)
|
||||
{
|
||||
per_producer_per_topic_free(handle->pppt[i]);
|
||||
handle->pppt[i] = NULL;
|
||||
}
|
||||
|
||||
free(handle);
|
||||
handle = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
int kafka_send(struct kafka *handle, enum topic_idx idx, const void *data, int len)
|
||||
{
|
||||
if (idx < 0 || idx >= MAX_TOPIC_NUM)
|
||||
{
|
||||
KAFKA_LOG_ERROR("invalid topic index: %d", idx);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (handle->pppt[idx])
|
||||
{
|
||||
if (rd_kafka_produce(handle->pppt[idx]->topic, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY, (void *)data, len, NULL, 0, NULL) == -1)
|
||||
{
|
||||
KAFKA_LOG_ERROR("failed to produce message with topic [%d], %s", idx, rd_kafka_err2str(rd_kafka_last_error()));
|
||||
return -1;
|
||||
}
|
||||
else
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
KAFKA_LOG_ERROR("topic %d not initialized", idx);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@@ -71,7 +71,7 @@ prometheus_listen_port=9001
|
||||
prometheus_listen_url=/sce_prometheus
|
||||
|
||||
[metrics]
|
||||
# Kafka Topic: POLICY-RULE-METRICS
|
||||
# Kafka Topic: POLICY-RULE-METRIC
|
||||
enable=1
|
||||
interval_s=1
|
||||
telegraf_bind_address=127.0.0.1
|
||||
@@ -86,3 +86,9 @@ device=eth0
|
||||
local_address=127.0.0.1
|
||||
gateway=127.0.0.1
|
||||
icmp_cycle_time_s=10
|
||||
|
||||
[kafka]
|
||||
brokerlist=192.168.40.224:9092
|
||||
sasl_username=admin
|
||||
sasl_passwd=galaxy2019
|
||||
topic_name=POLICY-RULE-METRIC
|
||||
@@ -8,6 +8,7 @@ extern "C"
|
||||
|
||||
#include <sched.h>
|
||||
|
||||
#include "kafka.h"
|
||||
#include "policy.h"
|
||||
#include "timestamp.h"
|
||||
#include "packet_io.h"
|
||||
@@ -99,6 +100,7 @@ struct sce_ctx
|
||||
int cpu_affinity_mask[MAX_THREAD_NUM];
|
||||
|
||||
cpu_set_t coremask;
|
||||
struct kafka *kfk;
|
||||
struct timestamp *ts;
|
||||
struct packet_io *io;
|
||||
struct global_metrics *metrics;
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <assert.h>
|
||||
#include <MESA/field_stat2.h>
|
||||
#include <MESA/MESA_prof_load.h>
|
||||
|
||||
#include "log.h"
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
|
||||
#include "sce.h"
|
||||
#include "log.h"
|
||||
#include "kafka.h"
|
||||
#include "global_metrics.h"
|
||||
|
||||
char *memdup(const char *src, int len)
|
||||
@@ -87,6 +88,12 @@ struct sce_ctx *sce_ctx_create(const char *profile)
|
||||
CPU_SET(cpu_id, &sce_ctx->coremask);
|
||||
}
|
||||
|
||||
sce_ctx->kfk = kafka_create(profile);
|
||||
if (sce_ctx->kfk == NULL)
|
||||
{
|
||||
goto error_out;
|
||||
}
|
||||
|
||||
sce_ctx->ts = timestamp_new(sce_ctx->ts_update_interval_ms);
|
||||
sce_ctx->metrics = global_metrics_create(profile, sce_ctx->nr_worker_threads);
|
||||
if (sce_ctx->metrics == NULL)
|
||||
@@ -126,6 +133,7 @@ void sce_ctx_destory(struct sce_ctx *sce_ctx)
|
||||
policy_enforcer_destory(sce_ctx->enforcer);
|
||||
global_metrics_destory(sce_ctx->metrics);
|
||||
timestamp_free(sce_ctx->ts);
|
||||
kafka_destroy(sce_ctx->kfk);
|
||||
|
||||
free(sce_ctx);
|
||||
sce_ctx = NULL;
|
||||
|
||||
@@ -2,7 +2,8 @@
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <stdint.h>
|
||||
#include <marsio.h>
|
||||
|
||||
#include "gmock_marsio.h"
|
||||
|
||||
struct mr_instance
|
||||
{
|
||||
|
||||
23
test/gmock_marsio.h
Normal file
23
test/gmock_marsio.h
Normal file
@@ -0,0 +1,23 @@
|
||||
#ifndef _GMOCK_MARSIO_H
#define _GMOCK_MARSIO_H

#ifdef __cplusplus
extern "C"
{
#endif

#include <marsio.h>

// new add, only for gtest: inject / inspect the mbuffs held by the mocked
// marsio instance.
void marsio_set_recv_mbuff(struct mr_instance *instance, marsio_buff_t *mbuff);
void marsio_set_send_mbuff(struct mr_instance *instance, marsio_buff_t *mbuff);
marsio_buff_t *marsio_get_recv_mbuff(struct mr_instance *instance);
marsio_buff_t *marsio_get_send_mbuff(struct mr_instance *instance);

// mbuff helpers for test assertions.
int marsio_mbuff_cmp(marsio_buff_t *mbuff1, marsio_buff_t *mbuff2);
marsio_buff_t *marsio_mbuff_dup(marsio_buff_t *m);

#ifdef __cplusplus
}
#endif

#endif
|
||||
@@ -1,30 +1,23 @@
|
||||
#ifndef _GTEST_UTILS_H
|
||||
#define _GTEST_UTILS_H
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif
|
||||
|
||||
#include <sys/stat.h>
|
||||
#include <gtest/gtest.h>
|
||||
|
||||
#include "sce.h"
|
||||
#include "log.h"
|
||||
#include <marsio.h>
|
||||
#include "vxlan.h"
|
||||
#include "packet_io.h"
|
||||
#include "sf_metrics.h"
|
||||
#include "health_check.h"
|
||||
#include "global_metrics.h"
|
||||
|
||||
// new add, only for gtest
|
||||
extern void marsio_set_recv_mbuff(struct mr_instance *instance, marsio_buff_t *mbuff);
|
||||
extern void marsio_set_send_mbuff(struct mr_instance *instance, marsio_buff_t *mbuff);
|
||||
extern marsio_buff_t *marsio_get_recv_mbuff(struct mr_instance *instance);
|
||||
extern marsio_buff_t *marsio_get_send_mbuff(struct mr_instance *instance);
|
||||
extern int marsio_mbuff_cmp(marsio_buff_t *mbuff1, marsio_buff_t *mbuff2);
|
||||
extern marsio_buff_t *marsio_mbuff_dup(marsio_buff_t *m);
|
||||
#include "gmock_marsio.h"
|
||||
|
||||
#define set_metadata(meta, id, offset, is_ctrl, is_decrypt) \
|
||||
{ \
|
||||
|
||||
@@ -70,7 +70,7 @@ prometheus_listen_port=9001
|
||||
prometheus_listen_url=/sce_prometheus
|
||||
|
||||
[metrics]
|
||||
# Kafka Topic: POLICY-RULE-METRICS
|
||||
# Kafka Topic: POLICY-RULE-METRIC
|
||||
enable=0
|
||||
interval_s=1
|
||||
telegraf_bind_address=127.0.0.1
|
||||
@@ -82,3 +82,9 @@ path=/run/run/frr/bfdd.vty
|
||||
device=eth0
|
||||
local_address=127.0.0.1
|
||||
gateway=127.0.0.1
|
||||
|
||||
[kafka]
|
||||
brokerlist=192.168.40.224:9092
|
||||
sasl_username=admin
|
||||
sasl_passwd=galaxy2019
|
||||
topic_name=POLICY-RULE-METRIC
|
||||
4
vendor/CMakeLists.txt
vendored
4
vendor/CMakeLists.txt
vendored
@@ -40,6 +40,10 @@ set_property(TARGET cjson PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${INSTALL_DIR}/
|
||||
set(MESA_FRAMEWORK_LIB_DIR /opt/MESA/lib)
|
||||
set(MESA_FRAMEWORK_INCLUDE_DIR /opt/MESA/include)
|
||||
|
||||
# Imported librdkafka shipped with the MESA framework; consumers link the
# `rdkafka` target and inherit its include directory.
add_library(rdkafka SHARED IMPORTED GLOBAL)
set_target_properties(rdkafka PROPERTIES
  IMPORTED_LOCATION "${MESA_FRAMEWORK_LIB_DIR}/librdkafka.so"
  INTERFACE_INCLUDE_DIRECTORIES "${MESA_FRAMEWORK_INCLUDE_DIR}/MESA/"
)
|
||||
|
||||
add_library(MESA_handle_logger SHARED IMPORTED GLOBAL)
|
||||
set_property(TARGET MESA_handle_logger PROPERTY IMPORTED_LOCATION ${MESA_FRAMEWORK_LIB_DIR}/libMESA_handle_logger.so)
|
||||
set_property(TARGET MESA_handle_logger PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${MESA_FRAMEWORK_INCLUDE_DIR})
|
||||
|
||||
Reference in New Issue
Block a user