diff --git a/ci/travis.sh b/ci/travis.sh
index 7dd23f6..9b98f3f 100644
--- a/ci/travis.sh
+++ b/ci/travis.sh
@@ -42,6 +42,7 @@ yum install -y libMESA_field_stat2-devel
 yum install -y numactl-libs # required by mrzcpd
 yum install -y libibverbs # required by mrzcpd
 yum install -y libbreakpad_mini-devel
+yum install -y librdkafka-devel-1.2.2.1218b3c
 
 if [ $ASAN_OPTION ] && [ -f "/opt/rh/devtoolset-7/enable" ]; then
     source /opt/rh/devtoolset-7/enable
diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt
index 5f6ff2e..247786b 100644
--- a/common/CMakeLists.txt
+++ b/common/CMakeLists.txt
@@ -1,6 +1,6 @@
-add_library(common src/session_table.cpp src/packet.cpp src/control_packet.cpp src/bfd.cpp src/utils.cpp src/vxlan.cpp src/log.cpp src/timestamp.cpp src/mpack.cpp)
+add_library(common src/session_table.cpp src/packet.cpp src/control_packet.cpp src/bfd.cpp src/utils.cpp src/vxlan.cpp src/log.cpp src/timestamp.cpp src/mpack.cpp src/kafka.cpp)
 
 target_link_libraries(common PUBLIC cjson)
-target_link_libraries(common PUBLIC MESA_handle_logger)
+target_link_libraries(common PUBLIC MESA_handle_logger rdkafka)
 
 target_include_directories(common PUBLIC ${CMAKE_CURRENT_LIST_DIR}/include)
diff --git a/common/include/kafka.h b/common/include/kafka.h
new file mode 100644
index 0000000..ffb9911
--- /dev/null
+++ b/common/include/kafka.h
@@ -0,0 +1,28 @@
+#ifndef _KAFKA_H
+#define _KAFKA_H
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+enum topic_idx
+{
+    TOPIC_RULE_HITS,
+
+    // add more topics here
+
+    MAX_TOPIC_NUM,
+};
+
+struct kafka *kafka_create(const char *profile);
+void kafka_destroy(struct kafka *handle);
+// returns 0 on success
+// returns -1 on failure
+int kafka_send(struct kafka *handle, enum topic_idx idx, const void *data, int len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/common/include/utils.h b/common/include/utils.h
index 713c254..8319ed1 100644
--- a/common/include/utils.h
+++ b/common/include/utils.h
@@ -18,6 +18,7 @@ extern "C"
 #define LOG_TAG_UTILS "UTILS"
 #define LOG_TAG_HEALTH_CHECK "HEALTH_CHECK"
 #define LOG_TAG_TIMESTAMP "TIMESTAMP"
+#define LOG_TAG_KAFKA "KAFKA"
 
 #define ATOMIC_INC(x) __atomic_fetch_add(x, 1, __ATOMIC_RELAXED)
 #define ATOMIC_DEC(x) __atomic_fetch_sub(x, 1, __ATOMIC_RELAXED)
diff --git a/common/src/kafka.cpp b/common/src/kafka.cpp
new file mode 100644
index 0000000..2a06c92
--- /dev/null
+++ b/common/src/kafka.cpp
@@ -0,0 +1,245 @@
+#include <stdlib.h>
+#include <string.h>
+
+#include "log.h"
+#include "utils.h"
+#include "kafka.h"
+#include <rdkafka.h>             // librdkafka C API; path matches the include dir set in vendor/CMakeLists.txt
+#include <MESA/MESA_prof_load.h> // assumed header name for MESA_load_profile_string_def
+
+#define MAX_SYMBOL_LEN 128
+
+#define KAFKA_LOG_ERROR(fmt, ...) LOG_ERROR("%s: " fmt, LOG_TAG_KAFKA, ##__VA_ARGS__)
+#define KAFKA_LOG_DEBUG(fmt, ...) LOG_DEBUG("%s: " fmt, LOG_TAG_KAFKA, ##__VA_ARGS__)
+
+struct config
+{
+    char brokerlist[MAX_SYMBOL_LEN];
+    char sasl_username[MAX_SYMBOL_LEN];
+    char sasl_passwd[MAX_SYMBOL_LEN];
+    char topic_name[MAX_TOPIC_NUM][MAX_SYMBOL_LEN];
+};
+
+struct per_producer_per_topic
+{
+    rd_kafka_t *producer;
+    rd_kafka_topic_t *topic;
+};
+
+struct kafka
+{
+    struct config cfg;
+    struct per_producer_per_topic *pppt[MAX_TOPIC_NUM];
+};
+
+/******************************************************************************
+ * Private API
+ ******************************************************************************/
+
+static void per_producer_per_topic_free(struct per_producer_per_topic *pppt)
+{
+    if (pppt)
+    {
+        if (pppt->topic)
+        {
+            rd_kafka_topic_destroy(pppt->topic);
+            pppt->topic = NULL;
+        }
+
+        if (pppt->producer)
+        {
+            rd_kafka_destroy(pppt->producer);
+            pppt->producer = NULL;
+        }
+
+        free(pppt);
+        pppt = NULL;
+    }
+}
+
+static struct per_producer_per_topic *per_producer_per_topic_new(const char *brokerlist, const char *sasl_username, const char *sasl_passwd, const char *topic_name)
+{
+    char err_str[1024] = {0};
+    struct per_producer_per_topic *pppt = (struct per_producer_per_topic *)calloc(1, sizeof(struct per_producer_per_topic));
+    if (!pppt)
+    {
+        return NULL;
+    }
+
+    rd_kafka_conf_t *conf = rd_kafka_conf_new();
+    if (!conf)
+    {
+        KAFKA_LOG_ERROR("failed to create kafka conf");
+        goto error_out;
+    }
+    if (rd_kafka_conf_set(conf, "queue.buffering.max.messages", "1000000", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+    {
+        KAFKA_LOG_ERROR("failed to set kafka queue.buffering.max.messages, %s", err_str);
+        goto error_out;
+    }
+    if (rd_kafka_conf_set(conf, "topic.metadata.refresh.interval.ms", "600000", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+    {
+        KAFKA_LOG_ERROR("failed to set kafka topic.metadata.refresh.interval.ms, %s", err_str);
+        goto error_out;
+    }
+    if (rd_kafka_conf_set(conf, "client.id", topic_name, err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+    {
+        KAFKA_LOG_ERROR("failed to set kafka client.id, %s", err_str);
+        goto error_out;
+    }
+    if (strlen(sasl_username) > 0 && strlen(sasl_passwd) > 0)
+    {
+        if (rd_kafka_conf_set(conf, "security.protocol", "sasl_plaintext", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+        {
+            KAFKA_LOG_ERROR("failed to set kafka security.protocol, %s", err_str);
+            goto error_out;
+        }
+        if (rd_kafka_conf_set(conf, "sasl.mechanisms", "PLAIN", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+        {
+            KAFKA_LOG_ERROR("failed to set kafka sasl.mechanisms, %s", err_str);
+            goto error_out;
+        }
+        if (rd_kafka_conf_set(conf, "sasl.username", sasl_username, err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+        {
+            KAFKA_LOG_ERROR("failed to set kafka sasl.username, %s", err_str);
+            goto error_out;
+        }
+        if (rd_kafka_conf_set(conf, "sasl.password", sasl_passwd, err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+        {
+            KAFKA_LOG_ERROR("failed to set kafka sasl.password, %s", err_str);
+            goto error_out;
+        }
+    }
+    else
+    {
+        if (rd_kafka_conf_set(conf, "security.protocol", "plaintext", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+        {
+            KAFKA_LOG_ERROR("failed to set kafka security.protocol, %s", err_str);
+            goto error_out;
+        }
+    }
+
+    // The conf object is freed by rd_kafka_new() and must not be used or destroyed by the application subsequently.
+    pppt->producer = rd_kafka_new(RD_KAFKA_PRODUCER, conf, err_str, sizeof(err_str));
+    conf = NULL;
+    if (pppt->producer == NULL)
+    {
+        KAFKA_LOG_ERROR("failed to create kafka producer, %s", err_str);
+        goto error_out;
+    }
+
+    if (rd_kafka_brokers_add(pppt->producer, brokerlist) == 0)
+    {
+        KAFKA_LOG_ERROR("failed to add kafka brokers");
+        goto error_out;
+    }
+
+    pppt->topic = rd_kafka_topic_new(pppt->producer, topic_name, NULL);
+    if (pppt->topic == NULL)
+    {
+        KAFKA_LOG_ERROR("failed to create kafka topic: %s", topic_name);
+        goto error_out;
+    }
+
+    return pppt;
+
+error_out:
+    if (conf)
+    {
+        rd_kafka_conf_destroy(conf);
+    }
+
+    per_producer_per_topic_free(pppt);
+    return NULL;
+}
+
+/******************************************************************************
+ * Public API -- Kafka
+ ******************************************************************************/
+
+// client.id is set to the topic name, so each topic needs its own producer
+struct kafka *kafka_create(const char *profile)
+{
+    struct kafka *handle = (struct kafka *)calloc(1, sizeof(struct kafka));
+    if (!handle)
+    {
+        return NULL;
+    }
+
+    MESA_load_profile_string_def(profile, "kafka", "brokerlist", handle->cfg.brokerlist, sizeof(handle->cfg.brokerlist), "");
+    MESA_load_profile_string_def(profile, "kafka", "sasl_username", handle->cfg.sasl_username, sizeof(handle->cfg.sasl_username), "");
+    MESA_load_profile_string_def(profile, "kafka", "sasl_passwd", handle->cfg.sasl_passwd, sizeof(handle->cfg.sasl_passwd), "");
+    MESA_load_profile_string_def(profile, "kafka", "topic_name", handle->cfg.topic_name[TOPIC_RULE_HITS], sizeof(handle->cfg.topic_name[TOPIC_RULE_HITS]), "");
+
+    if (strlen(handle->cfg.brokerlist) == 0)
+    {
+        KAFKA_LOG_ERROR("brokerlist is empty");
+        goto error_out;
+    }
+
+    for (int i = 0; i < MAX_TOPIC_NUM; i++)
+    {
+        if (strlen(handle->cfg.topic_name[i]) == 0)
+        {
+            KAFKA_LOG_ERROR("topic_name[%d] is empty", i);
+            goto error_out;
+        }
+    }
+
+    for (int i = 0; i < MAX_TOPIC_NUM; i++)
+    {
+        handle->pppt[i] = per_producer_per_topic_new(handle->cfg.brokerlist, handle->cfg.sasl_username, handle->cfg.sasl_passwd, handle->cfg.topic_name[i]);
+        if (!handle->pppt[i])
+        {
+            goto error_out;
+        }
+    }
+
+    return handle;
+
+error_out:
+    kafka_destroy(handle);
+    return NULL;
+}
+
+void kafka_destroy(struct kafka *handle)
+{
+    if (handle)
+    {
+        for (int i = 0; i < MAX_TOPIC_NUM; i++)
+        {
+            per_producer_per_topic_free(handle->pppt[i]);
+            handle->pppt[i] = NULL;
+        }
+
+        free(handle);
+        handle = NULL;
+    }
+}
+
+int kafka_send(struct kafka *handle, enum topic_idx idx, const void *data, int len)
+{
+    if (idx < 0 || idx >= MAX_TOPIC_NUM)
+    {
+        KAFKA_LOG_ERROR("invalid topic index: %d", idx);
+        return -1;
+    }
+
+    if (handle->pppt[idx])
+    {
+        if (rd_kafka_produce(handle->pppt[idx]->topic, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY, (void *)data, len, NULL, 0, NULL) == -1)
+        {
+            KAFKA_LOG_ERROR("failed to produce message with topic [%d], %s", idx, rd_kafka_err2str(rd_kafka_last_error()));
+            return -1;
+        }
+        else
+        {
+            return 0;
+        }
+    }
+    else
+    {
+        KAFKA_LOG_ERROR("topic %d not initialized", idx);
+        return -1;
+    }
+}
\ No newline at end of file
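Usage note (not part of the patch): the sketch below shows how a caller might drive the new producer API end to end, under assumptions -- the profile path conf/sce.conf, the helper name publish_rule_hit_example, and the JSON payload are all illustrative; only kafka_create, kafka_send, kafka_destroy, and TOPIC_RULE_HITS come from this change.

#include <string.h>

#include "kafka.h"

// Hypothetical caller, for illustration only.
int publish_rule_hit_example(void)
{
    // kafka_create() reads the [kafka] section (brokerlist, sasl_username,
    // sasl_passwd, topic_name) from the given profile.
    struct kafka *kfk = kafka_create("conf/sce.conf"); // profile path is an assumption
    if (kfk == NULL)
    {
        return -1; // empty brokerlist/topic_name, or producer setup failed
    }

    // Payload is copied by librdkafka (RD_KAFKA_MSG_F_COPY), so a local buffer is fine.
    const char *payload = "{\"rule_id\": 1, \"hits\": 42}"; // illustrative JSON body
    int ret = kafka_send(kfk, TOPIC_RULE_HITS, payload, (int)strlen(payload));

    kafka_destroy(kfk);
    return ret; // 0 on success, -1 on failure
}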
diff --git a/conf/sce.conf b/conf/sce.conf
index e5c0fa7..91dd87f 100644
--- a/conf/sce.conf
+++ b/conf/sce.conf
@@ -35,7 +35,7 @@ json_cfg_file=resource/sce.json
 foreign_cont_dir=resource/foreign_files
 redis_db_idx=0
 redis_server=127.0.0.1
-redis_port_range=6379 
+redis_port_range=6379
 max_chaining_size=32
 
 [packet_io]
@@ -71,7 +71,7 @@ prometheus_listen_port=9001
 prometheus_listen_url=/sce_prometheus
 
 [metrics]
-# Kafka Topic: POLICY-RULE-METRICS
+# Kafka Topic: POLICY-RULE-METRIC
 enable=1
 interval_s=1
 telegraf_bind_address=127.0.0.1
@@ -85,4 +85,10 @@ path=/run/run/frr/bfdd.vty
 device=eth0
 local_address=127.0.0.1
 gateway=127.0.0.1
-icmp_cycle_time_s=10
\ No newline at end of file
+icmp_cycle_time_s=10
+
+[kafka]
+brokerlist=192.168.40.224:9092
+sasl_username=admin
+sasl_passwd=galaxy2019
+topic_name=POLICY-RULE-METRIC
\ No newline at end of file
diff --git a/platform/include/sce.h b/platform/include/sce.h
index ab99933..63ddd7d 100644
--- a/platform/include/sce.h
+++ b/platform/include/sce.h
@@ -8,6 +8,7 @@ extern "C"
 
 #include 
 
+#include "kafka.h"
 #include "policy.h"
 #include "timestamp.h"
 #include "packet_io.h"
@@ -99,6 +100,7 @@ struct sce_ctx
     int cpu_affinity_mask[MAX_THREAD_NUM];
     cpu_set_t coremask;
 
+    struct kafka *kfk;
     struct timestamp *ts;
     struct packet_io *io;
     struct global_metrics *metrics;
diff --git a/platform/src/global_metrics.cpp b/platform/src/global_metrics.cpp
index e90477d..3aa7a94 100644
--- a/platform/src/global_metrics.cpp
+++ b/platform/src/global_metrics.cpp
@@ -1,6 +1,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 #include "log.h"
diff --git a/platform/src/sce.cpp b/platform/src/sce.cpp
index 5cb729c..4f7401a 100644
--- a/platform/src/sce.cpp
+++ b/platform/src/sce.cpp
@@ -3,6 +3,7 @@
 
 #include "sce.h"
 #include "log.h"
+#include "kafka.h"
 #include "global_metrics.h"
 
 char *memdup(const char *src, int len)
@@ -87,6 +88,12 @@ struct sce_ctx *sce_ctx_create(const char *profile)
         CPU_SET(cpu_id, &sce_ctx->coremask);
     }
 
+    sce_ctx->kfk = kafka_create(profile);
+    if (sce_ctx->kfk == NULL)
+    {
+        goto error_out;
+    }
+
     sce_ctx->ts = timestamp_new(sce_ctx->ts_update_interval_ms);
     sce_ctx->metrics = global_metrics_create(profile, sce_ctx->nr_worker_threads);
     if (sce_ctx->metrics == NULL)
@@ -126,6 +133,7 @@ void sce_ctx_destory(struct sce_ctx *sce_ctx)
     policy_enforcer_destory(sce_ctx->enforcer);
     global_metrics_destory(sce_ctx->metrics);
     timestamp_free(sce_ctx->ts);
+    kafka_destroy(sce_ctx->kfk);
 
     free(sce_ctx);
     sce_ctx = NULL;
diff --git a/test/gmock_marsio.cpp b/test/gmock_marsio.cpp
index 28af25c..9712cca 100644
--- a/test/gmock_marsio.cpp
+++ b/test/gmock_marsio.cpp
@@ -2,7 +2,8 @@
 #include 
 #include 
 #include 
-#include 
+
+#include "gmock_marsio.h"
 
 struct mr_instance
 {
diff --git a/test/gmock_marsio.h b/test/gmock_marsio.h
new file mode 100644
index 0000000..6bc95b1
--- /dev/null
+++ b/test/gmock_marsio.h
@@ -0,0 +1,23 @@
+#ifndef _GMOCK_MARSIO_H
+#define _GMOCK_MARSIO_H
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#include 
+
+// test-only helpers, used only by gtest
+void marsio_set_recv_mbuff(struct mr_instance *instance, marsio_buff_t *mbuff);
+void marsio_set_send_mbuff(struct mr_instance *instance, marsio_buff_t *mbuff);
+marsio_buff_t *marsio_get_recv_mbuff(struct mr_instance *instance);
+marsio_buff_t *marsio_get_send_mbuff(struct mr_instance *instance);
+int marsio_mbuff_cmp(marsio_buff_t *mbuff1, marsio_buff_t *mbuff2);
+marsio_buff_t *marsio_mbuff_dup(marsio_buff_t *m);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/test/gtest_utils.h b/test/gtest_utils.h
index 7bf214e..27b3bc4 100644
--- a/test/gtest_utils.h
+++ b/test/gtest_utils.h
@@ -1,30 +1,23 @@
 #ifndef _GTEST_UTILS_H
 #define _GTEST_UTILS_H
 
+#include 
+
 #ifdef __cplusplus
 extern "C"
 {
 #endif
 
 #include 
-#include 
 
 #include "sce.h"
 #include "log.h"
-#include 
 #include "vxlan.h"
 #include "packet_io.h"
 #include "sf_metrics.h"
 #include "health_check.h"
 #include "global_metrics.h"
-
-// new add, only for gtest
-extern void marsio_set_recv_mbuff(struct mr_instance *instance, marsio_buff_t *mbuff);
-extern void marsio_set_send_mbuff(struct mr_instance *instance, marsio_buff_t *mbuff);
-extern marsio_buff_t *marsio_get_recv_mbuff(struct mr_instance *instance);
-extern marsio_buff_t *marsio_get_send_mbuff(struct mr_instance *instance);
-extern int marsio_mbuff_cmp(marsio_buff_t *mbuff1, marsio_buff_t *mbuff2);
-extern marsio_buff_t *marsio_mbuff_dup(marsio_buff_t *m);
+#include "gmock_marsio.h"
 
 #define set_metadata(meta, id, offset, is_ctrl, is_decrypt) \
 { \
diff --git a/test/test_data/conf/sce.conf b/test/test_data/conf/sce.conf
index c1f34f8..5d82e30 100644
--- a/test/test_data/conf/sce.conf
+++ b/test/test_data/conf/sce.conf
@@ -70,7 +70,7 @@ prometheus_listen_port=9001
 prometheus_listen_url=/sce_prometheus
 
 [metrics]
-# Kafka Topic: POLICY-RULE-METRICS
+# Kafka Topic: POLICY-RULE-METRIC
 enable=0
 interval_s=1
 telegraf_bind_address=127.0.0.1
@@ -82,3 +82,9 @@ path=/run/run/frr/bfdd.vty
 device=eth0
 local_address=127.0.0.1
 gateway=127.0.0.1
+
+[kafka]
+brokerlist=192.168.40.224:9092
+sasl_username=admin
+sasl_passwd=galaxy2019
+topic_name=POLICY-RULE-METRIC
\ No newline at end of file
diff --git a/vendor/CMakeLists.txt b/vendor/CMakeLists.txt
index 2988dcc..816c8ba 100644
--- a/vendor/CMakeLists.txt
+++ b/vendor/CMakeLists.txt
@@ -40,6 +40,10 @@ set_property(TARGET cjson PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${INSTALL_DIR}/
 set(MESA_FRAMEWORK_LIB_DIR /opt/MESA/lib)
 set(MESA_FRAMEWORK_INCLUDE_DIR /opt/MESA/include)
 
+add_library(rdkafka SHARED IMPORTED GLOBAL)
+set_property(TARGET rdkafka PROPERTY IMPORTED_LOCATION ${MESA_FRAMEWORK_LIB_DIR}/librdkafka.so)
+set_property(TARGET rdkafka PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${MESA_FRAMEWORK_INCLUDE_DIR}/MESA/)
+
 add_library(MESA_handle_logger SHARED IMPORTED GLOBAL)
 set_property(TARGET MESA_handle_logger PROPERTY IMPORTED_LOCATION ${MESA_FRAMEWORK_LIB_DIR}/libMESA_handle_logger.so)
 set_property(TARGET MESA_handle_logger PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${MESA_FRAMEWORK_INCLUDE_DIR})
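Integration note (not part of the patch): with this change the platform owns the producer -- sce_ctx_create() builds sce_ctx->kfk from the profile and sce_ctx_destory() releases it -- so code inside the platform only needs kafka_send(). A minimal sketch, assuming a hypothetical helper name and JSON layout; the real metrics serialization lives in global_metrics.cpp and is not shown here.

#include <stdio.h>

#include "sce.h"   // struct sce_ctx now carries the kafka handle (sce_ctx->kfk)
#include "kafka.h"

// Hypothetical reporting helper, for illustration only.
int report_rule_hits_example(struct sce_ctx *sce_ctx, int rule_id, long hits)
{
    char buf[256];
    // Illustrative JSON shape; the actual payload format is defined by the metrics code.
    int n = snprintf(buf, sizeof(buf), "{\"rule_id\": %d, \"hits\": %ld}", rule_id, hits);
    if (n < 0 || n >= (int)sizeof(buf))
    {
        return -1;
    }

    // sce_ctx->kfk was created in sce_ctx_create() and is destroyed in sce_ctx_destory(),
    // so this call does not manage the handle's lifetime.
    return kafka_send(sce_ctx->kfk, TOPIC_RULE_HITS, buf, n);
}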