feature: packet IO support IP reassembly

This commit is contained in:
luwenpeng
2024-10-23 10:01:20 +08:00
parent a7b79a0e22
commit fd3cc20554
54 changed files with 3474 additions and 4271 deletions

View File

@@ -6,30 +6,18 @@
app_symbol = "stellar"
dev_symbol = "nf_0_fw"
pcap_path = "/tmp/test.pcap"
nr_worker_thread = 1 # range: [1, 256]
thread_num = 1 # range: [1, 256]
cpu_mask = [5, 6, 7, 8, 9, 10, 11, 12]
idle_yield_interval_ms = 900 # range: [0, 60000] (ms)
idle_yield_ms = 900 # range: [0, 60000] (ms)
[[module]]
path=""
init="packet_manager_on_init"
exit="packet_manager_on_exit"
[packet_io.packet_pool]
capacity = 1024 # range: [1, 4294967295]
[[module]]
path=""
init="session_manager_on_init"
exit="session_manager_on_exit"
[ip_reassembly]
enable = 1
bucket_entries = 32 # range: [1, 4294967295] (must be power of 2)
bucket_num = 1024 # range: [1, 4294967295]
ip_frag_timeout_ms = 10000 # range: [1, 60000] (ms)
ip_frag_expire_polling_interval_ms = 0 # range: [0, 60000] (ms)
ip_frag_expire_polling_limit = 1024 # range: [1, 1024]
[packet_io.ip_reassembly]
fail_action = 1 # 0: bypass, 1: drop
timeout_ms = 1000 # range: [1, 60000] (ms)
frag_queue_num = 1024 # range: [1, 4294967295]
frag_queue_size = 64 # range: [2, 65535]
[session_manager]
tcp_session_max = 50000
@@ -71,14 +59,17 @@ exit="session_manager_on_exit"
timeout_ms = 10000 # range: [1, 60000] (ms)
buffered_segments_max = 256 # range: [2, 4096] per flow
[stat]
merge_interval_ms = 500 # range: [0, 60000] (ms)
output_interval_ms = 1000 # range: [0, 60000] (ms)
[log]
output = "both" # stderr, file, both
file = "log/stellar.log"
level = "INFO" # TRACE, DEBUG, INFO, WARN, ERROR, FATAL
[[module]]
path = ""
init = "packet_manager_on_init"
exit = "packet_manager_on_exit"
[[module]]
path = ""
init = "session_manager_on_init"
exit = "session_manager_on_exit"

45
deps/logger/log.c vendored
View File

@@ -28,7 +28,7 @@ struct log_config
struct logger
{
char config_file[PATH_MAX];
char toml_file[PATH_MAX];
struct log_config config;
int log_fd;
@@ -87,15 +87,14 @@ static int str_to_level(const char *level)
return -1;
}
}
static int config_parse(struct log_config *config, const char *config_file)
{
int ret = -1;
FILE *fp = NULL;
char errbuf[200];
char *ptr_output = NULL;
char *ptr_file = NULL;
char *ptr_level = NULL;
char *output = NULL;
char *file = NULL;
char *level = NULL;
const char *ptr;
toml_table_t *section = NULL;
toml_table_t *table = NULL;
@@ -122,20 +121,20 @@ static int config_parse(struct log_config *config, const char *config_file)
}
ptr = toml_raw_in(section, "output");
if (ptr == NULL || toml_rtos(ptr, &ptr_output) != 0)
if (ptr == NULL || toml_rtos(ptr, &output) != 0)
{
fprintf(stderr, "(logger) config file %s missing log.output\n", config_file);
goto error_out;
}
if (strcasecmp(ptr_output, "stderr") == 0)
if (strcasecmp(output, "stderr") == 0)
{
config->output = LOG_OUTPUT_STDERR;
}
else if (strcasecmp(ptr_output, "file") == 0)
else if (strcasecmp(output, "file") == 0)
{
config->output = LOG_OUTPUT_FILE;
}
else if (strcasecmp(ptr_output, "both") == 0)
else if (strcasecmp(output, "both") == 0)
{
config->output = LOG_OUTPUT_BOTH;
}
@@ -148,21 +147,21 @@ static int config_parse(struct log_config *config, const char *config_file)
if (config->output == LOG_OUTPUT_FILE || config->output == LOG_OUTPUT_BOTH)
{
ptr = toml_raw_in(section, "file");
if (ptr == NULL || toml_rtos(ptr, &ptr_file) != 0)
if (ptr == NULL || toml_rtos(ptr, &file) != 0)
{
fprintf(stderr, "(logger) config file %s missing log.file\n", config_file);
goto error_out;
}
strcpy(config->log_file, ptr_file);
strcpy(config->log_file, file);
}
ptr = toml_raw_in(section, "level");
if (ptr == NULL || toml_rtos(ptr, &ptr_level) != 0)
if (ptr == NULL || toml_rtos(ptr, &level) != 0)
{
fprintf(stderr, "(logger) config file %s missing log.level\n", config_file);
goto error_out;
}
config->level = (enum log_level)str_to_level(ptr_level);
config->level = (enum log_level)str_to_level(level);
if ((int)config->level == -1)
{
fprintf(stderr, "config file %s invalid log.level\n", config_file);
@@ -172,17 +171,17 @@ static int config_parse(struct log_config *config, const char *config_file)
ret = 0;
error_out:
if (ptr_output)
if (output)
{
free(ptr_output);
free(output);
}
if (ptr_file)
if (file)
{
free(ptr_file);
free(file);
}
if (ptr_level)
if (level)
{
free(ptr_level);
free(level);
}
if (table)
{
@@ -228,7 +227,7 @@ static int log_file_reopen(struct logger *logger)
* Public API
******************************************************************************/
struct logger *log_new(const char *config_file)
struct logger *log_new(const char *toml_file)
{
struct logger *logger = (struct logger *)calloc(1, sizeof(struct logger));
if (logger == NULL)
@@ -237,8 +236,8 @@ struct logger *log_new(const char *config_file)
return NULL;
}
memcpy(&logger->config_file, config_file, strlen(config_file));
if (config_parse(&logger->config, config_file) != 0)
memcpy(&logger->toml_file, toml_file, strlen(toml_file));
if (config_parse(&logger->config, toml_file) != 0)
{
goto error_out;
}
@@ -292,7 +291,7 @@ int log_check_level(struct logger *logger, enum log_level level)
void log_reload_level(struct logger *logger)
{
struct log_config config = {};
if (config_parse(&config, logger->config_file) == 0)
if (config_parse(&config, logger->toml_file) == 0)
{
logger->config.level = config.level;
fprintf(stderr, "(logger) logger level reload to %s\n", level_str[config.level]);

View File

@@ -9,7 +9,7 @@ extern "C"
extern __thread struct logger *__thread_local_logger;
struct logger *log_new(const char *config_file);
struct logger *log_new(const char *toml_file);
void log_free(struct logger *logger);
void log_reload_level(struct logger *logger);

View File

@@ -26,14 +26,14 @@ struct packet_manager *stellar_module_get_packet_manager(struct stellar_module_m
int packet_manager_new_packet_exdata_index(struct packet_manager *pkt_mgr, const char *name, exdata_free *func, void *arg);
typedef void on_packet_stage_callback(enum packet_stage stage, struct packet *pkt, void *args);
int packet_manager_subscribe(struct packet_manager *pkt_mgr, enum packet_stage stage, on_packet_stage_callback *cb, void *args);
typedef void on_packet_stage_callback(enum packet_stage stage, struct packet *pkt, void *arg);
int packet_manager_subscribe(struct packet_manager *pkt_mgr, enum packet_stage stage, on_packet_stage_callback *cb, void *arg);
// if two modules claim the same packet at the same stage, the second 'claim' fails.
// return 0 on success
// return -1 on failure
typedef void on_packet_claimed_callback(struct packet *pkt, void *args);
int packet_manager_claim_packet(struct packet_manager *pkt_mgr, uint16_t thread_id, struct packet *pkt, on_packet_claimed_callback cb, void *args);
typedef void on_packet_claimed_callback(struct packet *pkt, void *arg);
int packet_manager_claim_packet(struct packet_manager *pkt_mgr, uint16_t thread_id, struct packet *pkt, on_packet_claimed_callback cb, void *arg);
void packet_manager_schedule_packet(struct packet_manager *pkt_mgr, uint16_t thread_id, struct packet *pkt, enum packet_stage stage);
#ifdef __cplusplus

View File

@@ -8,9 +8,7 @@ foreach(infra ${INFRA})
add_subdirectory(${infra})
endforeach()
add_library(stellar_lib SHARED stellar_core.c stellar_stat.c)
add_dependencies(stellar_lib ${WHOLE_ARCHIVE})
add_library(stellar_lib SHARED stellar_core.c)
set_target_properties(stellar_lib PROPERTIES LINK_FLAGS "-Wl,--version-script=${CMAKE_CURRENT_LIST_DIR}/version.map")
target_link_libraries(stellar_lib PRIVATE pthread -Wl,--whole-archive ${WHOLE_ARCHIVE} -Wl,--no-whole-archive)
@@ -18,9 +16,7 @@ target_link_libraries(stellar_lib PUBLIC ${LIBS})
target_link_options(stellar_lib PRIVATE -rdynamic)
set_target_properties(stellar_lib PROPERTIES OUTPUT_NAME "stellar")
add_executable(stellar main.c stellar_core.c stellar_stat.c)
add_dependencies(stellar_lib ${WHOLE_ARCHIVE})
add_executable(stellar main.c stellar_core.c)
set_target_properties(stellar PROPERTIES LINK_FLAGS "-Wl,--version-script=${CMAKE_CURRENT_LIST_DIR}/version.map")
target_link_libraries(stellar PRIVATE pthread -Wl,--whole-archive ${WHOLE_ARCHIVE} -Wl,--no-whole-archive ${LIBS})
target_link_options(stellar PRIVATE -rdynamic)

View File

@@ -3,4 +3,4 @@ target_include_directories(ip_reassembly PUBLIC ${CMAKE_CURRENT_LIST_DIR})
target_include_directories(ip_reassembly PUBLIC ${CMAKE_SOURCE_DIR}/deps/crc32)
target_link_libraries(ip_reassembly packet_manager)
add_subdirectory(test)
add_subdirectory(test)

File diff suppressed because it is too large Load Diff

View File

@@ -5,66 +5,47 @@ extern "C"
{
#endif
struct ip_reassembly_config
{
uint8_t enable;
uint32_t bucket_entries; // range: [1, 4294967295] (must be power of 2)
uint32_t bucket_num; // range: [1, 4294967295]
uint64_t ip_frag_timeout_ms; // range: [1, 60000] (ms)
uint64_t ip_frag_expire_polling_interval_ms; // range: [0, 60000] (ms)
uint64_t ip_frag_expire_polling_limit; // range: [1, 1024]
};
#include <stdint.h>
struct ip_reassembly_stat
{
// IPv4 frag stat
uint64_t ip4_defrags_expected;
uint64_t ip4_defrags_succeed;
uint64_t ip4_defrags_failed_timeout;
uint64_t ip4_defrags_failed_invalid_length;
uint64_t ip4_defrags_failed_overlap;
uint64_t ip4_defrags_failed_too_many_frag;
uint64_t ip4_defrags_failed;
uint64_t ip4_frags;
uint64_t ip4_frags_freed;
uint64_t ip4_frags_buffered;
uint64_t ip4_frags_bypass_no_buffer;
uint64_t ip4_frags_bypass_dup_fist_frag;
uint64_t ip4_frags_bypass_dup_last_frag;
uint64_t ip4_frags_no_buffer;
uint64_t ip4_frags_timeout;
uint64_t ip4_frags_invalid_length;
uint64_t ip4_frags_overlap;
uint64_t ip4_frags_too_many;
// IPv6 frag stat
uint64_t ip6_defrags_expected;
uint64_t ip6_defrags_succeed;
uint64_t ip6_defrags_failed_timeout;
uint64_t ip6_defrags_failed_invalid_length;
uint64_t ip6_defrags_failed_overlap;
uint64_t ip6_defrags_failed_too_many_frag;
uint64_t ip6_defrags_failed;
uint64_t ip6_frags;
uint64_t ip6_frags_freed;
uint64_t ip6_frags_buffered;
uint64_t ip6_frags_bypass_no_buffer;
uint64_t ip6_frags_bypass_dup_fist_frag;
uint64_t ip6_frags_bypass_dup_last_frag;
uint64_t ip6_frags_no_buffer;
uint64_t ip6_frags_timeout;
uint64_t ip6_frags_invalid_length;
uint64_t ip6_frags_overlap;
uint64_t ip6_frags_too_many;
} __attribute__((aligned(64)));
struct ip_reassembly_config *ip_reassembly_config_new(const char *toml_file);
void ip_reassembly_config_free(struct ip_reassembly_config *cfg);
void ip_reassembly_config_print(const struct ip_reassembly_config *cfg);
struct ip_reassembly *ip_reassembly_new(uint64_t timeout_ms, uint64_t frag_queue_num, uint64_t frag_queue_size);
void ip_reassembly_free(struct ip_reassembly *ip_reass);
struct ip_reassembly *ip_reassembly_new(const struct ip_reassembly_config *cfg, uint64_t now);
void ip_reassembly_free(struct ip_reassembly *assy);
void ip_reassembly_expire(struct ip_reassembly *assy, uint64_t now);
struct ip_reassembly_stat *ip_reassembly_stat(struct ip_reassembly *assy);
struct packet *ip_reassembly_defrag(struct ip_reassembly *ip_reass, struct packet *pkt, uint64_t now);
struct packet *ip_reassembly_clean(struct ip_reassembly *ip_reass, uint64_t now_ms);
/*
* Returns the reassembled packet, or NULL if the packet is not reassembled
* The returned packet should be freed by calling the packet_free() function
*/
struct packet *ip_reassembly_packet(struct ip_reassembly *assy, const struct packet *pkt, uint64_t now);
struct packet *ipv4_reassembly_packet(struct ip_reassembly *assy, const struct packet *pkt, uint64_t now);
struct packet *ipv6_reassembly_packet(struct ip_reassembly *assy, const struct packet *pkt, uint64_t now);
struct ip_reassembly_stat *ip_reassembly_get_stat(struct ip_reassembly *ip_reass);
void ip_reassembly_print_stat(struct ip_reassembly *ip_reass);
#ifdef __cplusplus
}

View File

@@ -4,10 +4,6 @@ target_link_libraries(gtest_ipv4_reassembly ip_reassembly gtest)
add_executable(gtest_ipv6_reassembly gtest_ipv6_reassembly.cpp)
target_link_libraries(gtest_ipv6_reassembly ip_reassembly gtest)
add_executable(gtest_ip_reassembly gtest_ip_reassembly.cpp)
target_link_libraries(gtest_ip_reassembly ip_reassembly gtest)
include(GoogleTest)
gtest_discover_tests(gtest_ipv4_reassembly)
gtest_discover_tests(gtest_ipv6_reassembly)
gtest_discover_tests(gtest_ip_reassembly)
gtest_discover_tests(gtest_ipv6_reassembly)

View File

@@ -1,15 +0,0 @@
#include "gtest_utils.h"
#if 1
TEST(IP_REASSEMBLE, NESTED)
{
// TODO
// IP in IP, both with fragmentation
}
#endif
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@@ -1,4 +1,49 @@
#include "gtest_utils.h"
#include <gtest/gtest.h>
#include "packet_parser.h"
#include "packet_helper.h"
#include "packet_internal.h"
#include "ip_reassembly.h"
static inline void packet_overwrite_v4_saddr(struct packet *pkt, uint32_t saddr)
{
const struct layer_private *ipv4_layer = packet_get_innermost_layer(pkt, LAYER_PROTO_IPV4);
EXPECT_TRUE(ipv4_layer);
struct ip *hdr = (struct ip *)ipv4_layer->hdr_ptr;
ip4_hdr_set_src_addr(hdr, saddr);
}
static inline void stat_cmp(struct ip_reassembly_stat *curr, struct ip_reassembly_stat *expect)
{
EXPECT_TRUE(curr != NULL);
EXPECT_TRUE(expect != NULL);
EXPECT_TRUE(curr->ip4_defrags_expected == expect->ip4_defrags_expected);
EXPECT_TRUE(curr->ip4_defrags_succeed == expect->ip4_defrags_succeed);
EXPECT_TRUE(curr->ip4_defrags_failed == expect->ip4_defrags_failed);
EXPECT_TRUE(curr->ip4_frags == expect->ip4_frags);
EXPECT_TRUE(curr->ip4_frags_freed == expect->ip4_frags_freed);
EXPECT_TRUE(curr->ip4_frags_buffered == expect->ip4_frags_buffered);
EXPECT_TRUE(curr->ip4_frags_no_buffer == expect->ip4_frags_no_buffer);
EXPECT_TRUE(curr->ip4_frags_timeout == expect->ip4_frags_timeout);
EXPECT_TRUE(curr->ip4_frags_invalid_length == expect->ip4_frags_invalid_length);
EXPECT_TRUE(curr->ip4_frags_overlap == expect->ip4_frags_overlap);
EXPECT_TRUE(curr->ip4_frags_too_many == expect->ip4_frags_too_many);
EXPECT_TRUE(curr->ip6_defrags_expected == expect->ip6_defrags_expected);
EXPECT_TRUE(curr->ip6_defrags_succeed == expect->ip6_defrags_succeed);
EXPECT_TRUE(curr->ip6_defrags_failed == expect->ip6_defrags_failed);
EXPECT_TRUE(curr->ip6_frags == expect->ip6_frags);
EXPECT_TRUE(curr->ip6_frags_freed == expect->ip6_frags_freed);
EXPECT_TRUE(curr->ip6_frags_buffered == expect->ip6_frags_buffered);
EXPECT_TRUE(curr->ip6_frags_no_buffer == expect->ip6_frags_no_buffer);
EXPECT_TRUE(curr->ip6_frags_timeout == expect->ip6_frags_timeout);
EXPECT_TRUE(curr->ip6_frags_invalid_length == expect->ip6_frags_invalid_length);
EXPECT_TRUE(curr->ip6_frags_overlap == expect->ip6_frags_overlap);
EXPECT_TRUE(curr->ip6_frags_too_many == expect->ip6_frags_too_many);
}
/*
* Frame 4: 60 bytes on wire (480 bits), 60 bytes captured (480 bits)
@@ -194,60 +239,58 @@ unsigned char expect[] = {
#if 1
TEST(IPV4_REASSEMBLE, PADDING_ORDER)
{
struct packet pkt;
struct packet *new_pkt;
struct packet frag_pkt1 = {};
struct packet frag_pkt2 = {};
struct packet *frag_pkt = NULL;
struct packet *defrag_pkt = NULL;
const struct layer_private *layer;
struct ip_reassembly *assy;
struct ip_reassembly_config cfg = {
.enable = true,
.bucket_entries = 16,
.bucket_num = 8,
.ip_frag_timeout_ms = 1,
.ip_frag_expire_polling_interval_ms = 0,
.ip_frag_expire_polling_limit = 1024,
struct ip_reassembly *ip_reass;
struct ip_reassembly_stat *curr_stat;
struct ip_reassembly_stat expect_stat;
ip_reass = ip_reassembly_new(1, 1024, 64);
EXPECT_TRUE(ip_reass != NULL);
curr_stat = ip_reassembly_get_stat(ip_reass);
EXPECT_TRUE(curr_stat != NULL);
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
assy = ip_reassembly_new(&cfg, 0);
EXPECT_TRUE(assy != NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
stat_cmp(curr_stat, &expect_stat);
// frag1
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag1, sizeof(frag1));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
1, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
1, 0, 1, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
packet_parse(&frag_pkt1, (const char *)frag1, sizeof(frag1));
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt1, 1);
EXPECT_TRUE(defrag_pkt == NULL);
expect_stat = {
1, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
1, 0, 1, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// frag2
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag2, sizeof(frag2));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt);
check_stat(ip_reassembly_stat(assy),
1, 1, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
2, 2, 2, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
packet_parse(&frag_pkt2, (const char *)frag2, sizeof(frag2));
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt2, 1);
EXPECT_TRUE(defrag_pkt);
expect_stat = {
1, 1, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
2, 2, 2, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// check packet
packet_print(new_pkt);
EXPECT_TRUE(new_pkt->data_len == 14 /* ETH */ + 20 /* IPv4 */ + 20 /* TCP */ + 28 /* DATA */);
EXPECT_TRUE(new_pkt->data_len == sizeof(expect));
EXPECT_TRUE(memcmp(new_pkt->data_ptr, expect, new_pkt->data_len) == 0);
EXPECT_TRUE(defrag_pkt->data_len == 14 /* ETH */ + 20 /* IPv4 */ + 20 /* TCP */ + 28 /* DATA */);
EXPECT_TRUE(defrag_pkt->data_len == sizeof(expect));
EXPECT_TRUE(memcmp(defrag_pkt->data_ptr, expect, defrag_pkt->data_len) == 0);
// check IPv4
layer = packet_get_innermost_layer(new_pkt, LAYER_PROTO_IPV4);
layer = packet_get_innermost_layer(defrag_pkt, LAYER_PROTO_IPV4);
EXPECT_TRUE(layer);
struct ip *hdr = (struct ip *)layer->hdr_ptr;
EXPECT_TRUE(ip4_hdr_get_version(hdr) == 4);
@@ -266,7 +309,7 @@ TEST(IPV4_REASSEMBLE, PADDING_ORDER)
EXPECT_TRUE(ip4_hdr_get_opt_data(hdr) == NULL);
// check TCP
layer = packet_get_innermost_layer(new_pkt, LAYER_PROTO_TCP);
layer = packet_get_innermost_layer(defrag_pkt, LAYER_PROTO_TCP);
EXPECT_TRUE(layer);
struct tcphdr *tcp_hdr = (struct tcphdr *)layer->hdr_ptr;
EXPECT_TRUE(tcp_hdr_get_src_port(tcp_hdr) == 62629);
@@ -282,69 +325,89 @@ TEST(IPV4_REASSEMBLE, PADDING_ORDER)
EXPECT_TRUE(tcp_hdr_get_opt_data(tcp_hdr) == NULL);
// free packet
packet_free(new_pkt);
EXPECT_TRUE(packet_is_defraged(defrag_pkt));
ip_reassembly_free(assy);
frag_pkt = packet_pop_frag(defrag_pkt);
EXPECT_TRUE(frag_pkt);
packet_free(frag_pkt);
frag_pkt = packet_pop_frag(defrag_pkt);
EXPECT_TRUE(frag_pkt);
packet_free(frag_pkt);
frag_pkt = packet_pop_frag(defrag_pkt);
EXPECT_TRUE(frag_pkt == NULL);
packet_free(defrag_pkt);
EXPECT_TRUE(ip_reassembly_clean(ip_reass, 2) == NULL);
expect_stat = {
1, 1, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
2, 2, 2, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
ip_reassembly_free(ip_reass);
}
#endif
#if 1
TEST(IPV4_REASSEMBLE, PADDING_UNORDER)
{
struct packet pkt;
struct packet *new_pkt;
struct packet frag_pkt1 = {};
struct packet frag_pkt2 = {};
struct packet *frag_pkt = NULL;
struct packet *defrag_pkt = NULL;
const struct layer_private *layer;
struct ip_reassembly *assy;
struct ip_reassembly_config cfg = {
.enable = true,
.bucket_entries = 16,
.bucket_num = 8,
.ip_frag_timeout_ms = 1,
.ip_frag_expire_polling_interval_ms = 0,
.ip_frag_expire_polling_limit = 1024,
struct ip_reassembly *ip_reass;
struct ip_reassembly_stat *curr_stat;
struct ip_reassembly_stat expect_stat;
ip_reass = ip_reassembly_new(1, 1024, 64);
EXPECT_TRUE(ip_reass != NULL);
curr_stat = ip_reassembly_get_stat(ip_reass);
EXPECT_TRUE(curr_stat != NULL);
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
assy = ip_reassembly_new(&cfg, 0);
EXPECT_TRUE(assy != NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
stat_cmp(curr_stat, &expect_stat);
// frag2
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag2, sizeof(frag2));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
1, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
1, 0, 1, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
packet_parse(&frag_pkt2, (const char *)frag2, sizeof(frag2));
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt2, 1);
EXPECT_TRUE(defrag_pkt == NULL);
expect_stat = {
1, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
1, 0, 1, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// frag1
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag1, sizeof(frag1));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt);
check_stat(ip_reassembly_stat(assy),
1, 1, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
2, 2, 2, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
packet_parse(&frag_pkt1, (const char *)frag1, sizeof(frag1));
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt1, 1);
EXPECT_TRUE(defrag_pkt);
expect_stat = {
1, 1, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
2, 2, 2, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// check packet
packet_print(new_pkt);
EXPECT_TRUE(new_pkt->data_len == 14 /* ETH */ + 20 /* IPv4 */ + 20 /* TCP */ + 28 /* DATA */);
EXPECT_TRUE(new_pkt->data_len == sizeof(expect));
EXPECT_TRUE(memcmp(new_pkt->data_ptr, expect, new_pkt->data_len) == 0);
EXPECT_TRUE(defrag_pkt->data_len == 14 /* ETH */ + 20 /* IPv4 */ + 20 /* TCP */ + 28 /* DATA */);
EXPECT_TRUE(defrag_pkt->data_len == sizeof(expect));
EXPECT_TRUE(memcmp(defrag_pkt->data_ptr, expect, defrag_pkt->data_len) == 0);
// check IPv4
layer = packet_get_innermost_layer(new_pkt, LAYER_PROTO_IPV4);
layer = packet_get_innermost_layer(defrag_pkt, LAYER_PROTO_IPV4);
EXPECT_TRUE(layer);
struct ip *hdr = (struct ip *)layer->hdr_ptr;
EXPECT_TRUE(ip4_hdr_get_version(hdr) == 4);
@@ -363,7 +426,7 @@ TEST(IPV4_REASSEMBLE, PADDING_UNORDER)
EXPECT_TRUE(ip4_hdr_get_opt_data(hdr) == NULL);
// check TCP
layer = packet_get_innermost_layer(new_pkt, LAYER_PROTO_TCP);
layer = packet_get_innermost_layer(defrag_pkt, LAYER_PROTO_TCP);
EXPECT_TRUE(layer);
struct tcphdr *tcp_hdr = (struct tcphdr *)layer->hdr_ptr;
EXPECT_TRUE(tcp_hdr_get_src_port(tcp_hdr) == 62629);
@@ -379,358 +442,308 @@ TEST(IPV4_REASSEMBLE, PADDING_UNORDER)
EXPECT_TRUE(tcp_hdr_get_opt_data(tcp_hdr) == NULL);
// free packet
packet_free(new_pkt);
EXPECT_TRUE(packet_is_defraged(defrag_pkt));
ip_reassembly_free(assy);
frag_pkt = packet_pop_frag(defrag_pkt);
EXPECT_TRUE(frag_pkt);
packet_free(frag_pkt);
frag_pkt = packet_pop_frag(defrag_pkt);
EXPECT_TRUE(frag_pkt);
packet_free(frag_pkt);
frag_pkt = packet_pop_frag(defrag_pkt);
EXPECT_TRUE(frag_pkt == NULL);
packet_free(defrag_pkt);
EXPECT_TRUE(ip_reassembly_clean(ip_reass, 2) == NULL);
expect_stat = {
1, 1, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
2, 2, 2, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
ip_reassembly_free(ip_reass);
}
#endif
#if 1
TEST(IPV4_REASSEMBLE, EXPIRE)
{
struct packet pkt;
struct packet *new_pkt;
struct ip_reassembly *assy;
struct ip_reassembly_config cfg = {
.enable = true,
.bucket_entries = 16,
.bucket_num = 8,
.ip_frag_timeout_ms = 1,
.ip_frag_expire_polling_interval_ms = 0,
.ip_frag_expire_polling_limit = 1024,
struct packet frag_pkt1 = {};
struct packet *defrag_pkt = NULL;
struct ip_reassembly *ip_reass;
struct ip_reassembly_stat *curr_stat;
struct ip_reassembly_stat expect_stat;
ip_reass = ip_reassembly_new(1, 1024, 64);
EXPECT_TRUE(ip_reass != NULL);
curr_stat = ip_reassembly_get_stat(ip_reass);
EXPECT_TRUE(curr_stat != NULL);
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
assy = ip_reassembly_new(&cfg, 0);
EXPECT_TRUE(assy != NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
stat_cmp(curr_stat, &expect_stat);
// frag1
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag1, sizeof(frag1));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
packet_parse(&frag_pkt1, (const char *)frag1, sizeof(frag1));
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt1, 1);
EXPECT_TRUE(defrag_pkt == NULL);
expect_stat = {
1, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
1, 0, 1, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
check_stat(ip_reassembly_stat(assy),
1, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
1, 0, 1, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
struct packet *pkt = NULL;
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt);
packet_free(pkt);
expect_stat = {
1, 0, 1, // ip4: defrags_expected, defrags_succeed, defrags_failed
1, 1, 1, 0, 1, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// frag2
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag2, sizeof(frag2));
new_pkt = ip_reassembly_packet(assy, &pkt, 2);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
2, 0, 1, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
2, 1, 2, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// free packet
packet_free(new_pkt);
ip_reassembly_free(assy);
ip_reassembly_free(ip_reass);
}
#endif
#if 1
// Verifies IPv4 reassembly when the FIRST fragment arrives twice: the duplicate
// must be bypassed (counted as dup-first-frag / overlap), and reassembly must
// still complete once the last fragment arrives.
//
// NOTE(review): this body looks like merged diff residue from a commit page —
// it interleaves two API generations (assy/ip_reassembly_packet/check_stat vs
// ip_reass/ip_reassembly_defrag/stat_cmp), the cfg initializer below is never
// closed with "};", and "pkt" is declared twice with different types.
// Reconstruct one side from the original commit before relying on this test.
TEST(IPV4_REASSEMBLE, DUP_FIRST_FRAG)
{
struct packet pkt;
struct packet *new_pkt;
const struct layer_private *layer;
struct ip_reassembly *assy;
struct ip_reassembly_config cfg = {
.enable = true,
.bucket_entries = 16,
.bucket_num = 8,
.ip_frag_timeout_ms = 1,
.ip_frag_expire_polling_interval_ms = 0,
.ip_frag_expire_polling_limit = 1024,
// NOTE(review): the initializer above is missing its closing "};" — the
// declarations that follow are syntactically inside it (diff-merge artifact).
struct packet frag_pkt1 = {};
struct packet frag_pkt2 = {};
struct packet *defrag_pkt = NULL;
struct ip_reassembly *ip_reass;
struct ip_reassembly_stat *curr_stat;
struct ip_reassembly_stat expect_stat;
// New-style constructor — presumably (fail_action, frag_queue_num,
// frag_queue_size) per the config rename in this commit; TODO confirm.
ip_reass = ip_reassembly_new(1, 1024, 64);
EXPECT_TRUE(ip_reass != NULL);
curr_stat = ip_reassembly_get_stat(ip_reass);
EXPECT_TRUE(curr_stat != NULL);
// Baseline: all counters start at zero.
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
assy = ip_reassembly_new(&cfg, 0);
EXPECT_TRUE(assy != NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
stat_cmp(curr_stat, &expect_stat);
// frag1
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag1, sizeof(frag1));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
1, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
1, 0, 1, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
packet_parse(&frag_pkt1, (const char *)frag1, sizeof(frag1));
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt1, 1);
EXPECT_TRUE(defrag_pkt == NULL);
expect_stat = {
1, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
1, 0, 1, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// frag1
// Re-inject the same first fragment: expected to be rejected as a duplicate.
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag1, sizeof(frag1));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
packet_parse(&frag_pkt2, (const char *)frag1, sizeof(frag1));
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt2, 1);
EXPECT_TRUE(defrag_pkt == NULL);
expect_stat = {
1, 0, 1, // ip4: defrags_expected, defrags_succeed, defrags_failed
2, 1, 1, 0, 0, 0, 1, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
check_stat(ip_reassembly_stat(assy),
1, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
2, 0, 1, 0, 1, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// NOTE(review): redeclares "pkt" (a struct packet at function top) as a
// pointer — another merged-diff artifact; the later memset(&pkt, ...) would
// then clear the pointer, not a packet. Confirm against the original file.
struct packet *pkt = NULL;
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt);
packet_free(pkt);
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt);
packet_free(pkt);
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt == NULL);
expect_stat = {
1, 0, 1, // ip4: defrags_expected, defrags_succeed, defrags_failed
2, 1, 1, 0, 0, 0, 1, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// frag2
// Last fragment completes the datagram: a reassembled packet is returned.
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag2, sizeof(frag2));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt);
check_stat(ip_reassembly_stat(assy),
1, 1, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
3, 2, 2, 0, 1, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// check packet
packet_print(new_pkt);
EXPECT_TRUE(new_pkt->data_len == 14 /* ETH */ + 20 /* IPv4 */ + 20 /* TCP */ + 28 /* DATA */);
EXPECT_TRUE(new_pkt->data_len == sizeof(expect));
EXPECT_TRUE(memcmp(new_pkt->data_ptr, expect, new_pkt->data_len) == 0);
// check IPv4
layer = packet_get_innermost_layer(new_pkt, LAYER_PROTO_IPV4);
EXPECT_TRUE(layer);
struct ip *hdr = (struct ip *)layer->hdr_ptr;
EXPECT_TRUE(ip4_hdr_get_version(hdr) == 4);
EXPECT_TRUE(ip4_hdr_get_hdr_len(hdr) == 20 /* IPv4 */);
EXPECT_TRUE(ip4_hdr_get_tos(hdr) == 0);
EXPECT_TRUE(ip4_hdr_get_total_len(hdr) == 20 /* IPv4 */ + 20 /* TCP */ + 28 /* DATA */);
EXPECT_TRUE(ip4_hdr_get_ipid(hdr) == 0xffff);
EXPECT_TRUE(ip4_hdr_get_flags(hdr) == 0x0);
EXPECT_TRUE(ip4_hdr_get_frag_offset(hdr) == 0);
EXPECT_TRUE(ip4_hdr_get_ttl(hdr) == 127);
EXPECT_TRUE(ip4_hdr_get_proto(hdr) == 6);
EXPECT_TRUE(ip4_hdr_get_checksum(hdr) == 0x6d73); // NOTE this is correct checksum
EXPECT_TRUE(ip4_hdr_get_src_addr(hdr) == 0xc0a82467);
EXPECT_TRUE(ip4_hdr_get_dst_addr(hdr) == 0xc0a82889);
EXPECT_TRUE(ip4_hdr_get_opt_len(hdr) == 0);
EXPECT_TRUE(ip4_hdr_get_opt_data(hdr) == NULL);
// check TCP
layer = packet_get_innermost_layer(new_pkt, LAYER_PROTO_TCP);
EXPECT_TRUE(layer);
struct tcphdr *tcp_hdr = (struct tcphdr *)layer->hdr_ptr;
EXPECT_TRUE(tcp_hdr_get_src_port(tcp_hdr) == 62629);
EXPECT_TRUE(tcp_hdr_get_dst_port(tcp_hdr) == 9999);
EXPECT_TRUE(tcp_hdr_get_seq(tcp_hdr) == 2433164423);
EXPECT_TRUE(tcp_hdr_get_ack(tcp_hdr) == 64297191);
EXPECT_TRUE(tcp_hdr_get_hdr_len(tcp_hdr) == 20);
EXPECT_TRUE(tcp_hdr_get_flags(tcp_hdr) == 0x018);
EXPECT_TRUE(tcp_hdr_get_window(tcp_hdr) == 65280);
EXPECT_TRUE(tcp_hdr_get_checksum(tcp_hdr) == 0x5e92);
EXPECT_TRUE(tcp_hdr_get_urg_ptr(tcp_hdr) == 0);
EXPECT_TRUE(tcp_hdr_get_opt_len(tcp_hdr) == 0);
EXPECT_TRUE(tcp_hdr_get_opt_data(tcp_hdr) == NULL);
// free packet
packet_free(new_pkt);
ip_reassembly_free(assy);
ip_reassembly_free(ip_reass);
}
#endif
#if 1
// Verifies IPv4 reassembly when the LAST fragment arrives twice: the duplicate
// must be bypassed (counted as dup-last-frag / overlap), and reassembly must
// still complete once the first fragment arrives.
//
// NOTE(review): like DUP_FIRST_FRAG above, this body appears to be merged diff
// residue — two API generations interleaved, the cfg initializer never closed
// with "};", and "pkt" declared twice. Reconstruct from the original commit.
TEST(IPV4_REASSEMBLE, DUP_LAST_FRAG)
{
struct packet pkt;
struct packet *new_pkt;
const struct layer_private *layer;
struct ip_reassembly *assy;
struct ip_reassembly_config cfg = {
.enable = true,
.bucket_entries = 16,
.bucket_num = 8,
.ip_frag_timeout_ms = 1,
.ip_frag_expire_polling_interval_ms = 0,
.ip_frag_expire_polling_limit = 1024,
// NOTE(review): missing closing "};" for the initializer above.
struct packet frag_pkt1 = {};
struct packet frag_pkt2 = {};
struct packet *defrag_pkt = NULL;
struct ip_reassembly *ip_reass;
struct ip_reassembly_stat *curr_stat;
struct ip_reassembly_stat expect_stat;
ip_reass = ip_reassembly_new(1, 1024, 64);
EXPECT_TRUE(ip_reass != NULL);
curr_stat = ip_reassembly_get_stat(ip_reass);
EXPECT_TRUE(curr_stat != NULL);
// Baseline: all counters start at zero.
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
assy = ip_reassembly_new(&cfg, 0);
EXPECT_TRUE(assy != NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
stat_cmp(curr_stat, &expect_stat);
// frag2
// Inject the LAST fragment first; it should be buffered, nothing returned.
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag2, sizeof(frag2));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
1, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
1, 0, 1, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
packet_parse(&frag_pkt1, (const char *)frag2, sizeof(frag2));
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt1, 1);
EXPECT_TRUE(defrag_pkt == NULL);
expect_stat = {
1, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
1, 0, 1, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// frag2
// Re-inject the same last fragment: expected to be rejected as a duplicate.
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag2, sizeof(frag2));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
packet_parse(&frag_pkt2, (const char *)frag2, sizeof(frag2));
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt2, 1);
EXPECT_TRUE(defrag_pkt == NULL);
expect_stat = {
1, 0, 1, // ip4: defrags_expected, defrags_succeed, defrags_failed
2, 1, 1, 0, 0, 0, 1, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
check_stat(ip_reassembly_stat(assy),
1, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
2, 0, 1, 0, 0, 1, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// NOTE(review): redeclares "pkt" as a pointer — merged-diff artifact; the
// later memset(&pkt, ...) would then clear the pointer, not a packet.
struct packet *pkt = NULL;
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt);
packet_free(pkt);
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt);
packet_free(pkt);
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt == NULL);
expect_stat = {
1, 0, 1, // ip4: defrags_expected, defrags_succeed, defrags_failed
2, 1, 1, 0, 0, 0, 1, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// frag1
// First fragment completes the datagram: a reassembled packet is returned.
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag1, sizeof(frag1));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt);
check_stat(ip_reassembly_stat(assy),
1, 1, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
3, 2, 2, 0, 0, 1, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// check packet
packet_print(new_pkt);
EXPECT_TRUE(new_pkt->data_len == 14 /* ETH */ + 20 /* IPv4 */ + 20 /* TCP */ + 28 /* DATA */);
EXPECT_TRUE(new_pkt->data_len == sizeof(expect));
EXPECT_TRUE(memcmp(new_pkt->data_ptr, expect, new_pkt->data_len) == 0);
// check IPv4
layer = packet_get_innermost_layer(new_pkt, LAYER_PROTO_IPV4);
EXPECT_TRUE(layer);
struct ip *hdr = (struct ip *)layer->hdr_ptr;
EXPECT_TRUE(ip4_hdr_get_version(hdr) == 4);
EXPECT_TRUE(ip4_hdr_get_hdr_len(hdr) == 20 /* IPv4 */);
EXPECT_TRUE(ip4_hdr_get_tos(hdr) == 0);
EXPECT_TRUE(ip4_hdr_get_total_len(hdr) == 20 /* IPv4 */ + 20 /* TCP */ + 28 /* DATA */);
EXPECT_TRUE(ip4_hdr_get_ipid(hdr) == 0xffff);
EXPECT_TRUE(ip4_hdr_get_flags(hdr) == 0x0);
EXPECT_TRUE(ip4_hdr_get_frag_offset(hdr) == 0);
EXPECT_TRUE(ip4_hdr_get_ttl(hdr) == 127);
EXPECT_TRUE(ip4_hdr_get_proto(hdr) == 6);
EXPECT_TRUE(ip4_hdr_get_checksum(hdr) == 0x6d73); // NOTE this is correct checksum
EXPECT_TRUE(ip4_hdr_get_src_addr(hdr) == 0xc0a82467);
EXPECT_TRUE(ip4_hdr_get_dst_addr(hdr) == 0xc0a82889);
EXPECT_TRUE(ip4_hdr_get_opt_len(hdr) == 0);
EXPECT_TRUE(ip4_hdr_get_opt_data(hdr) == NULL);
// check TCP
layer = packet_get_innermost_layer(new_pkt, LAYER_PROTO_TCP);
EXPECT_TRUE(layer);
struct tcphdr *tcp_hdr = (struct tcphdr *)layer->hdr_ptr;
EXPECT_TRUE(tcp_hdr_get_src_port(tcp_hdr) == 62629);
EXPECT_TRUE(tcp_hdr_get_dst_port(tcp_hdr) == 9999);
EXPECT_TRUE(tcp_hdr_get_seq(tcp_hdr) == 2433164423);
EXPECT_TRUE(tcp_hdr_get_ack(tcp_hdr) == 64297191);
EXPECT_TRUE(tcp_hdr_get_hdr_len(tcp_hdr) == 20);
EXPECT_TRUE(tcp_hdr_get_flags(tcp_hdr) == 0x018);
EXPECT_TRUE(tcp_hdr_get_window(tcp_hdr) == 65280);
EXPECT_TRUE(tcp_hdr_get_checksum(tcp_hdr) == 0x5e92);
EXPECT_TRUE(tcp_hdr_get_urg_ptr(tcp_hdr) == 0);
EXPECT_TRUE(tcp_hdr_get_opt_len(tcp_hdr) == 0);
EXPECT_TRUE(tcp_hdr_get_opt_data(tcp_hdr) == NULL);
// free packet
packet_free(new_pkt);
ip_reassembly_free(assy);
ip_reassembly_free(ip_reass);
}
#endif
#if 1
// Exercises the reassembler at capacity: three distinct flows (same fragment
// with rewritten IPv4 source addresses) against a table sized for two, so the
// third fragment has no buffer and is counted as frags_no_buffer.
//
// NOTE(review): merged diff residue like the tests above — two API generations
// interleaved and the cfg initializer never closed with "};". Also note
// ip_reass is never freed at the end of this test (only assy is) — confirm
// whether that leak exists in the original file.
TEST(IPV4_REASSEMBLE, FULL)
{
struct packet pkt;
struct packet *new_pkt;
struct ip_reassembly *assy;
struct ip_reassembly_config cfg = {
.enable = true,
.bucket_entries = 1,
.bucket_num = 1,
.ip_frag_timeout_ms = 1,
.ip_frag_expire_polling_interval_ms = 0,
.ip_frag_expire_polling_limit = 1024,
// NOTE(review): missing closing "};" for the initializer above.
struct packet frag_pkt1 = {};
struct packet frag_pkt2 = {};
struct packet frag_pkt3 = {};
struct packet *defrag_pkt = NULL;
struct ip_reassembly *ip_reass;
struct ip_reassembly_stat *curr_stat;
struct ip_reassembly_stat expect_stat;
// Capacity of 2 frag queues so the third flow cannot be buffered.
ip_reass = ip_reassembly_new(1, 2, 64);
EXPECT_TRUE(ip_reass != NULL);
curr_stat = ip_reassembly_get_stat(ip_reass);
EXPECT_TRUE(curr_stat != NULL);
// Baseline: all counters start at zero.
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
assy = ip_reassembly_new(&cfg, 0);
EXPECT_TRUE(assy != NULL);
// Per-flow copies of frag1 so each can get its own source address.
char dup1[sizeof(frag1)] = {0};
char dup2[sizeof(frag1)] = {0};
char dup3[sizeof(frag1)] = {0};
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
char dup_frag[sizeof(frag1)] = {0};
memcpy(dup_frag, frag1, sizeof(frag1));
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)dup_frag, sizeof(dup_frag));
memcpy(dup1, frag1, sizeof(frag1));
memcpy(dup2, frag1, sizeof(frag1));
memcpy(dup3, frag1, sizeof(frag1));
// flow1
packet_set_ipv4_src_addr(&pkt, 1);
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
1, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
1, 0, 1, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
packet_parse(&frag_pkt1, (const char *)dup1, sizeof(dup1));
packet_overwrite_v4_saddr(&frag_pkt1, 1);
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt1, 1);
EXPECT_TRUE(defrag_pkt == NULL);
expect_stat = {
1, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
1, 0, 1, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// flow2
packet_set_ipv4_src_addr(&pkt, 2);
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
2, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
2, 0, 2, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
packet_parse(&frag_pkt2, (const char *)dup2, sizeof(dup2));
packet_overwrite_v4_saddr(&frag_pkt2, 2);
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt2, 1);
EXPECT_TRUE(defrag_pkt == NULL);
expect_stat = {
2, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
2, 0, 2, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// flow3
// Third flow exceeds the 2-queue capacity: fragment is not buffered.
packet_set_ipv4_src_addr(&pkt, 3);
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
packet_parse(&frag_pkt3, (const char *)dup3, sizeof(dup3));
packet_overwrite_v4_saddr(&frag_pkt3, 3);
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt3, 1);
EXPECT_TRUE(defrag_pkt == NULL);
expect_stat = {
2, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
3, 0, 2, 1, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
check_stat(ip_reassembly_stat(assy),
2, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
3, 0, 2, 1, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// NOTE(review): redeclares "pkt" as a pointer — merged-diff artifact.
struct packet *pkt = NULL;
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt);
packet_free(pkt);
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt);
packet_free(pkt);
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt);
packet_free(pkt);
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt == NULL);
expect_stat = {
2, 0, 2, // ip4: defrags_expected, defrags_succeed, defrags_failed
3, 2, 2, 1, 2, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// free packet
packet_free(new_pkt);
ip_reassembly_free(assy);
}
#endif
#if 1
// NOTE(review): truncated stub — the scrape dropped this test's body. As shown,
// ip_reass is not declared in this scope and no overlap scenario is exercised;
// recover the real OVERLAP test body from the original commit.
TEST(IPV4_REASSEMBLE, OVERLAP)
{
// TEST ON IPv6
ip_reassembly_free(ip_reass);
}
#endif

View File

@@ -1,4 +1,59 @@
#include "gtest_utils.h"
#include <gtest/gtest.h>
#include "packet_parser.h"
#include "packet_helper.h"
#include "packet_internal.h"
#include "ip_reassembly.h"
// Test helper: rewrite the IPv6 source address of the innermost IPv6 header
// in-place. Fails the current test (non-fatally) if no IPv6 layer is present.
static inline void packet_overwrite_v6_saddr(struct packet *pkt, struct in6_addr saddr)
{
    const struct layer_private *ip6_layer = packet_get_innermost_layer(pkt, LAYER_PROTO_IPV6);
    EXPECT_TRUE(ip6_layer);
    ip6_hdr_set_src_in6_addr((struct ip6_hdr *)ip6_layer->hdr_ptr, saddr);
}
// Test helper: rewrite the fragment offset inside the IPv6 fragment extension
// header of the innermost IPv6 layer. Fails the current test (non-fatally) if
// the IPv6 layer or its fragment extension header is missing.
static inline void packet_overwrite_v6_frag_offset(struct packet *pkt, uint16_t offset)
{
    const struct layer_private *ip6_layer = packet_get_innermost_layer(pkt, LAYER_PROTO_IPV6);
    EXPECT_TRUE(ip6_layer);
    struct ip6_frag *frag_ext = ip6_hdr_get_frag_ext((struct ip6_hdr *)ip6_layer->hdr_ptr);
    EXPECT_TRUE(frag_ext);
    ipv6_frag_set_offset(frag_ext, offset);
}
// Test helper: compare every counter of an ip_reassembly_stat against the
// expected values, field by field, using non-fatal gtest assertions so a
// mismatch reports the exact counter that diverged.
static inline void stat_cmp(struct ip_reassembly_stat *curr, struct ip_reassembly_stat *expect)
{
    EXPECT_TRUE(curr != NULL);
    EXPECT_TRUE(expect != NULL);
// Expands to the same EXPECT_TRUE(curr->f == expect->f) the hand-written
// version used; one invocation per counter keeps the field list auditable.
#define STAT_FIELD_EQ(f) EXPECT_TRUE(curr->f == expect->f)
    STAT_FIELD_EQ(ip4_defrags_expected);
    STAT_FIELD_EQ(ip4_defrags_succeed);
    STAT_FIELD_EQ(ip4_defrags_failed);
    STAT_FIELD_EQ(ip4_frags);
    STAT_FIELD_EQ(ip4_frags_freed);
    STAT_FIELD_EQ(ip4_frags_buffered);
    STAT_FIELD_EQ(ip4_frags_no_buffer);
    STAT_FIELD_EQ(ip4_frags_timeout);
    STAT_FIELD_EQ(ip4_frags_invalid_length);
    STAT_FIELD_EQ(ip4_frags_overlap);
    STAT_FIELD_EQ(ip4_frags_too_many);
    STAT_FIELD_EQ(ip6_defrags_expected);
    STAT_FIELD_EQ(ip6_defrags_succeed);
    STAT_FIELD_EQ(ip6_defrags_failed);
    STAT_FIELD_EQ(ip6_frags);
    STAT_FIELD_EQ(ip6_frags_freed);
    STAT_FIELD_EQ(ip6_frags_buffered);
    STAT_FIELD_EQ(ip6_frags_no_buffer);
    STAT_FIELD_EQ(ip6_frags_timeout);
    STAT_FIELD_EQ(ip6_frags_invalid_length);
    STAT_FIELD_EQ(ip6_frags_overlap);
    STAT_FIELD_EQ(ip6_frags_too_many);
#undef STAT_FIELD_EQ
}
/*
* Frame 3: 1510 bytes on wire (12080 bits), 1510 bytes captured (12080 bits)
@@ -605,84 +660,84 @@ unsigned char expect[] = {
#if 1
TEST(IPV6_REASSEMBLE, NORMAL)
{
struct packet pkt;
struct packet *new_pkt;
struct packet frag_pkt1 = {};
struct packet frag_pkt2 = {};
struct packet frag_pkt3 = {};
struct packet frag_pkt4 = {};
struct packet *frag_pkt = NULL;
struct packet *defrag_pkt = NULL;
const struct layer_private *layer;
struct ip_reassembly *assy;
struct ip_reassembly_config cfg = {
.enable = true,
.bucket_entries = 16,
.bucket_num = 8,
.ip_frag_timeout_ms = 1,
.ip_frag_expire_polling_interval_ms = 0,
.ip_frag_expire_polling_limit = 1024,
struct ip_reassembly *ip_reass;
struct ip_reassembly_stat *curr_stat;
struct ip_reassembly_stat expect_stat;
ip_reass = ip_reassembly_new(1, 1024, 64);
EXPECT_TRUE(ip_reass != NULL);
curr_stat = ip_reassembly_get_stat(ip_reass);
EXPECT_TRUE(curr_stat != NULL);
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
assy = ip_reassembly_new(&cfg, 0);
EXPECT_TRUE(assy != NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
stat_cmp(curr_stat, &expect_stat);
// frag1
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag1, sizeof(frag1));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
1, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
1, 0, 1, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
packet_parse(&frag_pkt1, (const char *)frag1, sizeof(frag1));
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt1, 1);
EXPECT_TRUE(defrag_pkt == NULL);
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
1, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
1, 0, 1, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// frag2
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag2, sizeof(frag2));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
1, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
2, 0, 2, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
packet_parse(&frag_pkt2, (const char *)frag2, sizeof(frag2));
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt2, 1);
EXPECT_TRUE(defrag_pkt == NULL);
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
1, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
2, 0, 2, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// frag3
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag3, sizeof(frag3));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
1, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
3, 0, 3, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
packet_parse(&frag_pkt3, (const char *)frag3, sizeof(frag3));
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt3, 1);
EXPECT_TRUE(defrag_pkt == NULL);
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
1, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
3, 0, 3, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// frag4
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag4, sizeof(frag4));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
1, 1, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
4, 4, 4, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
packet_parse(&frag_pkt4, (const char *)frag4, sizeof(frag4));
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt4, 1);
EXPECT_TRUE(defrag_pkt);
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
1, 1, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
4, 4, 4, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// check packet
packet_print(new_pkt);
EXPECT_TRUE(new_pkt->data_len == 14 /* ETH */ + 40 /* IPv6 */ + 8 /* UDP */ + 5379 /* DATA */);
EXPECT_TRUE(new_pkt->data_len == sizeof(expect));
EXPECT_TRUE(memcmp(new_pkt->data_ptr, expect, new_pkt->data_len) == 0);
EXPECT_TRUE(defrag_pkt->data_len == 14 /* ETH */ + 40 /* IPv6 */ + 8 /* UDP */ + 5379 /* DATA */);
EXPECT_TRUE(defrag_pkt->data_len == sizeof(expect));
EXPECT_TRUE(memcmp(defrag_pkt->data_ptr, expect, defrag_pkt->data_len) == 0);
// check IPv6
layer = packet_get_innermost_layer(new_pkt, LAYER_PROTO_IPV6);
layer = packet_get_innermost_layer(defrag_pkt, LAYER_PROTO_IPV6);
EXPECT_TRUE(layer);
struct ip6_hdr *hdr = (struct ip6_hdr *)layer->hdr_ptr;
EXPECT_TRUE(ip6_hdr_get_version(hdr) == 6);
@@ -702,7 +757,7 @@ TEST(IPV6_REASSEMBLE, NORMAL)
EXPECT_TRUE(ip6_hdr_get_frag_ext(hdr) == NULL);
// check UDP
layer = packet_get_innermost_layer(new_pkt, LAYER_PROTO_UDP);
layer = packet_get_innermost_layer(defrag_pkt, LAYER_PROTO_UDP);
EXPECT_TRUE(layer);
struct udphdr *udp_hdr = (struct udphdr *)layer->hdr_ptr;
EXPECT_TRUE(udp_hdr_get_src_port(udp_hdr) == 6363);
@@ -711,473 +766,309 @@ TEST(IPV6_REASSEMBLE, NORMAL)
EXPECT_TRUE(udp_hdr_get_checksum(udp_hdr) == 0x7916);
// free packet
packet_free(new_pkt);
EXPECT_TRUE(packet_is_defraged(defrag_pkt));
ip_reassembly_free(assy);
frag_pkt = packet_pop_frag(defrag_pkt);
EXPECT_TRUE(frag_pkt);
packet_free(frag_pkt);
frag_pkt = packet_pop_frag(defrag_pkt);
EXPECT_TRUE(frag_pkt);
packet_free(frag_pkt);
frag_pkt = packet_pop_frag(defrag_pkt);
EXPECT_TRUE(frag_pkt);
packet_free(frag_pkt);
frag_pkt = packet_pop_frag(defrag_pkt);
EXPECT_TRUE(frag_pkt);
packet_free(frag_pkt);
frag_pkt = packet_pop_frag(defrag_pkt);
EXPECT_TRUE(frag_pkt == NULL);
packet_free(defrag_pkt);
struct packet *pkt = NULL;
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt == NULL);
packet_free(pkt);
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
1, 1, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
4, 4, 4, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
ip_reassembly_free(ip_reass);
}
#endif
#if 1
TEST(IPV6_REASSEMBLE, EXPIRE)
{
struct packet pkt;
struct packet *new_pkt;
struct ip_reassembly *assy;
struct ip_reassembly_config cfg = {
.enable = true,
.bucket_entries = 16,
.bucket_num = 8,
.ip_frag_timeout_ms = 1,
.ip_frag_expire_polling_interval_ms = 0,
.ip_frag_expire_polling_limit = 1024,
struct packet frag_pkt1 = {};
struct packet frag_pkt2 = {};
struct packet *defrag_pkt = NULL;
struct ip_reassembly *ip_reass;
struct ip_reassembly_stat *curr_stat;
struct ip_reassembly_stat expect_stat;
ip_reass = ip_reassembly_new(1, 1024, 64);
EXPECT_TRUE(ip_reass != NULL);
curr_stat = ip_reassembly_get_stat(ip_reass);
EXPECT_TRUE(curr_stat != NULL);
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
assy = ip_reassembly_new(&cfg, 0);
EXPECT_TRUE(assy != NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
stat_cmp(curr_stat, &expect_stat);
// frag1
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag1, sizeof(frag1));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
1, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
1, 0, 1, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
packet_parse(&frag_pkt1, (const char *)frag1, sizeof(frag1));
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt1, 1);
EXPECT_TRUE(defrag_pkt == NULL);
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
1, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
1, 0, 1, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// frag2
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag2, sizeof(frag2));
new_pkt = ip_reassembly_packet(assy, &pkt, 2);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
2, 0, 1, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
2, 1, 2, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// free packet
packet_free(new_pkt);
ip_reassembly_free(assy);
}
#endif
#if 1
TEST(IPV6_REASSEMBLE, DUP_FIRST_FRAG)
{
struct packet pkt;
struct packet *new_pkt;
const struct layer_private *layer;
struct ip_reassembly *assy;
struct ip_reassembly_config cfg = {
.enable = true,
.bucket_entries = 16,
.bucket_num = 8,
.ip_frag_timeout_ms = 1,
.ip_frag_expire_polling_interval_ms = 0,
.ip_frag_expire_polling_limit = 1024,
packet_parse(&frag_pkt2, (const char *)frag2, sizeof(frag2));
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt2, 2);
EXPECT_TRUE(defrag_pkt == NULL);
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
1, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
2, 0, 2, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
assy = ip_reassembly_new(&cfg, 0);
EXPECT_TRUE(assy != NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// frag1
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag1, sizeof(frag1));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
1, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
1, 0, 1, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// frag1
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag1, sizeof(frag1));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
1, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
2, 0, 1, 0, 1, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// frag2
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag2, sizeof(frag2));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
1, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
3, 0, 2, 0, 1, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// frag3
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag3, sizeof(frag3));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
1, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
4, 0, 3, 0, 1, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// frag4
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag4, sizeof(frag4));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
1, 1, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
5, 4, 4, 0, 1, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// check packet
packet_print(new_pkt);
EXPECT_TRUE(new_pkt->data_len == 14 /* ETH */ + 40 /* IPv6 */ + 8 /* UDP */ + 5379 /* DATA */);
EXPECT_TRUE(new_pkt->data_len == sizeof(expect));
EXPECT_TRUE(memcmp(new_pkt->data_ptr, expect, new_pkt->data_len) == 0);
// check IPv6
layer = packet_get_innermost_layer(new_pkt, LAYER_PROTO_IPV6);
EXPECT_TRUE(layer);
struct ip6_hdr *hdr = (struct ip6_hdr *)layer->hdr_ptr;
EXPECT_TRUE(ip6_hdr_get_version(hdr) == 6);
EXPECT_TRUE(ip6_hdr_get_traffic_class(hdr) == 0);
EXPECT_TRUE(ip6_hdr_get_flow_label(hdr) == 0x21289);
EXPECT_TRUE(ip6_hdr_get_payload_len(hdr) == 5387);
EXPECT_TRUE(ip6_hdr_get_next_header(hdr) == 17); // UDP
EXPECT_TRUE(ip6_hdr_get_hop_limit(hdr) == 64);
char src_str[INET6_ADDRSTRLEN];
char dst_str[INET6_ADDRSTRLEN];
struct in6_addr src_addr = ip6_hdr_get_src_in6_addr(hdr);
struct in6_addr dst_addr = ip6_hdr_get_dst_in6_addr(hdr);
inet_ntop(AF_INET6, &src_addr, src_str, INET6_ADDRSTRLEN);
inet_ntop(AF_INET6, &dst_addr, dst_str, INET6_ADDRSTRLEN);
EXPECT_TRUE(strcmp(src_str, "2607:f010:3f9::1001") == 0);
EXPECT_TRUE(strcmp(dst_str, "2607:f010:3f9::11:0") == 0);
EXPECT_TRUE(ip6_hdr_get_frag_ext(hdr) == NULL);
// check UDP
layer = packet_get_innermost_layer(new_pkt, LAYER_PROTO_UDP);
EXPECT_TRUE(layer);
struct udphdr *udp_hdr = (struct udphdr *)layer->hdr_ptr;
EXPECT_TRUE(udp_hdr_get_src_port(udp_hdr) == 6363);
EXPECT_TRUE(udp_hdr_get_dst_port(udp_hdr) == 6363);
EXPECT_TRUE(udp_hdr_get_total_len(udp_hdr) == 5387);
EXPECT_TRUE(udp_hdr_get_checksum(udp_hdr) == 0x7916);
// free packet
packet_free(new_pkt);
ip_reassembly_free(assy);
}
#endif
#if 1
TEST(IPV6_REASSEMBLE, DUP_LAST_FRAG)
{
struct packet pkt;
struct packet *new_pkt;
const struct layer_private *layer;
struct ip_reassembly *assy;
struct ip_reassembly_config cfg = {
.enable = true,
.bucket_entries = 16,
.bucket_num = 8,
.ip_frag_timeout_ms = 1,
.ip_frag_expire_polling_interval_ms = 0,
.ip_frag_expire_polling_limit = 1024,
struct packet *pkt = NULL;
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt);
packet_free(pkt);
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt);
packet_free(pkt);
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt == NULL);
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
1, 0, 1, // ip6: defrags_expected, defrags_succeed, defrags_failed
2, 2, 2, 0, 2, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
assy = ip_reassembly_new(&cfg, 0);
EXPECT_TRUE(assy != NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// frag4
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag4, sizeof(frag4));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
1, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
1, 0, 1, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// frag4
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag4, sizeof(frag4));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
1, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
2, 0, 1, 0, 0, 1); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// frag3
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag3, sizeof(frag3));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
1, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
3, 0, 2, 0, 0, 1); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// frag2
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag2, sizeof(frag2));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
1, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
4, 0, 3, 0, 0, 1); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// frag1
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)frag1, sizeof(frag1));
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
1, 1, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
5, 4, 4, 0, 0, 1); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
// check packet
packet_print(new_pkt);
EXPECT_TRUE(new_pkt->data_len == 14 /* ETH */ + 40 /* IPv6 */ + 8 /* UDP */ + 5379 /* DATA */);
EXPECT_TRUE(new_pkt->data_len == sizeof(expect));
EXPECT_TRUE(memcmp(new_pkt->data_ptr, expect, new_pkt->data_len) == 0);
// check IPv6
layer = packet_get_innermost_layer(new_pkt, LAYER_PROTO_IPV6);
EXPECT_TRUE(layer);
struct ip6_hdr *hdr = (struct ip6_hdr *)layer->hdr_ptr;
EXPECT_TRUE(ip6_hdr_get_version(hdr) == 6);
EXPECT_TRUE(ip6_hdr_get_traffic_class(hdr) == 0);
EXPECT_TRUE(ip6_hdr_get_flow_label(hdr) == 0x21289);
EXPECT_TRUE(ip6_hdr_get_payload_len(hdr) == 5387);
EXPECT_TRUE(ip6_hdr_get_next_header(hdr) == 17); // UDP
EXPECT_TRUE(ip6_hdr_get_hop_limit(hdr) == 64);
char src_str[INET6_ADDRSTRLEN];
char dst_str[INET6_ADDRSTRLEN];
struct in6_addr src_addr = ip6_hdr_get_src_in6_addr(hdr);
struct in6_addr dst_addr = ip6_hdr_get_dst_in6_addr(hdr);
inet_ntop(AF_INET6, &src_addr, src_str, INET6_ADDRSTRLEN);
inet_ntop(AF_INET6, &dst_addr, dst_str, INET6_ADDRSTRLEN);
EXPECT_TRUE(strcmp(src_str, "2607:f010:3f9::1001") == 0);
EXPECT_TRUE(strcmp(dst_str, "2607:f010:3f9::11:0") == 0);
EXPECT_TRUE(ip6_hdr_get_frag_ext(hdr) == NULL);
// check UDP
layer = packet_get_innermost_layer(new_pkt, LAYER_PROTO_UDP);
EXPECT_TRUE(layer);
struct udphdr *udp_hdr = (struct udphdr *)layer->hdr_ptr;
EXPECT_TRUE(udp_hdr_get_src_port(udp_hdr) == 6363);
EXPECT_TRUE(udp_hdr_get_dst_port(udp_hdr) == 6363);
EXPECT_TRUE(udp_hdr_get_total_len(udp_hdr) == 5387);
EXPECT_TRUE(udp_hdr_get_checksum(udp_hdr) == 0x7916);
// free packet
packet_free(new_pkt);
ip_reassembly_free(assy);
ip_reassembly_free(ip_reass);
}
#endif
#if 1
TEST(IPV6_REASSEMBLE, FULL)
{
struct packet pkt;
struct packet *new_pkt;
struct in6_addr src_addr;
struct ip_reassembly *assy;
struct ip_reassembly_config cfg = {
.enable = true,
.bucket_entries = 1,
.bucket_num = 1,
.ip_frag_timeout_ms = 1,
.ip_frag_expire_polling_interval_ms = 0,
.ip_frag_expire_polling_limit = 1024,
struct in6_addr addr;
struct packet frag_pkt1 = {};
struct packet frag_pkt2 = {};
struct packet frag_pkt3 = {};
struct packet *defrag_pkt = NULL;
struct ip_reassembly *ip_reass;
struct ip_reassembly_stat *curr_stat;
struct ip_reassembly_stat expect_stat;
ip_reass = ip_reassembly_new(1, 2, 64);
EXPECT_TRUE(ip_reass != NULL);
curr_stat = ip_reassembly_get_stat(ip_reass);
EXPECT_TRUE(curr_stat != NULL);
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
assy = ip_reassembly_new(&cfg, 0);
EXPECT_TRUE(assy != NULL);
char dup1[sizeof(frag1)] = {0};
char dup2[sizeof(frag1)] = {0};
char dup3[sizeof(frag1)] = {0};
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
char dup_frag[sizeof(frag1)] = {0};
memcpy(dup_frag, frag1, sizeof(frag1));
memset(&pkt, 0, sizeof(pkt));
packet_parse(&pkt, (const char *)dup_frag, sizeof(dup_frag));
memcpy(dup1, frag1, sizeof(frag1));
memcpy(dup2, frag1, sizeof(frag1));
memcpy(dup3, frag1, sizeof(frag1));
// flow1
memset(&src_addr, 1, sizeof(src_addr));
packet_set_ipv6_src_addr(&pkt, src_addr);
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
1, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
1, 0, 1, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
packet_parse(&frag_pkt1, (const char *)dup1, sizeof(dup1));
memset(&addr, 1, sizeof(addr));
packet_overwrite_v6_saddr(&frag_pkt1, addr);
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt1, 1);
EXPECT_TRUE(defrag_pkt == NULL);
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
1, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
1, 0, 1, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// flow2
memset(&src_addr, 2, sizeof(src_addr));
packet_set_ipv6_src_addr(&pkt, src_addr);
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
2, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
2, 0, 2, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
packet_parse(&frag_pkt2, (const char *)dup2, sizeof(dup2));
memset(&addr, 2, sizeof(addr));
packet_overwrite_v6_saddr(&frag_pkt2, addr);
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt2, 1);
EXPECT_TRUE(defrag_pkt == NULL);
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
2, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
2, 0, 2, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// flow3
memset(&src_addr, 3, sizeof(src_addr));
packet_set_ipv6_src_addr(&pkt, src_addr);
new_pkt = ip_reassembly_packet(assy, &pkt, 1);
EXPECT_TRUE(new_pkt == NULL);
packet_parse(&frag_pkt3, (const char *)dup3, sizeof(dup3));
memset(&addr, 3, sizeof(addr));
packet_overwrite_v6_saddr(&frag_pkt3, addr);
defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt3, 1);
EXPECT_TRUE(defrag_pkt == NULL);
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
2, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
3, 0, 2, 1, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
check_stat(ip_reassembly_stat(assy),
0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
2, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
3, 0, 2, 1, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
struct packet *pkt = NULL;
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt);
packet_free(pkt);
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt);
packet_free(pkt);
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt);
packet_free(pkt);
pkt = ip_reassembly_clean(ip_reass, 2);
EXPECT_TRUE(pkt == NULL);
expect_stat = {
0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
2, 0, 2, // ip6: defrags_expected, defrags_succeed, defrags_failed
3, 2, 2, 1, 2, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
};
stat_cmp(curr_stat, &expect_stat);
// free packet
packet_free(new_pkt);
ip_reassembly_free(assy);
ip_reassembly_free(ip_reass);
}
#endif
#if 1
// NOTE(review): this test body appears to interleave two variants of the same
// scenario — an `assy` handle (ip_reassembly_new(&cfg, 0) / ip_reassembly_packet
// / check_stat) and an `ip_reass` handle (ip_reassembly_new(1, 1024, 64) /
// ip_reassembly_defrag / stat_cmp). The `cfg` initializer below is never closed
// with "};" and `pkt` is declared twice, so this block cannot compile as
// written — confirm which API generation is intended and remove the other half.
TEST(IPV6_REASSEMBLE, OVERLAP)
{
    struct packet pkt;
    struct packet *new_pkt;
    struct ip_reassembly *assy;
    struct ip_reassembly_config cfg = {
        .enable = true,
        .bucket_entries = 16,
        .bucket_num = 8,
        .ip_frag_timeout_ms = 1,
        .ip_frag_expire_polling_interval_ms = 0,
        .ip_frag_expire_polling_limit = 1024,
    // NOTE(review): missing "};" — the initializer above is unterminated.
    struct packet frag_pkt1 = {};
    struct packet frag_pkt2 = {};
    struct packet frag_pkt3 = {};
    struct packet frag_pkt4 = {};
    struct packet *defrag_pkt = NULL;
    struct ip_reassembly *ip_reass;
    struct ip_reassembly_stat *curr_stat;
    struct ip_reassembly_stat expect_stat;
    // reassembler: 1 ms timeout, 1024 fragment queues, 64 fragments per queue
    ip_reass = ip_reassembly_new(1, 1024, 64);
    EXPECT_TRUE(ip_reass != NULL);
    curr_stat = ip_reassembly_get_stat(ip_reass);
    EXPECT_TRUE(curr_stat != NULL);
    // baseline: every counter must start at zero
    expect_stat = {
        0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
        0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
        0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
        0, 0, 0, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
    };
    assy = ip_reassembly_new(&cfg, 0);
    EXPECT_TRUE(assy != NULL);
    check_stat(ip_reassembly_stat(assy),
               0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
               0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
               0, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
               0, 0, 0, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
    stat_cmp(curr_stat, &expect_stat);
    // frag1: first fragment is buffered, no datagram produced yet
    memset(&pkt, 0, sizeof(pkt));
    packet_parse(&pkt, (const char *)frag1, sizeof(frag1));
    new_pkt = ip_reassembly_packet(assy, &pkt, 1);
    EXPECT_TRUE(new_pkt == NULL);
    check_stat(ip_reassembly_stat(assy),
               0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
               0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
               1, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
               1, 0, 1, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
    packet_parse(&frag_pkt1, (const char *)frag1, sizeof(frag1));
    defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt1, 1);
    EXPECT_TRUE(defrag_pkt == NULL);
    expect_stat = {
        0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
        0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
        1, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
        1, 0, 1, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
    };
    stat_cmp(curr_stat, &expect_stat);
    // frag2: second fragment, still incomplete
    memset(&pkt, 0, sizeof(pkt));
    packet_parse(&pkt, (const char *)frag2, sizeof(frag2));
    new_pkt = ip_reassembly_packet(assy, &pkt, 1);
    EXPECT_TRUE(new_pkt == NULL);
    check_stat(ip_reassembly_stat(assy),
               0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
               0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
               1, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
               2, 0, 2, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
    packet_parse(&frag_pkt2, (const char *)frag2, sizeof(frag2));
    defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt2, 1);
    EXPECT_TRUE(defrag_pkt == NULL);
    expect_stat = {
        0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
        0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
        1, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
        2, 0, 2, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
    };
    stat_cmp(curr_stat, &expect_stat);
    // frag3 -- overwrite frag offset so that it overlaps an already-buffered fragment
    char dup_frag[sizeof(frag3)] = {0};
    memcpy(dup_frag, frag3, sizeof(frag3));
    memset(&pkt, 0, sizeof(pkt));
    packet_parse(&pkt, (const char *)dup_frag, sizeof(dup_frag));
    packet_set_ipv6_frag_offset(&pkt, 2048);
    new_pkt = ip_reassembly_packet(assy, &pkt, 1);
    EXPECT_TRUE(new_pkt == NULL);
    check_stat(ip_reassembly_stat(assy),
               0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
               0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
               1, 0, 0, 0, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
               3, 0, 3, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
    packet_parse(&frag_pkt3, (const char *)dup_frag, sizeof(dup_frag));
    packet_overwrite_v6_frag_offset(&frag_pkt3, 2048);
    defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt3, 1);
    EXPECT_TRUE(defrag_pkt == NULL);
    expect_stat = {
        0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
        0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
        1, 0, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed
        3, 0, 3, 0, 0, 0, 0, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
    };
    stat_cmp(curr_stat, &expect_stat);
    // frag4: last fragment triggers the overlap detection -> defrag fails
    memset(&pkt, 0, sizeof(pkt));
    packet_parse(&pkt, (const char *)frag4, sizeof(frag4));
    new_pkt = ip_reassembly_packet(assy, &pkt, 1);
    EXPECT_TRUE(new_pkt == NULL);
    packet_parse(&frag_pkt4, (const char *)frag4, sizeof(frag4));
    defrag_pkt = ip_reassembly_defrag(ip_reass, &frag_pkt4, 1);
    EXPECT_TRUE(defrag_pkt == NULL);
    expect_stat = {
        0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
        0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
        1, 0, 1, // ip6: defrags_expected, defrags_succeed, defrags_failed
        4, 4, 4, 0, 0, 0, 1, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
    };
    stat_cmp(curr_stat, &expect_stat);
    check_stat(ip_reassembly_stat(assy),
               0, 0, 0, 0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
               0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
               1, 0, 0, 0, 1, 0, // ip6: defrags_expected, defrags_succeed, defrags_failed_timeout, defrags_failed_invalid_length, defrags_failed_overlap, defrags_failed_too_many_frag
               4, 4, 4, 0, 0, 0); // ip6: frags, frags_freed, frags_buffered, frags_bypass_no_buffer, frags_bypass_dup_fist_frag, frags_bypass_dup_last_frag
    // drain the failed queue: four buffered fragments come back, then NULL
    // NOTE(review): `pkt` below redeclares the `struct packet pkt` from the top
    // of this function — a compile error; symptom of the interleaved variants.
    struct packet *pkt = NULL;
    pkt = ip_reassembly_clean(ip_reass, 2);
    EXPECT_TRUE(pkt);
    packet_free(pkt);
    pkt = ip_reassembly_clean(ip_reass, 2);
    EXPECT_TRUE(pkt);
    packet_free(pkt);
    pkt = ip_reassembly_clean(ip_reass, 2);
    EXPECT_TRUE(pkt);
    packet_free(pkt);
    pkt = ip_reassembly_clean(ip_reass, 2);
    EXPECT_TRUE(pkt);
    packet_free(pkt);
    pkt = ip_reassembly_clean(ip_reass, 2);
    EXPECT_TRUE(pkt == NULL);
    expect_stat = {
        0, 0, 0, // ip4: defrags_expected, defrags_succeed, defrags_failed
        0, 0, 0, 0, 0, 0, 0, 0, // ip4: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
        1, 0, 1, // ip6: defrags_expected, defrags_succeed, defrags_failed
        4, 4, 4, 0, 0, 0, 1, 0 // ip6: frags, frags_freed, frags_buffered, frags_no_buffer, timeout, invalid_length, overlap, too_many
    };
    stat_cmp(curr_stat, &expect_stat);
    // free packet
    packet_free(new_pkt);
    ip_reassembly_free(assy);
    ip_reassembly_free(ip_reass);
}
#endif

View File

@@ -1,106 +0,0 @@
#pragma once
#include <gtest/gtest.h>
#ifdef __cplusplus
extern "C"
{
#endif
#include "packet_helper.h"
#include "ip_reassembly.h"
#include "packet_internal.h"
#include "packet_dump.h"
#include "packet_parser.h"
// Overwrite the source address of the innermost IPv4 header in `pkt`.
static inline void packet_set_ipv4_src_addr(struct packet *pkt, uint32_t saddr)
{
    const struct layer_private *layer = packet_get_innermost_layer(pkt, LAYER_PROTO_IPV4);
    EXPECT_TRUE(layer);
    ip4_hdr_set_src_addr((struct ip *)layer->hdr_ptr, saddr);
}
// Overwrite the source address of the innermost IPv6 header in `pkt`.
static inline void packet_set_ipv6_src_addr(struct packet *pkt, struct in6_addr saddr)
{
    const struct layer_private *layer = packet_get_innermost_layer(pkt, LAYER_PROTO_IPV6);
    EXPECT_TRUE(layer);
    ip6_hdr_set_src_in6_addr((struct ip6_hdr *)layer->hdr_ptr, saddr);
}
// Overwrite the fragment offset in the IPv6 fragment extension header of
// `pkt`'s innermost IPv6 layer. The packet must actually carry a fragment
// extension header (EXPECT'ed below).
static inline void packet_set_ipv6_frag_offset(struct packet *pkt, uint16_t offset)
{
    const struct layer_private *layer = packet_get_innermost_layer(pkt, LAYER_PROTO_IPV6);
    EXPECT_TRUE(layer);
    struct ip6_frag *frag = ip6_hdr_get_frag_ext((struct ip6_hdr *)layer->hdr_ptr);
    EXPECT_TRUE(frag);
    ipv6_frag_set_offset(frag, offset);
}
// Assert that every counter in `stat` equals the expected value; one
// EXPECT_TRUE per field so a gtest failure pinpoints the diverging counter.
// NOTE(review): "dup_fist_frag" is presumably a typo for "dup_first_frag" —
// renaming must be coordinated with struct ip_reassembly_stat's field names.
static inline void check_stat(struct ip_reassembly_stat *stat,
                              // IPv4 frag stat
                              uint64_t ip4_defrags_expected,
                              uint64_t ip4_defrags_succeed,
                              uint64_t ip4_defrags_failed_timeout,
                              uint64_t ip4_defrags_failed_invalid_length,
                              uint64_t ip4_defrags_failed_overlap,
                              uint64_t ip4_defrags_failed_too_many_frag,
                              uint64_t ip4_frags,
                              uint64_t ip4_frags_freed,
                              uint64_t ip4_frags_buffered,
                              uint64_t ip4_frags_bypass_no_buffer,
                              uint64_t ip4_frags_bypass_dup_fist_frag,
                              uint64_t ip4_frags_bypass_dup_last_frag,
                              // IPv6 frag stat
                              uint64_t ip6_defrags_expected,
                              uint64_t ip6_defrags_succeed,
                              uint64_t ip6_defrags_failed_timeout,
                              uint64_t ip6_defrags_failed_invalid_length,
                              uint64_t ip6_defrags_failed_overlap,
                              uint64_t ip6_defrags_failed_too_many_frag,
                              uint64_t ip6_frags,
                              uint64_t ip6_frags_freed,
                              uint64_t ip6_frags_buffered,
                              uint64_t ip6_frags_bypass_no_buffer,
                              uint64_t ip6_frags_bypass_dup_fist_frag,
                              uint64_t ip6_frags_bypass_dup_last_frag)
{
    EXPECT_TRUE(stat != NULL);
    EXPECT_TRUE(stat->ip4_defrags_expected == ip4_defrags_expected);
    EXPECT_TRUE(stat->ip4_defrags_succeed == ip4_defrags_succeed);
    EXPECT_TRUE(stat->ip4_defrags_failed_timeout == ip4_defrags_failed_timeout);
    EXPECT_TRUE(stat->ip4_defrags_failed_invalid_length == ip4_defrags_failed_invalid_length);
    EXPECT_TRUE(stat->ip4_defrags_failed_overlap == ip4_defrags_failed_overlap);
    EXPECT_TRUE(stat->ip4_defrags_failed_too_many_frag == ip4_defrags_failed_too_many_frag);
    EXPECT_TRUE(stat->ip4_frags == ip4_frags);
    EXPECT_TRUE(stat->ip4_frags_freed == ip4_frags_freed);
    EXPECT_TRUE(stat->ip4_frags_buffered == ip4_frags_buffered);
    EXPECT_TRUE(stat->ip4_frags_bypass_no_buffer == ip4_frags_bypass_no_buffer);
    EXPECT_TRUE(stat->ip4_frags_bypass_dup_fist_frag == ip4_frags_bypass_dup_fist_frag);
    EXPECT_TRUE(stat->ip4_frags_bypass_dup_last_frag == ip4_frags_bypass_dup_last_frag);
    EXPECT_TRUE(stat->ip6_defrags_expected == ip6_defrags_expected);
    EXPECT_TRUE(stat->ip6_defrags_succeed == ip6_defrags_succeed);
    EXPECT_TRUE(stat->ip6_defrags_failed_timeout == ip6_defrags_failed_timeout);
    EXPECT_TRUE(stat->ip6_defrags_failed_invalid_length == ip6_defrags_failed_invalid_length);
    EXPECT_TRUE(stat->ip6_defrags_failed_overlap == ip6_defrags_failed_overlap);
    EXPECT_TRUE(stat->ip6_defrags_failed_too_many_frag == ip6_defrags_failed_too_many_frag);
    EXPECT_TRUE(stat->ip6_frags == ip6_frags);
    EXPECT_TRUE(stat->ip6_frags_freed == ip6_frags_freed);
    EXPECT_TRUE(stat->ip6_frags_buffered == ip6_frags_buffered);
    EXPECT_TRUE(stat->ip6_frags_bypass_no_buffer == ip6_frags_bypass_no_buffer);
    EXPECT_TRUE(stat->ip6_frags_bypass_dup_fist_frag == ip6_frags_bypass_dup_fist_frag);
    EXPECT_TRUE(stat->ip6_frags_bypass_dup_last_frag == ip6_frags_bypass_dup_last_frag);
}
#ifdef __cplusplus
}
#endif

View File

@@ -1,3 +1,3 @@
add_library(packet_io pcap_io.c marsio_io.c packet_io.c)
add_library(packet_io pcap_io.c mars_io.c packet_io.c)
target_include_directories(packet_io PUBLIC ${CMAKE_CURRENT_LIST_DIR})
target_link_libraries(packet_io marsio pcap packet_manager)
target_link_libraries(packet_io marsio pcap packet_manager ip_reassembly)

618
infra/packet_io/mars_io.c Normal file
View File

@@ -0,0 +1,618 @@
#include <sched.h>
#include <assert.h>
#include "marsio.h"
#include "mars_io.h"
#include "packet_pool.h"
#include "packet_parser.h"
#include "ip_reassembly.h"
#include "log_internal.h"
#include "utils_internal.h"
#include "packet_internal.h"
#define MARS_IO_LOG_ERROR(format, ...) STELLAR_LOG_ERROR(__thread_local_logger, "mars io", format, ##__VA_ARGS__)
#define MARS_IO_LOG_INFO(format, ...) STELLAR_LOG_INFO(__thread_local_logger, "mars io", format, ##__VA_ARGS__)
// Configuration for the marsio packet-io backend, loaded from the
// [packet_io] tables of the TOML file (see mars_io_cfg_new()).
struct mars_io_cfg
{
    char app_symbol[64];               // marsio application symbol
    char dev_symbol[64];               // marsio device symbol
    uint64_t thread_num;               // range [1, MAX_THREAD_NUM]
    uint64_t cpu_mask[MAX_THREAD_NUM]; // one CPU id per worker thread
    uint64_t idle_yield_ms;            // range: [0, 60000] (ms)
    // packet pool
    uint64_t capacity; // range: [1, 4294967295]
    // ip reassembly
    uint64_t fail_action;     // 0: bypass, 1: drop
    uint64_t timeout_ms;      // range: [1, 60000] (ms)
    uint64_t frag_queue_num;  // range: [1, 4294967295]
    uint64_t frag_queue_size; // range: [2, 65535]
};
// Runtime state of the marsio packet-io backend. The per-thread arrays are
// indexed by the worker thread index (thr_idx < cfg->thread_num).
struct mars_io
{
    struct mars_io_cfg *cfg;                        // loaded configuration (owned)
    struct mr_instance *mr_ins;                     // marsio instance handle
    struct mr_vdev *mr_dev;                         // opened marsio device
    struct mr_sendpath *mr_path;                    // send path bound to mr_dev
    struct packet_pool *pool[MAX_THREAD_NUM];       // per-thread packet object pools
    struct packet_io_stat stat[MAX_THREAD_NUM];     // per-thread IO counters
    struct ip_reassembly *ip_reass[MAX_THREAD_NUM]; // per-thread IP reassemblers
};
/******************************************************************************
* Private API
******************************************************************************/
/*
 * Allocate a mars_io_cfg and populate it from the [packet_io] tables of
 * `toml_file`. Returns NULL on allocation failure, when any key is missing
 * or out of range, or when the cpu_mask array length does not equal
 * thread_num (one CPU id is required per worker thread).
 */
static struct mars_io_cfg *mars_io_cfg_new(const char *toml_file)
{
    struct mars_io_cfg *cfg = (struct mars_io_cfg *)calloc(1, sizeof(struct mars_io_cfg));
    if (cfg == NULL)
    {
        return NULL;
    }
    int ret = 0;
    int num = 0;
    // `ret` accumulates loader results; assumes the loaders return 0 on
    // success and non-zero on failure — TODO confirm against utils_internal.h
    ret += load_toml_str_config(toml_file, "packet_io.app_symbol", cfg->app_symbol);
    ret += load_toml_str_config(toml_file, "packet_io.dev_symbol", cfg->dev_symbol);
    ret += load_toml_integer_config(toml_file, "packet_io.thread_num", &cfg->thread_num, 1, MAX_THREAD_NUM);
    ret += load_toml_integer_config(toml_file, "packet_io.idle_yield_ms", &cfg->idle_yield_ms, 0, 60000);
    num = load_toml_array_config(toml_file, "packet_io.cpu_mask", cfg->cpu_mask, MAX_THREAD_NUM);
    ret += load_toml_integer_config(toml_file, "packet_io.packet_pool.capacity", &cfg->capacity, 1, 4294967295);
    ret += load_toml_integer_config(toml_file, "packet_io.ip_reassembly.fail_action", &cfg->fail_action, 0, 1);
    ret += load_toml_integer_config(toml_file, "packet_io.ip_reassembly.timeout_ms", &cfg->timeout_ms, 1, 60000);
    ret += load_toml_integer_config(toml_file, "packet_io.ip_reassembly.frag_queue_num", &cfg->frag_queue_num, 1, 4294967295);
    ret += load_toml_integer_config(toml_file, "packet_io.ip_reassembly.frag_queue_size", &cfg->frag_queue_size, 2, 65535);
    if (ret != 0 || num != (int)cfg->thread_num)
    {
        free(cfg);
        return NULL;
    }
    else
    {
        return cfg;
    }
}
/* Release a configuration created by mars_io_cfg_new(). NULL is a no-op. */
static void mars_io_cfg_free(struct mars_io_cfg *cfg)
{
    if (cfg == NULL)
    {
        return;
    }
    free(cfg);
}
/*
 * Log the loaded configuration at INFO level, one line per key, mirroring
 * the TOML key names. No-op when cfg is NULL.
 */
static void mars_io_cfg_print(const struct mars_io_cfg *cfg)
{
    if (cfg)
    {
        MARS_IO_LOG_INFO("packet_io.app_symbol : %s", cfg->app_symbol);
        MARS_IO_LOG_INFO("packet_io.dev_symbol : %s", cfg->dev_symbol);
        MARS_IO_LOG_INFO("packet_io.idle_yield_ms : %lu", cfg->idle_yield_ms);
        MARS_IO_LOG_INFO("packet_io.thread_num : %lu", cfg->thread_num);
        for (uint64_t i = 0; i < cfg->thread_num; i++)
        {
            /* i and cpu_mask[i] are uint64_t: %d here was undefined behavior
             * (mismatched printf length modifier); use %lu like the rest of
             * this function. */
            MARS_IO_LOG_INFO("packet_io.cpu_mask[%03lu] : %lu", i, cfg->cpu_mask[i]);
        }
        MARS_IO_LOG_INFO("packet_io.packet_pool.capacity : %lu", cfg->capacity);
        MARS_IO_LOG_INFO("packet_io.ip_reassembly.fail_action : %lu", cfg->fail_action);
        MARS_IO_LOG_INFO("packet_io.ip_reassembly.timeout_ms : %lu", cfg->timeout_ms);
        MARS_IO_LOG_INFO("packet_io.ip_reassembly.frag_queue_num : %lu", cfg->frag_queue_num);
        MARS_IO_LOG_INFO("packet_io.ip_reassembly.frag_queue_size : %lu", cfg->frag_queue_size);
    }
}
/*
 * Copy per-buffer metadata from a received marsio buffer into `pkt`.
 * Every field is fetched independently; a missing field is logged but not
 * fatal — the packet is still delivered with whatever metadata could be
 * recovered (best-effort).
 */
static void packet_set_metadata(struct packet *pkt, marsio_buff_t *mbuff)
{
    struct route_ctx route_ctx = {};
    route_ctx.used = marsio_buff_get_metadata(mbuff, MR_BUFF_ROUTE_CTX, &route_ctx.data, sizeof(route_ctx.data));
    if (route_ctx.used > 0)
    {
        packet_set_route_ctx(pkt, &route_ctx);
    }
    else
    {
        MARS_IO_LOG_ERROR("failed to get route ctx");
    }
    struct sids sids = {};
    sids.used = marsio_buff_get_sid_list(mbuff, sids.sid, sizeof(sids.sid) / sizeof(sids.sid[0]));
    if (sids.used > 0)
    {
        packet_set_sids(pkt, &sids);
    }
    else
    {
        MARS_IO_LOG_ERROR("failed to get sids");
    }
    uint64_t session_id = 0;
    if (marsio_buff_get_metadata(mbuff, MR_BUFF_SESSION_ID, &session_id, sizeof(session_id)) == sizeof(session_id))
    {
        packet_set_session_id(pkt, session_id);
    }
    else
    {
        MARS_IO_LOG_ERROR("failed to get session id");
    }
    // TODO: domain metadata not wired up yet
#if 0
    if (marsio_buff_get_metadata(mbuff, MR_BUFF_DOMAIN, &domain, sizeof(domain)) == sizeof(domain))
    {
        packet_set_domain(pkt, domain);
    }
    else
    {
        MARS_IO_LOG_ERROR("failed to get domain id");
    }
#endif
    uint16_t link_id = 0;
    if (marsio_buff_get_metadata(mbuff, MR_BUFF_LINK_ID, &link_id, sizeof(link_id)) == sizeof(link_id))
    {
        packet_set_link_id(pkt, link_id);
    }
    else
    {
        MARS_IO_LOG_ERROR("failed to get link id");
    }
    packet_set_ctrl(pkt, marsio_buff_is_ctrlbuf(mbuff));
    enum packet_direction direction = PACKET_DIRECTION_OUTGOING;
    if (marsio_buff_get_metadata(mbuff, MR_BUFF_DIR, &direction, sizeof(direction)) == sizeof(direction))
    {
        packet_set_direction(pkt, direction);
    }
    else
    {
        MARS_IO_LOG_ERROR("failed to get direction");
    }
    // every received packet starts out as "forward"; later stages may override
    packet_set_action(pkt, PACKET_ACTION_FORWARD);
    // TODO: fill in a real receive timestamp — currently zeroed
    const struct timeval tv = {};
    packet_set_timeval(pkt, &tv);
}
/*
 * Mirror of packet_set_metadata(): copy the packet's metadata back onto the
 * outgoing marsio buffer before transmission. Failures are logged but the
 * buffer is still sent.
 * NOTE(review): assumes packet_get_route_ctx()/packet_get_sids() never
 * return NULL — confirm against packet_internal.h.
 */
static void mbuff_set_metadata(marsio_buff_t *mbuff, struct packet *pkt)
{
    const struct route_ctx *route_ctx = packet_get_route_ctx(pkt);
    if (marsio_buff_set_metadata(mbuff, MR_BUFF_ROUTE_CTX, (void *)route_ctx->data, route_ctx->used) != 0)
    {
        MARS_IO_LOG_ERROR("failed to set route ctx");
    }
    const struct sids *sids = packet_get_sids(pkt);
    if (marsio_buff_set_sid_list(mbuff, (sid_t *)sids->sid, sids->used) != 0)
    {
        MARS_IO_LOG_ERROR("failed to set sids");
    }
    uint64_t session_id = packet_get_session_id(pkt);
    if (marsio_buff_set_metadata(mbuff, MR_BUFF_SESSION_ID, &session_id, sizeof(session_id)) != 0)
    {
        MARS_IO_LOG_ERROR("failed to set session id");
    }
    // TODO: domain metadata not wired up yet
#if 0
    uint64_t domain = packet_get_domain(pkt);
    if (marsio_buff_set_metadata(mbuff, MR_BUFF_DOMAIN, &domain, sizeof(domain)) != 0)
    {
        MARS_IO_LOG_ERROR("failed to set domain id");
    }
#endif
    uint16_t link_id = packet_get_link_id(pkt);
    if (marsio_buff_set_metadata(mbuff, MR_BUFF_LINK_ID, &link_id, sizeof(link_id)) != 0)
    {
        MARS_IO_LOG_ERROR("failed to set link id");
    }
    if (packet_is_ctrl(pkt))
    {
        marsio_buff_set_ctrlbuf(mbuff);
    }
    enum packet_direction direction = packet_get_direction(pkt);
    if (marsio_buff_set_metadata(mbuff, MR_BUFF_DIR, &direction, sizeof(direction)) != 0)
    {
        MARS_IO_LOG_ERROR("failed to set direction");
    }
}
/*
 * Identify a keepalive frame by its EtherType 0xAAAA. The byte pattern
 * AA AA reads the same in either byte order, so no ntohs() is needed.
 * Returns 1 for keepalive frames, 0 otherwise (including NULL or short input).
 */
static int is_keepalive_packet(const char *data, int len)
{
    if (data == NULL || len < (int)sizeof(struct ethhdr))
    {
        return 0;
    }
    const struct ethhdr *eth = (const struct ethhdr *)data;
    return (eth->h_proto == 0xAAAA) ? 1 : 0;
}
/*
 * packet_origin free callback: returns the underlying marsio buffer to the
 * driver and accounts the release in the owning thread's statistics.
 */
static void origin_free_cb(struct packet *pkt, void *args)
{
    struct mars_io *io = (struct mars_io *)args;
    struct packet_origin *origin = packet_get_origin(pkt);
    struct packet_io_stat *stat = &io->stat[origin->thr_idx];
    marsio_buff_t *mbuff = (marsio_buff_t *)origin->ctx;
    stat->pkts_user_freed += 1;
    stat->bytes_user_freed += packet_get_raw_len(pkt);
    marsio_buff_free(io->mr_ins, &mbuff, 1, 0, origin->thr_idx);
}
/*
 * Convert one received marsio buffer into a struct packet.
 *
 * Keepalive frames are echoed straight back on the send path and never
 * surfaced. IP fragments are handed to the per-thread reassembler, which
 * returns NULL until a full datagram is available.
 * Returns the packet to deliver, or NULL when nothing is ready.
 */
static struct packet *recv_packet(struct mars_io *mars_io, marsio_buff_t *mbuff, uint16_t thr_idx)
{
    struct packet_io_stat *stat = &mars_io->stat[thr_idx];
    struct ip_reassembly *ip_reass = mars_io->ip_reass[thr_idx];
    struct packet_pool *pool = mars_io->pool[thr_idx];
    int len = marsio_buff_datalen(mbuff);
    char *data = marsio_buff_mtod(mbuff);
    stat->pkts_rx++;
    stat->bytes_rx += len;
    if (is_keepalive_packet(data, len))
    {
        stat->keep_alive_pkts++;
        stat->keep_alive_bytes += len;
        stat->pkts_tx++;
        stat->bytes_tx += len;
        marsio_send_burst(mars_io->mr_path, thr_idx, &mbuff, 1);
        return NULL;
    }
    struct packet *pkt = packet_pool_pop(pool);
    if (pkt == NULL)
    {
        /* Pool exhausted. The previous assert() aborted debug builds and
         * dereferenced NULL under NDEBUG; drop the frame gracefully instead. */
        MARS_IO_LOG_ERROR("packet pool exhausted, dropping received packet");
        stat->pkts_dropped++;
        stat->bytes_dropped += len;
        marsio_buff_free(mars_io->mr_ins, &mbuff, 1, 0, thr_idx);
        return NULL;
    }
    struct packet_origin origin = {
        .type = ORIGIN_TYPE_MR,
        .ctx = mbuff,
        .cb = origin_free_cb,
        .args = mars_io,
        .thr_idx = thr_idx,
    };
    packet_parse(pkt, data, len);
    packet_set_metadata(pkt, mbuff);
    packet_set_origin(pkt, &origin);
    if (packet_is_fragment(pkt))
    {
        /* Reassembler takes ownership of the fragment; it returns the rebuilt
         * datagram once complete, NULL while fragments are still buffered. */
        return ip_reassembly_defrag(ip_reass, pkt, clock_get_real_time_ms());
    }
    return pkt;
}
/*
 * Transmit one packet. Packets that originated from a marsio buffer reuse it
 * (zero-copy) and return the packet object to the pool; injected packets are
 * copied into a freshly allocated marsio buffer and sent with REHASH.
 */
static void send_packet(struct mars_io *mars_io, struct packet *pkt, uint16_t thr_idx)
{
    marsio_buff_t *mbuff = NULL;
    struct packet_io_stat *stat = &mars_io->stat[thr_idx];
    int len = packet_get_raw_len(pkt);
    struct packet_origin *origin = packet_get_origin(pkt);
    if (origin->type == ORIGIN_TYPE_MR)
    {
        mbuff = (marsio_buff_t *)origin->ctx;
        mbuff_set_metadata(mbuff, pkt);
        marsio_send_burst(mars_io->mr_path, thr_idx, &mbuff, 1);
        packet_pool_push(mars_io->pool[thr_idx], pkt);
    }
    else
    {
        if (marsio_buff_malloc_global(mars_io->mr_ins, &mbuff, 1, MARSIO_SOCKET_ID_ANY, MARSIO_LCORE_ID_ANY) < 0)
        {
            MARS_IO_LOG_ERROR("unable to allocate marsio buffer for inject packet");
            packet_free(pkt);
            return; // tx counters below are intentionally skipped on failure
        }
        else
        {
            stat->pkts_injected++;
            stat->bytes_injected += len;
            // NOTE(review): marsio_buff_append()'s result is used unchecked —
            // confirm len can never exceed the buffer capacity.
            char *ptr = marsio_buff_append(mbuff, len);
            memcpy(ptr, packet_get_raw_data(pkt), len);
            mbuff_set_metadata(mbuff, pkt);
            marsio_send_burst_with_options(mars_io->mr_path, thr_idx, &mbuff, 1, MARSIO_SEND_OPT_REHASH);
            packet_free(pkt);
        }
    }
    stat->pkts_tx++;
    stat->bytes_tx += len;
}
/*
 * Account and release one dropped packet. Packets that originated from a
 * marsio buffer return both the buffer and the pool slot; injected packets
 * are simply freed.
 */
static void drop_packet(struct mars_io *mars_io, struct packet *pkt, uint16_t thr_idx)
{
    struct packet_io_stat *stat = &mars_io->stat[thr_idx];
    struct packet_origin *origin = packet_get_origin(pkt);
    int len = packet_get_raw_len(pkt);
    stat->pkts_dropped++;
    stat->bytes_dropped += len;
    if (origin->type != ORIGIN_TYPE_MR)
    {
        packet_free(pkt);
        return;
    }
    marsio_buff_t *mbuff = (marsio_buff_t *)origin->ctx;
    marsio_buff_free(mars_io->mr_ins, &mbuff, 1, 0, thr_idx);
    packet_pool_push(mars_io->pool[thr_idx], pkt);
}
/******************************************************************************
* Public API
******************************************************************************/
/*
 * Build the marsio-backed packet IO backend: load the configuration, create
 * and initialize the marsio instance, open the device and the send path,
 * then create one packet pool and one IP reassembler per worker thread.
 * Returns an opaque handle, or NULL on any failure; partially constructed
 * state is torn down through mars_io_free().
 */
void *mars_io_new(const char *toml_file)
{
    int opt = 1;
    cpu_set_t coremask;
    CPU_ZERO(&coremask);
    struct mars_io *mars_io = (struct mars_io *)calloc(1, sizeof(struct mars_io));
    if (mars_io == NULL)
    {
        MARS_IO_LOG_ERROR("unable to allocate memory for mars_io");
        return NULL;
    }
    mars_io->cfg = mars_io_cfg_new(toml_file);
    if (mars_io->cfg == NULL)
    {
        MARS_IO_LOG_ERROR("unable to create mars_io_cfg");
        goto error_out;
    }
    mars_io_cfg_print(mars_io->cfg);
    // pin marsio worker threads to the configured CPUs
    for (uint16_t i = 0; i < mars_io->cfg->thread_num; i++)
    {
        CPU_SET(mars_io->cfg->cpu_mask[i], &coremask);
    }
    mars_io->mr_ins = marsio_create();
    if (mars_io->mr_ins == NULL)
    {
        MARS_IO_LOG_ERROR("unable to create marsio instance");
        goto error_out;
    }
    marsio_option_set(mars_io->mr_ins, MARSIO_OPT_THREAD_MASK_IN_CPUSET, &coremask, sizeof(cpu_set_t));
    // exit the process when marsio hits a fatal error
    marsio_option_set(mars_io->mr_ins, MARSIO_OPT_EXIT_WHEN_ERR, &opt, sizeof(opt));
    if (marsio_init(mars_io->mr_ins, mars_io->cfg->app_symbol) != 0)
    {
        MARS_IO_LOG_ERROR("unable to init marsio instance");
        goto error_out;
    }
    // one RX and one TX queue per worker thread
    mars_io->mr_dev = marsio_open_device(mars_io->mr_ins, mars_io->cfg->dev_symbol, mars_io->cfg->thread_num, mars_io->cfg->thread_num);
    if (mars_io->mr_dev == NULL)
    {
        MARS_IO_LOG_ERROR("unable to open marsio device");
        goto error_out;
    }
    mars_io->mr_path = marsio_sendpath_create_by_vdev(mars_io->mr_dev);
    if (mars_io->mr_path == NULL)
    {
        MARS_IO_LOG_ERROR("unable to create marsio sendpath");
        goto error_out;
    }
    // per-thread resources: packet pool + IP reassembler
    for (uint64_t i = 0; i < mars_io->cfg->thread_num; i++)
    {
        mars_io->pool[i] = packet_pool_new(mars_io->cfg->capacity);
        if (mars_io->pool[i] == NULL)
        {
            MARS_IO_LOG_ERROR("unable to create packet pool");
            goto error_out;
        }
        mars_io->ip_reass[i] = ip_reassembly_new(mars_io->cfg->timeout_ms, mars_io->cfg->frag_queue_num, mars_io->cfg->frag_queue_size);
        if (mars_io->ip_reass[i] == NULL)
        {
            MARS_IO_LOG_ERROR("unable to create ip reassembly");
            goto error_out;
        }
    }
    return mars_io;
error_out:
    // mars_io_free() must tolerate a partially-initialized handle
    mars_io_free(mars_io);
    return NULL;
}
/*
 * Tear down a mars_io instance in reverse construction order. Safe to call
 * with NULL or with a partially constructed handle (mars_io_new error path).
 */
void mars_io_free(void *handle)
{
    struct mars_io *mars_io = (struct mars_io *)handle;
    if (mars_io == NULL)
    {
        return;
    }
    /* cfg is NULL when mars_io_cfg_new() failed inside mars_io_new(); the
     * previous code dereferenced cfg->thread_num unconditionally and crashed
     * on that error path. */
    if (mars_io->cfg)
    {
        for (uint64_t i = 0; i < mars_io->cfg->thread_num; i++)
        {
            ip_reassembly_free(mars_io->ip_reass[i]);
            packet_pool_free(mars_io->pool[i]);
        }
    }
    if (mars_io->mr_path)
    {
        marsio_sendpath_destory(mars_io->mr_path);
        mars_io->mr_path = NULL;
    }
    if (mars_io->mr_dev)
    {
        marsio_close_device(mars_io->mr_dev);
        mars_io->mr_dev = NULL;
    }
    if (mars_io->mr_ins)
    {
        marsio_destory(mars_io->mr_ins);
        mars_io->mr_ins = NULL;
    }
    mars_io_cfg_free(mars_io->cfg);
    free(mars_io);
}
/* mars_io never signals a break condition; the IO loop runs until stopped
 * externally. Always returns 0. */
int mars_io_isbreak(void *handle)
{
    (void)handle; /* unused */
    return 0;
}
/*
 * Per-thread initialization hook: registers the calling worker thread with
 * marsio. Returns 0 on success, -1 on failure.
 */
int mars_io_init(void *handle, uint16_t thr_idx __attribute__((unused)))
{
    struct mars_io *mars_io = (struct mars_io *)handle;
    if (marsio_thread_init(mars_io->mr_ins) != 0)
    {
        MARS_IO_LOG_ERROR("unable to init marsio thread");
        return -1;
    }
    return 0;
}
/*
 * Receive up to nr_pkts packets for worker thr_idx. Returns the number of
 * entries written into pkts[]; frames consumed internally (keepalives,
 * buffered fragments, drops) do not appear in the output.
 */
int mars_io_recv(void *handle, uint16_t thr_idx, struct packet *pkts[], int nr_pkts)
{
    struct mars_io *mars_io = (struct mars_io *)handle;
    marsio_buff_t *mbuffs[RX_BURST_MAX];
    int nr_out = 0;
    int nr_recv = marsio_recv_burst(mars_io->mr_dev, thr_idx, mbuffs, MIN(RX_BURST_MAX, nr_pkts));
    for (int i = 0; i < nr_recv; i++)
    {
        struct packet *pkt = recv_packet(mars_io, mbuffs[i], thr_idx);
        if (pkt != NULL)
        {
            pkts[nr_out++] = pkt;
        }
    }
    return nr_out;
}
/*
 * Transmit a batch of packets. A reassembled packet is expanded back into its
 * original fragments — each sent individually — before the container packet
 * is freed. Every pkts[] slot is consumed and cleared.
 */
void mars_io_send(void *handle, uint16_t thr_idx, struct packet *pkts[], int nr_pkts)
{
    struct mars_io *mars_io = (struct mars_io *)handle;
    for (int i = 0; i < nr_pkts; i++)
    {
        struct packet *pkt = pkts[i];
        pkts[i] = NULL;
        if (!packet_is_defraged(pkt))
        {
            send_packet(mars_io, pkt, thr_idx);
            continue;
        }
        struct packet *frag;
        while ((frag = packet_pop_frag(pkt)) != NULL)
        {
            send_packet(mars_io, frag, thr_idx);
        }
        packet_free(pkt);
    }
}
/*
 * Drop a batch of packets. A reassembled packet is expanded into its buffered
 * fragments so each underlying buffer is accounted and released before the
 * container packet is freed. Every pkts[] slot is consumed and cleared.
 */
void mars_io_drop(void *handle, uint16_t thr_idx, struct packet *pkts[], int nr_pkts)
{
    struct mars_io *mars_io = (struct mars_io *)handle;
    for (int i = 0; i < nr_pkts; i++)
    {
        struct packet *pkt = pkts[i];
        pkts[i] = NULL;
        if (!packet_is_defraged(pkt))
        {
            drop_packet(mars_io, pkt, thr_idx);
            continue;
        }
        struct packet *frag;
        while ((frag = packet_pop_frag(pkt)) != NULL)
        {
            drop_packet(mars_io, frag, thr_idx);
        }
        packet_free(pkt);
    }
}
/* Block in marsio's poll-wait until traffic arrives on the device or
 * idle_yield_ms elapses — lets an idle worker thread sleep instead of spin. */
void mars_io_yield(void *handle, uint16_t thr_idx)
{
    struct mars_io *mars_io = (struct mars_io *)handle;
    struct mr_vdev *vdevs[] = {mars_io->mr_dev};
    marsio_poll_wait(mars_io->mr_ins, vdevs, 1, thr_idx, mars_io->cfg->idle_yield_ms);
}
/*
 * Periodic housekeeping: flush expired fragments out of the per-thread
 * reassembler. cfg->fail_action selects their fate (0: bypass/forward,
 * 1: drop), matching the packet_io.ip_reassembly.fail_action TOML key.
 */
void mars_io_polling(void *handle, uint16_t thr_idx)
{
    struct mars_io *mars_io = (struct mars_io *)handle;
    struct ip_reassembly *ip_reass = mars_io->ip_reass[thr_idx];
    uint64_t now_ms = clock_get_real_time_ms();
    int bypass = (mars_io->cfg->fail_action == 0);
    struct packet *pkt;
    while ((pkt = ip_reassembly_clean(ip_reass, now_ms)) != NULL)
    {
        if (bypass)
        {
            send_packet(mars_io, pkt, thr_idx);
        }
        else
        {
            drop_packet(mars_io, pkt, thr_idx);
        }
    }
    // TODO: output stat
}
/* Return a pointer to worker thr_idx's live IO statistics block. */
struct packet_io_stat *mars_io_stat(void *handle, uint16_t thr_idx)
{
    struct mars_io *io = (struct mars_io *)handle;
    return &io->stat[thr_idx];
}

24
infra/packet_io/mars_io.h Normal file
View File

@@ -0,0 +1,24 @@
#pragma once
#ifdef __cplusplus
extern "C"
{
#endif
#include "packet_io.h"
// marsio-backed implementation of the packet_io interface.
// Lifecycle: mars_io_new() -> per-thread mars_io_init() ->
// recv/send/drop/yield/polling loop -> mars_io_free().
void *mars_io_new(const char *toml_file); // build from TOML config; NULL on failure
void mars_io_free(void *handle);          // tear down; tolerates partial init
int mars_io_isbreak(void *handle);        // non-zero when the IO loop should stop
int mars_io_init(void *handle, uint16_t thr_idx); // per-worker-thread setup; 0 on success
int mars_io_recv(void *handle, uint16_t thr_idx, struct packet *pkts[], int nr_pkts); // returns packets stored in pkts[]
void mars_io_send(void *handle, uint16_t thr_idx, struct packet *pkts[], int nr_pkts); // transmit and consume pkts[]
void mars_io_drop(void *handle, uint16_t thr_idx, struct packet *pkts[], int nr_pkts); // drop and consume pkts[]
void mars_io_yield(void *handle, uint16_t thr_idx);   // sleep until traffic or idle timeout
void mars_io_polling(void *handle, uint16_t thr_idx); // periodic housekeeping (frag expiry)
struct packet_io_stat *mars_io_stat(void *handle, uint16_t thr_idx); // per-thread counters
#ifdef __cplusplus
}
#endif
View File

@@ -1,422 +0,0 @@
#include <sched.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <netinet/ether.h>
#include "marsio.h"
#include "marsio_io.h"
#include "log_internal.h"
#include "packet_parser.h"
#include "packet_internal.h"
#define MARSIO_IO_LOG_ERROR(format, ...) STELLAR_LOG_ERROR(__thread_local_logger, "marsio io", format, ##__VA_ARGS__)
struct marsio_io
{
struct packet_io_config cfg;
struct mr_instance *mr_ins;
struct mr_vdev *mr_dev;
struct mr_sendpath *mr_path;
struct packet_io_stat stat[MAX_THREAD_NUM];
};
/******************************************************************************
* Private API
******************************************************************************/
/*
 * Copy per-buffer metadata from a received marsio buffer into `pkt`.
 * Each field is fetched independently; failures are logged but not fatal.
 * (Legacy marsio_io variant of mars_io.c's packet_set_metadata().)
 */
static void metadata_from_mbuff_to_packet(marsio_buff_t *mbuff, struct packet *pkt)
{
    struct route_ctx route_ctx = {};
    struct sids sids = {};
    uint64_t session_id = {0};
    uint16_t link_id = {0};
    int is_ctrl = {0};
    enum packet_direction direction = PACKET_DIRECTION_OUTGOING;
    route_ctx.used = marsio_buff_get_metadata(mbuff, MR_BUFF_ROUTE_CTX, &route_ctx.data, sizeof(route_ctx.data));
    if (route_ctx.used > 0)
    {
        packet_set_route_ctx(pkt, &route_ctx);
    }
    else
    {
        MARSIO_IO_LOG_ERROR("failed to get route ctx");
    }
    sids.used = marsio_buff_get_sid_list(mbuff, sids.sid, sizeof(sids.sid) / sizeof(sids.sid[0]));
    if (sids.used > 0)
    {
        packet_set_sids(pkt, &sids);
    }
    else
    {
        MARSIO_IO_LOG_ERROR("failed to get sids");
    }
    if (marsio_buff_get_metadata(mbuff, MR_BUFF_SESSION_ID, &session_id, sizeof(session_id)) == sizeof(session_id))
    {
        packet_set_session_id(pkt, session_id);
    }
    else
    {
        MARSIO_IO_LOG_ERROR("failed to get session id");
    }
    // TODO: domain metadata not wired up yet
#if 0
    if (marsio_buff_get_metadata(mbuff, MR_BUFF_DOMAIN, &domain, sizeof(domain)) == sizeof(domain))
    {
        packet_set_domain(pkt, domain);
    }
    else
    {
        MARSIO_IO_LOG_ERROR("failed to get domain id");
    }
#endif
    if (marsio_buff_get_metadata(mbuff, MR_BUFF_LINK_ID, &link_id, sizeof(link_id)) == sizeof(link_id))
    {
        packet_set_link_id(pkt, link_id);
    }
    else
    {
        MARSIO_IO_LOG_ERROR("failed to get link id");
    }
    is_ctrl = marsio_buff_is_ctrlbuf(mbuff);
    packet_set_ctrl(pkt, is_ctrl);
    if (marsio_buff_get_metadata(mbuff, MR_BUFF_DIR, &direction, sizeof(direction)) == sizeof(direction))
    {
        packet_set_direction(pkt, direction);
    }
    else
    {
        MARSIO_IO_LOG_ERROR("failed to get direction");
    }
    packet_set_action(pkt, PACKET_ACTION_FORWARD);
    packet_set_origin_ctx(pkt, mbuff);
    // TODO: fill in a real receive timestamp — currently zeroed
    const struct timeval tv = {};
    packet_set_timeval(pkt, &tv);
}
/*
 * Mirror of metadata_from_mbuff_to_packet(): copy the packet's metadata back
 * onto the outgoing marsio buffer. Failures are logged but not fatal.
 * NOTE(review): assumes packet_get_route_ctx()/packet_get_sids() never
 * return NULL — confirm against packet_internal.h.
 */
static void metadata_from_packet_to_mbuff(struct packet *pkt, marsio_buff_t *mbuff)
{
    const struct route_ctx *route_ctx = packet_get_route_ctx(pkt);
    const struct sids *sids = packet_get_sids(pkt);
    uint64_t session_id = packet_get_session_id(pkt);
    // uint64_t domain = packet_get_domain(pkt);
    uint16_t link_id = packet_get_link_id(pkt);
    int is_ctrl = packet_is_ctrl(pkt);
    enum packet_direction direction = packet_get_direction(pkt);
    if (marsio_buff_set_metadata(mbuff, MR_BUFF_ROUTE_CTX, (void *)route_ctx->data, route_ctx->used) != 0)
    {
        MARSIO_IO_LOG_ERROR("failed to set route ctx");
    }
    if (marsio_buff_set_sid_list(mbuff, (sid_t *)sids->sid, sids->used) != 0)
    {
        MARSIO_IO_LOG_ERROR("failed to set sids");
    }
    if (marsio_buff_set_metadata(mbuff, MR_BUFF_SESSION_ID, &session_id, sizeof(session_id)) != 0)
    {
        MARSIO_IO_LOG_ERROR("failed to set session id");
    }
    // TODO: domain metadata not wired up yet
#if 0
    if (marsio_buff_set_metadata(mbuff, MR_BUFF_DOMAIN, &domain, sizeof(domain)) != 0)
    {
        MARSIO_IO_LOG_ERROR("failed to set domain id");
    }
#endif
    if (marsio_buff_set_metadata(mbuff, MR_BUFF_LINK_ID, &link_id, sizeof(link_id)) != 0)
    {
        MARSIO_IO_LOG_ERROR("failed to set link id");
    }
    if (is_ctrl)
    {
        marsio_buff_set_ctrlbuf(mbuff);
    }
    if (marsio_buff_set_metadata(mbuff, MR_BUFF_DIR, &direction, sizeof(direction)) != 0)
    {
        MARSIO_IO_LOG_ERROR("failed to set direction");
    }
}
/* Return 1 if the frame is an in-band keepalive, 0 otherwise.
 * A keepalive is identified by the magic EtherType 0xAAAA; both octets are
 * identical, so the comparison is byte-order independent. Frames shorter
 * than an Ethernet header (or a NULL pointer) are never keepalives. */
static inline int is_keepalive_packet(const char *data, int len)
{
    if (data == NULL || len < (int)sizeof(struct ethhdr))
    {
        return 0;
    }
    const struct ethhdr *eth = (const struct ethhdr *)data;
    return (eth->h_proto == 0xAAAA) ? 1 : 0;
}
/******************************************************************************
* Public API
******************************************************************************/
/**
 * Create a marsio-backed packet IO handle from the given config.
 *
 * Copies @cfg into the handle, builds a cpu_set_t from the first
 * nr_worker_thread entries of cfg->cpu_mask, then creates/initializes the
 * marsio instance, opens the device and creates the send path.
 *
 * Returns an opaque handle on success, NULL on any failure (all partially
 * acquired resources are released via marsio_io_free).
 *
 * Fix: guard against a NULL @cfg — the original dereferenced it via memcpy
 * without a check.
 */
void *marsio_io_new(const struct packet_io_config *cfg)
{
    int opt = 1; /* MARSIO_OPT_EXIT_WHEN_ERR: bail out on fatal IO errors */
    cpu_set_t coremask;
    CPU_ZERO(&coremask);
    if (cfg == NULL)
    {
        MARSIO_IO_LOG_ERROR("packet io config is NULL");
        return NULL;
    }
    struct marsio_io *handle = (struct marsio_io *)calloc(1, sizeof(struct marsio_io));
    if (handle == NULL)
    {
        MARSIO_IO_LOG_ERROR("unable to allocate memory for marsio_io");
        return NULL;
    }
    memcpy(&handle->cfg, cfg, sizeof(struct packet_io_config));
    /* NOTE(review): assumes nr_worker_thread was validated against
     * MAX_THREAD_NUM by the config loader — confirm before trusting it. */
    for (uint16_t i = 0; i < handle->cfg.nr_worker_thread; i++)
    {
        CPU_SET(handle->cfg.cpu_mask[i], &coremask);
    }
    handle->mr_ins = marsio_create();
    if (handle->mr_ins == NULL)
    {
        MARSIO_IO_LOG_ERROR("unable to create marsio instance");
        goto error_out;
    }
    marsio_option_set(handle->mr_ins, MARSIO_OPT_THREAD_MASK_IN_CPUSET, &coremask, sizeof(cpu_set_t));
    marsio_option_set(handle->mr_ins, MARSIO_OPT_EXIT_WHEN_ERR, &opt, sizeof(opt));
    if (marsio_init(handle->mr_ins, handle->cfg.app_symbol) != 0)
    {
        MARSIO_IO_LOG_ERROR("unable to init marsio instance");
        goto error_out;
    }
    /* one RX and one TX queue per worker thread */
    handle->mr_dev = marsio_open_device(handle->mr_ins, handle->cfg.dev_symbol, handle->cfg.nr_worker_thread, handle->cfg.nr_worker_thread);
    if (handle->mr_dev == NULL)
    {
        MARSIO_IO_LOG_ERROR("unable to open marsio device");
        goto error_out;
    }
    handle->mr_path = marsio_sendpath_create_by_vdev(handle->mr_dev);
    if (handle->mr_path == NULL)
    {
        MARSIO_IO_LOG_ERROR("unable to create marsio sendpath");
        goto error_out;
    }
    return handle;
error_out:
    marsio_io_free(handle);
    return NULL;
}
/* Tear down a marsio IO handle created by marsio_io_new.
 * Releases send path, device and instance in reverse creation order;
 * safe to call with NULL or with a partially constructed handle. */
void marsio_io_free(void *handle)
{
    struct marsio_io *mr_io = (struct marsio_io *)handle;
    if (mr_io == NULL)
    {
        return;
    }
    if (mr_io->mr_path != NULL)
    {
        marsio_sendpath_destory(mr_io->mr_path);
        mr_io->mr_path = NULL;
    }
    if (mr_io->mr_dev != NULL)
    {
        marsio_close_device(mr_io->mr_dev);
        mr_io->mr_dev = NULL;
    }
    if (mr_io->mr_ins != NULL)
    {
        marsio_destory(mr_io->mr_ins);
        mr_io->mr_ins = NULL;
    }
    free(mr_io);
}
/* The marsio backend has no internal end-of-input condition (unlike the
 * pcap backend, which runs out of files), so this always reports 0:
 * "do not break the worker loop". */
int marsio_io_isbreak(void *handle __attribute__((unused)))
{
    return 0;
}
/* Per-worker-thread initialization: binds the calling thread to the marsio
 * instance. Returns 0 on success, -1 on failure. */
int marsio_io_init(void *handle, uint16_t thr_idx __attribute__((unused)))
{
    struct marsio_io *mr_io = (struct marsio_io *)handle;
    int rc = marsio_thread_init(mr_io->mr_ins);
    if (rc != 0)
    {
        MARSIO_IO_LOG_ERROR("unable to init marsio thread");
        return -1;
    }
    return 0;
}
/* Receive a burst of buffers for worker thr_idx and parse them into pkts.
 * Keepalive frames are echoed straight back on the send path and never
 * reach the caller. Returns the number of entries of pkts[] filled in
 * (<= nr_pkts, capped at RX_BURST_MAX). */
uint16_t marsio_io_ingress(void *handle, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts)
{
    struct marsio_io *mr_io = (struct marsio_io *)handle;
    struct packet_io_stat *stat = &mr_io->stat[thr_idx];
    marsio_buff_t *rx_buffs[RX_BURST_MAX];
    uint16_t parsed = 0;

    int received = marsio_recv_burst(mr_io->mr_dev, thr_idx, rx_buffs, MIN(RX_BURST_MAX, nr_pkts));
    for (int i = 0; i < received; i++)
    {
        marsio_buff_t *mbuff = rx_buffs[i];
        char *data = marsio_buff_mtod(mbuff);
        int len = marsio_buff_datalen(mbuff);
        stat->pkts_rx++;
        stat->bytes_rx += len;
        if (is_keepalive_packet(data, len))
        {
            /* bounce keepalives back immediately, bypassing the pipeline */
            stat->keep_alive_pkts++;
            stat->keep_alive_bytes += len;
            stat->pkts_tx++;
            stat->bytes_tx += len;
            marsio_send_burst(mr_io->mr_path, thr_idx, &mbuff, 1);
            continue;
        }
        struct packet *pkt = &pkts[parsed++];
        packet_parse(pkt, data, len);
        metadata_from_mbuff_to_packet(mbuff, pkt);
        if (marsio_buff_is_ctrlbuf(mbuff))
        {
            stat->ctrl_pkts_rx++;
            stat->ctrl_bytes_rx += len;
        }
        else
        {
            stat->raw_pkts_rx++;
            stat->raw_bytes_rx += len;
        }
    }
    return parsed;
}
/**
 * Transmit nr_pkts packets for worker thr_idx.
 *
 * Packets that originated from ingress carry their marsio buffer in the
 * origin ctx and are sent on the normal path. Injected packets (no origin
 * ctx) get a freshly allocated buffer, the raw bytes copied in, and are
 * sent with MARSIO_SEND_OPT_REHASH so the fabric picks a queue for them.
 *
 * Fix: the return of marsio_buff_append() was unchecked — on failure the
 * original memcpy'd through a NULL pointer and leaked the allocated buffer.
 * Now the buffer is freed and the packet skipped.
 */
void marsio_io_egress(void *handle, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts)
{
    struct marsio_io *mr_io = (struct marsio_io *)handle;
    struct packet_io_stat *stat = &mr_io->stat[thr_idx];
    for (uint16_t i = 0; i < nr_pkts; i++)
    {
        int is_injected_packet = 0;
        struct packet *pkt = &pkts[i];
        int len = packet_get_raw_len(pkt);
        marsio_buff_t *mbuff = (marsio_buff_t *)packet_get_origin_ctx(pkt);
        if (mbuff == NULL)
        {
            /* injected packet: needs its own buffer */
            if (marsio_buff_malloc_global(mr_io->mr_ins, &mbuff, 1, MARSIO_SOCKET_ID_ANY, MARSIO_LCORE_ID_ANY) < 0)
            {
                MARSIO_IO_LOG_ERROR("unable to allocate marsio buffer for inject packet");
                continue;
            }
            char *ptr = marsio_buff_append(mbuff, len);
            if (ptr == NULL)
            {
                MARSIO_IO_LOG_ERROR("unable to append marsio buffer for inject packet");
                marsio_buff_free(mr_io->mr_ins, &mbuff, 1, 0, thr_idx);
                continue;
            }
            memcpy(ptr, packet_get_raw_data(pkt), len);
            is_injected_packet = 1;
        }
        metadata_from_packet_to_mbuff(pkt, mbuff);
        stat->pkts_tx++;
        stat->bytes_tx += len;
        if (packet_is_ctrl(pkt))
        {
            stat->ctrl_pkts_tx++;
            stat->ctrl_bytes_tx += len;
        }
        else
        {
            stat->raw_pkts_tx++;
            stat->raw_bytes_tx += len;
        }
        if (is_injected_packet)
        {
            stat->pkts_injected++;
            stat->bytes_injected += len;
            marsio_send_burst_with_options(mr_io->mr_path, thr_idx, &mbuff, 1, MARSIO_SEND_OPT_REHASH);
        }
        else
        {
            marsio_send_burst(mr_io->mr_path, thr_idx, &mbuff, 1);
        }
    }
}
void marsio_io_drop(void *handle, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts)
{
struct packet *pkt;
marsio_buff_t *mbuff;
struct marsio_io *mr_io = (struct marsio_io *)handle;
struct packet_io_stat *stat = &mr_io->stat[thr_idx];
for (uint16_t i = 0; i < nr_pkts; i++)
{
pkt = &pkts[i];
mbuff = (marsio_buff_t *)packet_get_origin_ctx(pkt);
if (mbuff)
{
stat->pkts_dropped++;
stat->bytes_dropped += packet_get_raw_len(pkt);
marsio_buff_free(mr_io->mr_ins, &mbuff, 1, 0, thr_idx);
}
}
}
/* Idle hook for worker thr_idx: block in marsio's poll wait on the single
 * open device for at most cfg.idle_yield_interval_ms milliseconds. */
void marsio_io_yield(void *handle, uint16_t thr_idx)
{
    struct marsio_io *mr_io = (struct marsio_io *)handle;
    struct mr_vdev *devs[] = { mr_io->mr_dev };
    marsio_poll_wait(mr_io->mr_ins, devs, 1, thr_idx, mr_io->cfg.idle_yield_interval_ms);
}
/* Return a pointer to the per-thread IO counters for thr_idx.
 * No bounds check is performed; the caller must pass a valid worker index. */
struct packet_io_stat *marsio_io_stat(void *handle, uint16_t thr_idx)
{
    struct marsio_io *mr_io = (struct marsio_io *)handle;
    return &mr_io->stat[thr_idx];
}

View File

@@ -1,23 +0,0 @@
#pragma once
#ifdef __cplusplus
extern "C"
{
#endif
#include "packet_io.h"
void *marsio_io_new(const struct packet_io_config *cfg);
void marsio_io_free(void *handle);
int marsio_io_isbreak(void *handle);
int marsio_io_init(void *handle, uint16_t thr_idx);
uint16_t marsio_io_ingress(void *handle, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts);
void marsio_io_egress(void *handle, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts);
void marsio_io_drop(void *handle, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts);
void marsio_io_yield(void *handle, uint16_t thr_idx);
struct packet_io_stat *marsio_io_stat(void *handle, uint16_t thr_idx);
#ifdef __cplusplus
}
#endif

View File

@@ -1,267 +1,50 @@
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include "toml.h"
#include "pcap_io.h"
#include "marsio_io.h"
#include "mars_io.h"
#include "log_internal.h"
#include "utils_internal.h"
#define PACKET_IO_LOG_ERROR(format, ...) STELLAR_LOG_ERROR(__thread_local_logger, "packet io", format, ##__VA_ARGS__)
#define PACKET_IO_LOG_INFO(format, ...) STELLAR_LOG_INFO(__thread_local_logger, "packet io", format, ##__VA_ARGS__)
struct packet_io
{
struct packet_io_config *cfg;
void *handle;
void *(*new_func)(const struct packet_io_config *cfg);
void *(*new_func)(const char *toml_file);
void (*free_func)(void *handle);
int (*isbreak_func)(void *handle);
int (*init_func)(void *handle, uint16_t thr_idx);
uint16_t (*ingress_func)(void *handle, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts);
void (*egress_func)(void *handle, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts);
void (*drop_func)(void *handle, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts);
int (*recv_func)(void *handle, uint16_t thr_idx, struct packet *pkts[], int nr_pkts);
void (*send_func)(void *handle, uint16_t thr_idx, struct packet *pkts[], int nr_pkts);
void (*drop_func)(void *handle, uint16_t thr_idx, struct packet *pkts[], int nr_pkts);
void (*yield_func)(void *handle, uint16_t thr_idx);
void (*polling_func)(void *handle, uint16_t thr_idx);
struct packet_io_stat *(*stat_func)(void *handle, uint16_t thr_idx);
};
static struct packet_io_config *packet_io_config_new(const char *toml_file)
{
int ret = -1;
const char *ptr;
char *ptr_mode = NULL;
char *ptr_pcap_path = NULL;
char *ptr_app_symbol = NULL;
char *ptr_dev_symbol = NULL;
char errbuf[200];
FILE *fp = NULL;
toml_table_t *root = NULL;
toml_table_t *table = NULL;
toml_array_t *mask;
struct packet_io_config *cfg = (struct packet_io_config *)calloc(1, sizeof(struct packet_io_config));
if (cfg == NULL)
{
return NULL;
}
fp = fopen(toml_file, "r");
if (fp == NULL)
{
PACKET_IO_LOG_ERROR("config file %s open failed, %s", toml_file, strerror(errno));
goto error_out;
}
root = toml_parse_file(fp, errbuf, sizeof(errbuf));
if (root == NULL)
{
PACKET_IO_LOG_ERROR("config file %s parse failed, %s", toml_file, errbuf);
goto error_out;
}
table = toml_table_in(root, "packet_io");
if (table == NULL)
{
PACKET_IO_LOG_ERROR("config file %s missing packet_io", toml_file);
goto error_out;
}
ptr = toml_raw_in(table, "mode");
if (ptr == NULL || toml_rtos(ptr, &ptr_mode) != 0)
{
PACKET_IO_LOG_ERROR("config file missing packet_io.mode");
goto error_out;
}
if (strcmp(ptr_mode, "pcapfile") == 0)
{
cfg->mode = PACKET_IO_PCAPFILE;
}
else if (strcmp(ptr_mode, "pcaplist") == 0)
{
cfg->mode = PACKET_IO_PCAPLIST;
}
else if (strcmp(ptr_mode, "marsio") == 0)
{
cfg->mode = PACKET_IO_MARSIO;
}
else
{
PACKET_IO_LOG_ERROR("config file invalid packet_io.mode %s", ptr);
goto error_out;
}
if (cfg->mode == PACKET_IO_PCAPFILE || cfg->mode == PACKET_IO_PCAPLIST)
{
ptr = toml_raw_in(table, "pcap_path");
if (ptr == NULL || toml_rtos(ptr, &ptr_pcap_path) != 0)
{
PACKET_IO_LOG_ERROR("config file missing packet_io.pcap_path");
goto error_out;
}
strcpy(cfg->pcap_path, ptr_pcap_path);
}
else
{
ptr = toml_raw_in(table, "app_symbol");
if (ptr == NULL || toml_rtos(ptr, &ptr_app_symbol) != 0)
{
PACKET_IO_LOG_ERROR("config file missing packet_io.app_symbol");
goto error_out;
}
strcpy(cfg->app_symbol, ptr_app_symbol);
ptr = toml_raw_in(table, "dev_symbol");
if (ptr == NULL || toml_rtos(ptr, &ptr_dev_symbol) != 0)
{
PACKET_IO_LOG_ERROR("config file missing packet_io.dev_symbol");
goto error_out;
}
strcpy(cfg->dev_symbol, ptr_dev_symbol);
}
ptr = toml_raw_in(table, "nr_worker_thread");
if (ptr == NULL)
{
PACKET_IO_LOG_ERROR("config file missing packet_io.nr_worker_thread");
goto error_out;
}
cfg->nr_worker_thread = atoi(ptr);
if (cfg->nr_worker_thread == 0 || cfg->nr_worker_thread > MAX_THREAD_NUM)
{
PACKET_IO_LOG_ERROR("config file invalid packet_io.nr_worker_thread %d, range [1, %d]", cfg->nr_worker_thread, MAX_THREAD_NUM);
goto error_out;
}
mask = toml_array_in(table, "cpu_mask");
if (mask == NULL)
{
PACKET_IO_LOG_ERROR("config file missing packet_io.cpu_mask");
goto error_out;
}
for (uint16_t i = 0; i < cfg->nr_worker_thread; i++)
{
ptr = toml_raw_at(mask, i);
if (ptr == NULL)
{
PACKET_IO_LOG_ERROR("config file missing packet_io.cpu_mask[%d]", i);
goto error_out;
}
cfg->cpu_mask[i] = atoi(ptr);
}
ptr = toml_raw_in(table, "idle_yield_interval_ms");
if (ptr == NULL)
{
PACKET_IO_LOG_ERROR("config file missing packet_io.idle_yield_interval_ms");
goto error_out;
}
cfg->idle_yield_interval_ms = atoll(ptr);
if (cfg->idle_yield_interval_ms > 60000)
{
PACKET_IO_LOG_ERROR("config file invalid packet_io.idle_yield_interval_ms %d, range [0, %d]", cfg->idle_yield_interval_ms, 60000);
goto error_out;
}
ret = 0;
error_out:
if (ptr_mode)
{
free(ptr_mode);
}
if (ptr_pcap_path)
{
free(ptr_pcap_path);
}
if (ptr_app_symbol)
{
free(ptr_app_symbol);
}
if (ptr_dev_symbol)
{
free(ptr_dev_symbol);
}
if (root)
{
toml_free(root);
}
if (fp)
{
fclose(fp);
}
if (ret == -1)
{
free(cfg);
return NULL;
}
else
{
return cfg;
}
}
static void packet_io_config_free(struct packet_io_config *cfg)
{
if (cfg)
{
free(cfg);
cfg = NULL;
}
}
static void packet_io_config_print(const struct packet_io_config *cfg)
{
if (cfg)
{
PACKET_IO_LOG_INFO("packet_io.mode : %s", cfg->mode == PACKET_IO_PCAPFILE ? "pcapfile" : (cfg->mode == PACKET_IO_PCAPLIST ? "pcaplist" : "marsio"));
if (cfg->mode == PACKET_IO_PCAPFILE || cfg->mode == PACKET_IO_PCAPLIST)
{
PACKET_IO_LOG_INFO("packet_io.pcap_path : %s", cfg->pcap_path);
}
else
{
PACKET_IO_LOG_INFO("packet_io.app_symbol : %s", cfg->app_symbol);
PACKET_IO_LOG_INFO("packet_io.dev_symbol : %s", cfg->dev_symbol);
}
PACKET_IO_LOG_INFO("packet_io.nr_worker_thread : %d", cfg->nr_worker_thread);
for (uint16_t i = 0; i < cfg->nr_worker_thread; i++)
{
PACKET_IO_LOG_INFO("packet_io.cpu_mask[%03d] : %d", i, cfg->cpu_mask[i]);
}
PACKET_IO_LOG_INFO("packet_io.idle_yield_interval_ms : %lu", cfg->idle_yield_interval_ms);
}
}
struct packet_io *packet_io_new(const char *toml_file)
{
char mode[64] = {0};
struct packet_io *pkt_io = (struct packet_io *)calloc(1, sizeof(struct packet_io));
if (pkt_io == NULL)
{
return NULL;
}
pkt_io->cfg = packet_io_config_new(toml_file);
if (pkt_io->cfg == NULL)
load_toml_str_config(toml_file, "packet_io.mode", mode);
if (strcmp(mode, "marsio") == 0)
{
free(pkt_io);
return NULL;
}
packet_io_config_print(pkt_io->cfg);
if (pkt_io->cfg->mode == PACKET_IO_MARSIO)
{
pkt_io->new_func = marsio_io_new;
pkt_io->free_func = marsio_io_free;
pkt_io->isbreak_func = marsio_io_isbreak;
pkt_io->init_func = marsio_io_init;
pkt_io->ingress_func = marsio_io_ingress;
pkt_io->egress_func = marsio_io_egress;
pkt_io->drop_func = marsio_io_drop;
pkt_io->yield_func = marsio_io_yield;
pkt_io->stat_func = marsio_io_stat;
pkt_io->new_func = mars_io_new;
pkt_io->free_func = mars_io_free;
pkt_io->isbreak_func = mars_io_isbreak;
pkt_io->init_func = mars_io_init;
pkt_io->recv_func = mars_io_recv;
pkt_io->send_func = mars_io_send;
pkt_io->drop_func = mars_io_drop;
pkt_io->yield_func = mars_io_yield;
pkt_io->polling_func = mars_io_polling;
pkt_io->stat_func = mars_io_stat;
}
else
{
@@ -269,14 +52,15 @@ struct packet_io *packet_io_new(const char *toml_file)
pkt_io->free_func = pcap_io_free;
pkt_io->isbreak_func = pcap_io_isbreak;
pkt_io->init_func = pcap_io_init;
pkt_io->ingress_func = pcap_io_ingress;
pkt_io->egress_func = pcap_io_egress;
pkt_io->recv_func = pcap_io_recv;
pkt_io->send_func = pcap_io_send;
pkt_io->drop_func = pcap_io_drop;
pkt_io->yield_func = pcap_io_yield;
pkt_io->polling_func = pcap_io_polling;
pkt_io->stat_func = pcap_io_stat;
}
pkt_io->handle = pkt_io->new_func(pkt_io->cfg);
pkt_io->handle = pkt_io->new_func(toml_file);
if (pkt_io->handle == NULL)
{
packet_io_free(pkt_io);
@@ -294,10 +78,6 @@ void packet_io_free(struct packet_io *pkt_io)
{
pkt_io->free_func(pkt_io->handle);
}
if (pkt_io->cfg)
{
packet_io_config_free(pkt_io->cfg);
}
free(pkt_io);
pkt_io = NULL;
}
@@ -313,17 +93,17 @@ int packet_io_init(struct packet_io *pkt_io, uint16_t thr_idx)
return pkt_io->init_func(pkt_io->handle, thr_idx);
}
uint16_t packet_io_ingress(struct packet_io *pkt_io, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts)
int packet_io_recv(struct packet_io *pkt_io, uint16_t thr_idx, struct packet *pkts[], int nr_pkts)
{
return pkt_io->ingress_func(pkt_io->handle, thr_idx, pkts, nr_pkts);
return pkt_io->recv_func(pkt_io->handle, thr_idx, pkts, nr_pkts);
}
void packet_io_egress(struct packet_io *pkt_io, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts)
void packet_io_send(struct packet_io *pkt_io, uint16_t thr_idx, struct packet *pkts[], int nr_pkts)
{
pkt_io->egress_func(pkt_io->handle, thr_idx, pkts, nr_pkts);
pkt_io->send_func(pkt_io->handle, thr_idx, pkts, nr_pkts);
}
void packet_io_drop(struct packet_io *pkt_io, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts)
void packet_io_drop(struct packet_io *pkt_io, uint16_t thr_idx, struct packet *pkts[], int nr_pkts)
{
pkt_io->drop_func(pkt_io->handle, thr_idx, pkts, nr_pkts);
}
@@ -333,6 +113,11 @@ void packet_io_yield(struct packet_io *pkt_io, uint16_t thr_idx)
pkt_io->yield_func(pkt_io->handle, thr_idx);
}
void packet_io_polling(struct packet_io *pkt_io, uint16_t thr_idx)
{
pkt_io->polling_func(pkt_io->handle, thr_idx);
}
struct packet_io_stat *packet_io_stat(struct packet_io *pkt_io, uint16_t thr_idx)
{
return pkt_io->stat_func(pkt_io->handle, thr_idx);

View File

@@ -6,9 +6,8 @@ extern "C"
#endif
#include <stdint.h>
#include <limits.h>
#include "utils.h"
#include "stellar/packet.h"
struct packet_io_stat
{
@@ -23,13 +22,6 @@ struct packet_io_stat
uint64_t keep_alive_pkts;
uint64_t keep_alive_bytes;
// raw packet
uint64_t raw_pkts_rx;
uint64_t raw_bytes_rx;
uint64_t raw_pkts_tx;
uint64_t raw_bytes_tx;
// drop packet
uint64_t pkts_dropped;
uint64_t bytes_dropped;
@@ -38,43 +30,22 @@ struct packet_io_stat
uint64_t pkts_injected;
uint64_t bytes_injected;
// ctrl packet
uint64_t ctrl_pkts_rx;
uint64_t ctrl_bytes_rx;
uint64_t ctrl_pkts_tx;
uint64_t ctrl_bytes_tx;
// user freed
uint64_t pkts_user_freed;
uint64_t bytes_user_freed;
} __attribute__((aligned(64)));
enum packet_io_mode
{
PACKET_IO_PCAPFILE = 0,
PACKET_IO_PCAPLIST = 1,
PACKET_IO_MARSIO = 2,
};
struct packet_io_config
{
enum packet_io_mode mode;
char pcap_path[PATH_MAX];
char app_symbol[64];
char dev_symbol[64];
uint16_t nr_worker_thread; // range [1, MAX_THREAD_NUM]
uint16_t cpu_mask[MAX_THREAD_NUM];
uint64_t idle_yield_interval_ms; // range: [0, 6000] (ms)
};
struct packet;
struct packet_io;
struct packet_io *packet_io_new(const char *toml_file);
void packet_io_free(struct packet_io *pkt_io);
int packet_io_isbreak(struct packet_io *pkt_io);
int packet_io_init(struct packet_io *pkt_io, uint16_t thr_idx);
uint16_t packet_io_ingress(struct packet_io *pkt_io, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts);
void packet_io_egress(struct packet_io *pkt_io, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts);
void packet_io_drop(struct packet_io *pkt_io, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts);
int packet_io_recv(struct packet_io *pkt_io, uint16_t thr_idx, struct packet *pkts[], int nr_pkts);
void packet_io_send(struct packet_io *pkt_io, uint16_t thr_idx, struct packet *pkts[], int nr_pkts);
void packet_io_drop(struct packet_io *pkt_io, uint16_t thr_idx, struct packet *pkts[], int nr_pkts);
void packet_io_yield(struct packet_io *pkt_io, uint16_t thr_idx);
void packet_io_polling(struct packet_io *pkt_io, uint16_t thr_idx);
struct packet_io_stat *packet_io_stat(struct packet_io *pkt_io, uint16_t thr_idx);
#ifdef __cplusplus

View File

@@ -1,42 +1,23 @@
#include <pcap/pcap.h>
#include <pthread.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <errno.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <limits.h>
#include <pthread.h>
#include <pcap/pcap.h>
#include "tuple.h"
#include "utils.h"
#include "log_internal.h"
#include "pcap_io.h"
#include "packet_dump.h"
#include "packet_pool.h"
#include "packet_parser.h"
#include "ip_reassembly.h"
#include "log_internal.h"
#include "packet_internal.h"
#include "utils_internal.h"
#define PCAP_IO_LOG_FATAL(format, ...) STELLAR_LOG_FATAL(__thread_local_logger, "pcap io", format, ##__VA_ARGS__)
#define PCAP_IO_LOG_ERROR(format, ...) STELLAR_LOG_ERROR(__thread_local_logger, "pcap io", format, ##__VA_ARGS__)
#define PCAP_IO_LOG_INFO(format, ...) STELLAR_LOG_INFO(__thread_local_logger, "pcap io", format, ##__VA_ARGS__)
#define MAX_PACKET_QUEUE_SIZE (4096 * 1000)
struct pcap_io
{
struct packet_io_config cfg;
pcap_t *pcap;
struct logger *logger;
struct packet_queue *queue[MAX_THREAD_NUM];
struct packet_io_stat stat[MAX_THREAD_NUM];
uint64_t io_thread_need_exit;
uint64_t io_thread_is_runing;
uint64_t io_thread_wait_exit;
uint64_t read_pcap_files;
uint64_t read_pcap_pkts;
};
#define RING_BUFFER_MAX_SIZE (4096 * 1000)
struct pcap_pkt
{
@@ -45,92 +26,186 @@ struct pcap_pkt
struct timeval ts;
};
struct pcap_io_cfg
{
char mode[16]; // pcapfile, pcaplist
char pcap_path[PATH_MAX];
uint64_t thread_num; // range [1, MAX_THREAD_NUM]
// packet pool
uint64_t capacity; // range: [1, 4294967295]
// ip reassembly
uint64_t fail_action; // 0: bypass, 1: drop
uint64_t timeout_ms; // range: [1, 60000] (ms)
uint64_t frag_queue_num; // range: [1, 4294967295
uint64_t frag_queue_size; // range: [2, 65535]
};
struct pcap_io
{
struct pcap_io_cfg *cfg;
pcap_t *pcap;
struct logger *logger;
struct ring_buffer *ring[MAX_THREAD_NUM];
struct packet_pool *pool[MAX_THREAD_NUM];
struct packet_io_stat stat[MAX_THREAD_NUM];
struct ip_reassembly *ip_reass[MAX_THREAD_NUM];
uint64_t io_thread_need_exit;
uint64_t io_thread_is_runing;
uint64_t io_thread_wait_exit;
uint64_t read_pcap_files;
uint64_t read_pcap_pkts;
};
/******************************************************************************
* Private API -- queue
* Private API -- ring
******************************************************************************/
struct packet_queue
struct ring_buffer
{
uint64_t *queue;
uint64_t *buff;
uint32_t size;
uint32_t head;
uint32_t tail;
};
static struct packet_queue *packet_queue_new(uint32_t size)
static struct ring_buffer *ring_buffer_new(uint32_t size)
{
struct packet_queue *queue = (struct packet_queue *)calloc(1, sizeof(struct packet_queue));
if (queue == NULL)
struct ring_buffer *ring = (struct ring_buffer *)calloc(1, sizeof(struct ring_buffer));
if (ring == NULL)
{
PCAP_IO_LOG_ERROR("unable to new packet queue");
PCAP_IO_LOG_ERROR("unable to new ring buffer");
return NULL;
}
queue->queue = (uint64_t *)calloc(size, sizeof(uint64_t));
if (queue->queue == NULL)
ring->buff = (uint64_t *)calloc(size, sizeof(uint64_t));
if (ring->buff == NULL)
{
PCAP_IO_LOG_ERROR("unable to new packet queue");
free(queue);
PCAP_IO_LOG_ERROR("unable to new ring buffer");
free(ring);
return NULL;
}
queue->size = size;
queue->head = 0;
queue->tail = 0;
ring->size = size;
ring->head = 0;
ring->tail = 0;
return queue;
return ring;
}
static void packet_queue_free(struct packet_queue *queue)
static void ring_buffer_free(struct ring_buffer *ring)
{
if (queue == NULL)
if (ring)
{
return;
if (ring->buff)
{
free(ring->buff);
ring->buff = NULL;
}
free(ring);
ring = NULL;
}
if (queue->queue)
{
free(queue->queue);
queue->queue = NULL;
}
free(queue);
}
static int packet_queue_push(struct packet_queue *queue, void *data)
static int ring_buffer_push(struct ring_buffer *ring, void *data)
{
if (__sync_val_compare_and_swap(&queue->queue[queue->tail], 0, data) != 0)
if (__sync_val_compare_and_swap(&ring->buff[ring->tail], 0, data) != 0)
{
PCAP_IO_LOG_ERROR("packet queue is full, retry later");
PCAP_IO_LOG_ERROR("ring buffer is full, retry later");
return -1;
}
queue->tail = (queue->tail + 1) % queue->size;
ring->tail = (ring->tail + 1) % ring->size;
return 0;
}
static void packet_queue_pop(struct packet_queue *queue, void **data)
static void ring_buffer_pop(struct ring_buffer *ring, void **data)
{
uint64_t read = ATOMIC_READ(&queue->queue[queue->head]);
uint64_t read = ATOMIC_READ(&ring->buff[ring->head]);
if (read == 0)
{
*data = NULL;
return;
}
__sync_val_compare_and_swap(&queue->queue[queue->head], read, 0);
__sync_val_compare_and_swap(&ring->buff[ring->head], read, 0);
*data = (void *)read;
queue->head = (queue->head + 1) % queue->size;
ring->head = (ring->head + 1) % ring->size;
}
/******************************************************************************
* Private API -- utils
* Private API -- config
******************************************************************************/
static struct pcap_io_cfg *pcap_io_cfg_new(const char *toml_file)
{
struct pcap_io_cfg *cfg = (struct pcap_io_cfg *)calloc(1, sizeof(struct pcap_io_cfg));
if (cfg == NULL)
{
return NULL;
}
int ret = 0;
ret += load_toml_str_config(toml_file, "packet_io.mode", cfg->mode);
ret += load_toml_str_config(toml_file, "packet_io.pcap_path", cfg->pcap_path);
ret += load_toml_integer_config(toml_file, "packet_io.thread_num", &cfg->thread_num, 1, MAX_THREAD_NUM);
ret += load_toml_integer_config(toml_file, "packet_io.packet_pool.capacity", &cfg->capacity, 1, 4294967295);
ret += load_toml_integer_config(toml_file, "packet_io.ip_reassembly.fail_action", &cfg->fail_action, 0, 1);
ret += load_toml_integer_config(toml_file, "packet_io.ip_reassembly.timeout_ms", &cfg->timeout_ms, 1, 60000);
ret += load_toml_integer_config(toml_file, "packet_io.ip_reassembly.frag_queue_num", &cfg->frag_queue_num, 1, 4294967295);
ret += load_toml_integer_config(toml_file, "packet_io.ip_reassembly.frag_queue_size", &cfg->frag_queue_size, 2, 65535);
if (strcmp(cfg->mode, "pcapfile") != 0 && strcmp(cfg->mode, "pcaplist") != 0)
{
PCAP_IO_LOG_ERROR("config file invalid packet_io.mode %s", cfg->mode);
ret = -1;
}
if (ret != 0)
{
free(cfg);
return NULL;
}
else
{
return cfg;
}
}
static void pcap_io_cfg_free(struct pcap_io_cfg *cfg)
{
if (cfg)
{
free(cfg);
cfg = NULL;
}
}
static void pcap_io_cfg_print(const struct pcap_io_cfg *cfg)
{
if (cfg)
{
PCAP_IO_LOG_INFO("packet_io.mode : %s", cfg->mode);
PCAP_IO_LOG_INFO("packet_io.pcap_path : %s", cfg->pcap_path);
PCAP_IO_LOG_INFO("packet_io.thread_num : %ld", cfg->thread_num);
PCAP_IO_LOG_INFO("packet_io.packet_pool.capacity : %lu", cfg->capacity);
PCAP_IO_LOG_INFO("packet_io.ip_reassembly.fail_action : %lu", cfg->fail_action);
PCAP_IO_LOG_INFO("packet_io.ip_reassembly.timeout_ms : %lu", cfg->timeout_ms);
PCAP_IO_LOG_INFO("packet_io.ip_reassembly.frag_queue_num : %lu", cfg->frag_queue_num);
PCAP_IO_LOG_INFO("packet_io.ip_reassembly.frag_queue_size : %lu", cfg->frag_queue_size);
}
}
/******************************************************************************
* Private API -- pcap
******************************************************************************/
static void pcap_pkt_handler(u_char *user, const struct pcap_pkthdr *h, const u_char *bytes)
{
struct pcap_io *handle = (struct pcap_io *)user;
struct pcap_io *pcap_io = (struct pcap_io *)user;
// copy packet data to new memory
struct pcap_pkt *pcap_pkt = (struct pcap_pkt *)calloc(1, sizeof(struct pcap_pkt) + h->caplen);
if (pcap_pkt == NULL)
{
@@ -141,36 +216,34 @@ static void pcap_pkt_handler(u_char *user, const struct pcap_pkthdr *h, const u_
pcap_pkt->len = h->caplen;
pcap_pkt->ts = h->ts;
memcpy((char *)pcap_pkt->data, bytes, h->caplen);
ATOMIC_INC(&handle->read_pcap_pkts);
ATOMIC_INC(&pcap_io->read_pcap_pkts);
// calculate packet hash
struct packet pkt;
memset(&pkt, 0, sizeof(struct packet));
packet_parse(&pkt, pcap_pkt->data, pcap_pkt->len);
uint64_t hash = packet_ldbc_hash(&pkt, PKT_LDBC_METH_OUTERMOST_INT_EXT_IP, PACKET_DIRECTION_OUTGOING);
// push packet to queue
struct packet_queue *queue = handle->queue[hash % handle->cfg.nr_worker_thread];
while (packet_queue_push(queue, pcap_pkt) == -1)
struct ring_buffer *ring = pcap_io->ring[hash % pcap_io->cfg->thread_num];
while (ring_buffer_push(ring, pcap_pkt) == -1)
{
if (ATOMIC_READ(&handle->io_thread_need_exit))
if (ATOMIC_READ(&pcap_io->io_thread_need_exit))
{
free(pcap_pkt);
PCAP_IO_LOG_FATAL("pcap io thread need exit");
pcap_breakloop(handle->pcap);
pcap_breakloop(pcap_io->pcap);
break;
}
usleep(1000);
}
if (ATOMIC_READ(&handle->io_thread_need_exit))
if (ATOMIC_READ(&pcap_io->io_thread_need_exit))
{
PCAP_IO_LOG_FATAL("pcap io thread need exit");
pcap_breakloop(handle->pcap);
pcap_breakloop(pcap_io->pcap);
}
}
static int pcap_io_handler(struct pcap_io *handle, const char *pcap_file)
static int pcap_io_handler(struct pcap_io *pcap_io, const char *pcap_file)
{
char resolved_path[256];
char pcap_errbuf[PCAP_ERRBUF_SIZE];
@@ -178,28 +251,28 @@ static int pcap_io_handler(struct pcap_io *handle, const char *pcap_file)
realpath(pcap_file, resolved_path);
PCAP_IO_LOG_FATAL("pcap %s in-processing", resolved_path)
handle->pcap = pcap_open_offline(resolved_path, pcap_errbuf);
if (handle->pcap == NULL)
pcap_io->pcap = pcap_open_offline(resolved_path, pcap_errbuf);
if (pcap_io->pcap == NULL)
{
PCAP_IO_LOG_ERROR("unable to open pcap file: %s, %s", resolved_path, pcap_errbuf);
return -1;
}
handle->read_pcap_files++;
pcap_loop(handle->pcap, -1, pcap_pkt_handler, (u_char *)handle);
pcap_close(handle->pcap);
pcap_io->read_pcap_files++;
pcap_loop(pcap_io->pcap, -1, pcap_pkt_handler, (u_char *)pcap_io);
pcap_close(pcap_io->pcap);
PCAP_IO_LOG_FATAL("pcap %s processed", resolved_path)
return 0;
}
static int all_packet_consumed(struct pcap_io *handle)
static int all_packet_consumed(struct pcap_io *pcap_io)
{
uint64_t consumed_pkts = 0;
uint64_t read_pcap_pkts = ATOMIC_READ(&handle->read_pcap_pkts);
for (uint16_t i = 0; i < handle->cfg.nr_worker_thread; i++)
uint64_t read_pcap_pkts = ATOMIC_READ(&pcap_io->read_pcap_pkts);
for (uint16_t i = 0; i < pcap_io->cfg->thread_num; i++)
{
consumed_pkts += ATOMIC_READ(&handle->stat[i].pkts_rx);
consumed_pkts += ATOMIC_READ(&pcap_io->stat[i].pkts_rx);
}
if (consumed_pkts == read_pcap_pkts)
{
@@ -213,49 +286,47 @@ static int all_packet_consumed(struct pcap_io *handle)
static void *pcap_io_thread(void *arg)
{
struct pcap_io *handle = (struct pcap_io *)arg;
__thread_local_logger = handle->logger;
struct pcap_io *pcap_io = (struct pcap_io *)arg;
__thread_local_logger = pcap_io->logger;
ATOMIC_SET(&handle->io_thread_is_runing, 1);
ATOMIC_SET(&pcap_io->io_thread_is_runing, 1);
PCAP_IO_LOG_FATAL("pcap io thread is running");
if (handle->cfg.mode == PACKET_IO_PCAPFILE)
if (strcmp(pcap_io->cfg->mode, "pcapfile") == 0)
{
pcap_io_handler(handle, handle->cfg.pcap_path);
pcap_io_handler(pcap_io, pcap_io->cfg->pcap_path);
}
else // PACKET_IO_PCAPLIST
else
{
FILE *fp = NULL;
if (strcmp(handle->cfg.pcap_path, "-") == 0)
if (strcmp(pcap_io->cfg->pcap_path, "-") == 0)
{
PCAP_IO_LOG_ERROR("pcap path is empty, read from stdin");
fp = stdin;
}
else
{
fp = fopen(handle->cfg.pcap_path, "r");
fp = fopen(pcap_io->cfg->pcap_path, "r");
if (fp == NULL)
{
PCAP_IO_LOG_ERROR("unable to open pcap path: %s", handle->cfg.pcap_path);
PCAP_IO_LOG_ERROR("unable to open pcap path: %s", pcap_io->cfg->pcap_path);
goto erro_out;
}
}
char line[PATH_MAX];
while (ATOMIC_READ(&handle->io_thread_need_exit) == 0 && fgets(line, sizeof(line), fp))
while (ATOMIC_READ(&pcap_io->io_thread_need_exit) == 0 && fgets(line, sizeof(line), fp))
{
if (line[0] == '#')
{
continue;
}
char *pos = strchr(line, '\n');
if (pos)
{
*pos = '\0';
}
pcap_io_handler(handle, line);
pcap_io_handler(pcap_io, line);
}
if (fp != stdin)
{
@@ -265,58 +336,201 @@ static void *pcap_io_thread(void *arg)
PCAP_IO_LOG_FATAL("pcap io thread read all pcap files");
erro_out:
while (ATOMIC_READ(&handle->io_thread_need_exit) == 0)
while (ATOMIC_READ(&pcap_io->io_thread_need_exit) == 0)
{
if (all_packet_consumed(handle))
if (all_packet_consumed(pcap_io))
{
ATOMIC_SET(&handle->io_thread_wait_exit, 1);
ATOMIC_SET(&pcap_io->io_thread_wait_exit, 1);
}
usleep(1000); // 1ms
}
PCAP_IO_LOG_FATAL("pcap io thread exit (read_pcap_files: %lu, read_pcap_pkts: %lu)", handle->read_pcap_files, ATOMIC_READ(&handle->read_pcap_pkts));
ATOMIC_SET(&handle->io_thread_is_runing, 0);
PCAP_IO_LOG_FATAL("pcap io thread exit (read_pcap_files: %lu, read_pcap_pkts: %lu)", pcap_io->read_pcap_files, ATOMIC_READ(&pcap_io->read_pcap_pkts));
ATOMIC_SET(&pcap_io->io_thread_is_runing, 0);
return NULL;
}
static void origin_free_cb(struct packet *pkt, void *args)
{
struct pcap_io *pcap_io = (struct pcap_io *)args;
struct packet_origin *origin = packet_get_origin(pkt);
struct pcap_pkt *pcap_pkt = origin->ctx;
struct packet_io_stat *stat = &pcap_io->stat[origin->thr_idx];
stat->pkts_user_freed++;
stat->bytes_user_freed += packet_get_raw_len(pkt);
free(pcap_pkt);
}
/*
 * Turn one raw pcap record into a parsed packet for worker `thr_idx`.
 *
 * Returns NULL when there is no input record, or when the packet is an IP
 * fragment that was absorbed by the reassembler; once all fragments of a
 * datagram have arrived the reassembler hands back the completed packet.
 */
static struct packet *recv_packet(struct pcap_io *pcap_io, struct pcap_pkt *pcap_pkt, uint16_t thr_idx)
{
    if (pcap_pkt == NULL)
    {
        return NULL;
    }

    struct packet_io_stat *stat = &pcap_io->stat[thr_idx];
    stat->pkts_rx++;
    stat->bytes_rx += pcap_pkt->len;

    struct packet *pkt = packet_pool_pop(pcap_io->pool[thr_idx]);
    assert(pkt != NULL);

    packet_parse(pkt, pcap_pkt->data, pcap_pkt->len);
    memset(&pkt->meta, 0, sizeof(pkt->meta));
    packet_set_action(pkt, PACKET_ACTION_FORWARD);
    packet_set_timeval(pkt, &pcap_pkt->ts);

    /* Record how to release this packet once the user is done with it. */
    struct packet_origin origin = {
        .type = ORIGIN_TYPE_PCAP,
        .ctx = pcap_pkt,
        .cb = origin_free_cb,
        .args = pcap_io,
        .thr_idx = thr_idx,
    };
    packet_set_origin(pkt, &origin);

    if (!packet_is_fragment(pkt))
    {
        return pkt;
    }
    /* Fragments are owned by the reassembler until the datagram completes. */
    return ip_reassembly_defrag(pcap_io->ip_reass[thr_idx], pkt, clock_get_real_time_ms());
}
/*
 * Transmit path for one packet on worker `thr_idx`.
 *
 * Pcap-originated packets are not actually written anywhere: their backing
 * pcap record is freed and the packet object is returned to the pool.
 * Any other origin is treated as an injected (user-built) packet and is
 * dumped to a per-packet pcap file named after its innermost 6-tuple.
 * In both cases the tx counters are credited with the raw length.
 */
static void send_packet(struct pcap_io *pcap_io, struct packet *pkt, uint16_t thr_idx)
{
    struct pcap_pkt *pcap_pkt = NULL;
    struct packet_io_stat *stat = &pcap_io->stat[thr_idx];
    int len = packet_get_raw_len(pkt); /* captured now: pkt may be recycled/freed below */
    struct packet_origin *origin = packet_get_origin(pkt);
    if (origin->type == ORIGIN_TYPE_PCAP)
    {
        /* Release the raw record and recycle the packet object. */
        pcap_pkt = (struct pcap_pkt *)origin->ctx;
        free(pcap_pkt);
        packet_pool_push(pcap_io->pool[thr_idx], pkt);
    }
    else
    {
        stat->pkts_injected++;
        stat->bytes_injected += len;
        struct tuple6 tuple;
        char file[PATH_MAX] = {0};
        char src_addr[INET6_ADDRSTRLEN] = {0};
        char dst_addr[INET6_ADDRSTRLEN] = {0};
        memset(&tuple, 0, sizeof(struct tuple6));
        packet_get_innermost_tuple6(pkt, &tuple);
        if (tuple.addr_family == AF_INET)
        {
            inet_ntop(AF_INET, &tuple.src_addr.v4, src_addr, INET6_ADDRSTRLEN);
            inet_ntop(AF_INET, &tuple.dst_addr.v4, dst_addr, INET6_ADDRSTRLEN);
        }
        else
        {
            inet_ntop(AF_INET6, &tuple.src_addr.v6, src_addr, INET6_ADDRSTRLEN);
            inet_ntop(AF_INET6, &tuple.dst_addr.v6, dst_addr, INET6_ADDRSTRLEN);
        }
        /* File name embeds the tuple plus a running counter for uniqueness. */
        snprintf(file, sizeof(file), "inject-%s:%u-%s:%u-%lu.pcap", src_addr, ntohs(tuple.src_port), dst_addr, ntohs(tuple.dst_port), stat->pkts_injected);
        if (packet_dump_pcap(pkt, file) == -1)
        {
            PCAP_IO_LOG_ERROR("unable to dump pcap file: %s", file);
        }
        else
        {
            PCAP_IO_LOG_FATAL("dump inject packet: %s", file);
        }
        packet_free(pkt);
    }
    stat->pkts_tx++;
    stat->bytes_tx += len;
}
/*
 * Drop one packet on worker `thr_idx`: credit the dropped counters, then
 * release it the same way the send path would — pcap-originated packets go
 * back to the pool (their raw record is freed), everything else is released
 * through the generic packet destructor.
 */
static void drop_packet(struct pcap_io *pcap_io, struct packet *pkt, uint16_t thr_idx)
{
    struct packet_io_stat *stat = &pcap_io->stat[thr_idx];
    struct packet_origin *origin = packet_get_origin(pkt);

    stat->pkts_dropped++;
    stat->bytes_dropped += packet_get_raw_len(pkt);

    if (origin->type != ORIGIN_TYPE_PCAP)
    {
        packet_free(pkt);
        return;
    }

    free(origin->ctx);
    packet_pool_push(pcap_io->pool[thr_idx], pkt);
}
/******************************************************************************
* Public API
******************************************************************************/
void *pcap_io_new(const struct packet_io_config *cfg)
void *pcap_io_new(const char *toml_file)
{
pthread_t tid;
struct pcap_io *handle = (struct pcap_io *)calloc(1, sizeof(struct pcap_io));
if (handle == NULL)
struct pcap_io *pcap_io = (struct pcap_io *)calloc(1, sizeof(struct pcap_io));
if (pcap_io == NULL)
{
PCAP_IO_LOG_ERROR("unable to allocate memory for pcap_io");
return NULL;
}
handle->logger = __thread_local_logger;
memcpy(&handle->cfg, cfg, sizeof(struct packet_io_config));
for (uint16_t i = 0; i < handle->cfg.nr_worker_thread; i++)
pcap_io->cfg = pcap_io_cfg_new(toml_file);
if (pcap_io->cfg == NULL)
{
handle->queue[i] = packet_queue_new(MAX_PACKET_QUEUE_SIZE);
if (handle->queue[i] == NULL)
PCAP_IO_LOG_ERROR("unable to create pcap_io_cfg");
goto error_out;
}
pcap_io_cfg_print(pcap_io->cfg);
pcap_io->logger = __thread_local_logger;
for (uint16_t i = 0; i < pcap_io->cfg->thread_num; i++)
{
pcap_io->ring[i] = ring_buffer_new(RING_BUFFER_MAX_SIZE);
if (pcap_io->ring[i] == NULL)
{
PCAP_IO_LOG_ERROR("unable to create packet queue");
PCAP_IO_LOG_ERROR("unable to create ring buffer");
goto error_out;
}
pcap_io->pool[i] = packet_pool_new(pcap_io->cfg->capacity);
if (pcap_io->pool[i] == NULL)
{
PCAP_IO_LOG_ERROR("unable to create packet pool");
goto error_out;
}
pcap_io->ip_reass[i] = ip_reassembly_new(pcap_io->cfg->timeout_ms, pcap_io->cfg->frag_queue_num, pcap_io->cfg->frag_queue_size);
if (pcap_io->ip_reass[i] == NULL)
{
PCAP_IO_LOG_ERROR("unable to create ip reassembly");
goto error_out;
}
}
if (pthread_create(&tid, NULL, pcap_io_thread, (void *)handle) != 0)
if (pthread_create(&tid, NULL, pcap_io_thread, (void *)pcap_io) != 0)
{
PCAP_IO_LOG_ERROR("unable to create pcap io thread");
goto error_out;
}
return handle;
return pcap_io;
error_out:
pcap_io_free(handle);
pcap_io_free(pcap_io);
return NULL;
}
@@ -333,11 +547,11 @@ void pcap_io_free(void *handle)
}
struct pcap_pkt *pcap_pkt = NULL;
for (uint16_t i = 0; i < pcap_io->cfg.nr_worker_thread; i++)
for (uint16_t i = 0; i < pcap_io->cfg->thread_num; i++)
{
while (1)
{
packet_queue_pop(pcap_io->queue[i], (void **)&pcap_pkt);
ring_buffer_pop(pcap_io->ring[i], (void **)&pcap_pkt);
if (pcap_pkt)
{
free(pcap_pkt);
@@ -347,9 +561,11 @@ void pcap_io_free(void *handle)
break;
}
}
packet_queue_free(pcap_io->queue[i]);
ip_reassembly_free(pcap_io->ip_reass[i]);
packet_pool_free(pcap_io->pool[i]);
ring_buffer_free(pcap_io->ring[i]);
}
pcap_io_cfg_free(pcap_io->cfg);
free(pcap_io);
pcap_io = NULL;
}
@@ -367,128 +583,76 @@ int pcap_io_init(void *handle __attribute__((unused)), uint16_t thr_idx __attrib
return 0;
}
uint16_t pcap_io_ingress(void *handle, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts)
int pcap_io_recv(void *handle, uint16_t thr_idx, struct packet *pkts[], int nr_pkts)
{
uint16_t nr_packet_parsed = 0;
struct packet *pkt = NULL;
struct pcap_pkt *pcap_pkt = NULL;
struct pcap_io *pcap_io = (struct pcap_io *)handle;
struct packet_queue *queue = pcap_io->queue[thr_idx];
struct packet_io_stat *stat = &pcap_io->stat[thr_idx];
struct ring_buffer *ring = pcap_io->ring[thr_idx];
for (uint16_t i = 0; i < nr_pkts; i++)
int ret = 0;
for (int i = 0; i < nr_pkts; i++)
{
packet_queue_pop(queue, (void **)&pcap_pkt);
if (pcap_pkt == NULL)
ring_buffer_pop(ring, (void **)&pcap_pkt);
pkt = recv_packet(pcap_io, pcap_pkt, thr_idx);
if (pkt)
{
break;
}
else
{
ATOMIC_INC(&stat->pkts_rx);
stat->bytes_rx += pcap_pkt->len;
stat->raw_pkts_rx++;
stat->raw_bytes_rx += pcap_pkt->len;
pkt = &pkts[nr_packet_parsed];
packet_parse(pkt, pcap_pkt->data, pcap_pkt->len);
memset(&pkt->meta, 0, sizeof(pkt->meta));
packet_set_origin_ctx(pkt, pcap_pkt);
packet_set_action(pkt, PACKET_ACTION_FORWARD);
packet_set_timeval(pkt, &pcap_pkt->ts);
nr_packet_parsed++;
pkts[ret++] = pkt;
}
}
return nr_packet_parsed;
return ret;
}
void pcap_io_egress(void *handle, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts)
void pcap_io_send(void *handle, uint16_t thr_idx, struct packet *pkts[], int nr_pkts)
{
int len;
struct tuple6 tuple;
struct packet *frag = NULL;
struct packet *pkt = NULL;
struct pcap_io *pcap_io = (struct pcap_io *)handle;
struct packet_io_stat *stat = &pcap_io->stat[thr_idx];
char file[PATH_MAX] = {0};
char src_addr[INET6_ADDRSTRLEN] = {0};
char dst_addr[INET6_ADDRSTRLEN] = {0};
for (uint16_t i = 0; i < nr_pkts; i++)
for (int i = 0; i < nr_pkts; i++)
{
pkt = &pkts[i];
len = packet_get_raw_len(pkt);
pkt = pkts[i];
stat->pkts_tx++;
stat->bytes_tx += len;
if (packet_is_ctrl(pkt))
if (packet_is_defraged(pkt))
{
stat->ctrl_pkts_tx++;
stat->ctrl_bytes_tx += len;
while ((frag = packet_pop_frag(pkt)))
{
send_packet(pcap_io, frag, thr_idx);
}
packet_free(pkt);
}
else
{
stat->raw_pkts_tx++;
stat->raw_bytes_tx += len;
}
struct pcap_pkt *pcap_pkt = (struct pcap_pkt *)packet_get_origin_ctx(pkt);
if (pcap_pkt)
{
free(pcap_pkt);
}
else
{
stat->pkts_injected++;
stat->bytes_injected += len;
memset(&tuple, 0, sizeof(struct tuple6));
packet_get_innermost_tuple6(pkt, &tuple);
if (tuple.addr_family == AF_INET)
{
inet_ntop(AF_INET, &tuple.src_addr.v4, src_addr, INET6_ADDRSTRLEN);
inet_ntop(AF_INET, &tuple.dst_addr.v4, dst_addr, INET6_ADDRSTRLEN);
}
else
{
inet_ntop(AF_INET6, &tuple.src_addr.v6, src_addr, INET6_ADDRSTRLEN);
inet_ntop(AF_INET6, &tuple.dst_addr.v6, dst_addr, INET6_ADDRSTRLEN);
}
snprintf(file, sizeof(file), "inject-%s:%u-%s:%u-%lu.pcap", src_addr, ntohs(tuple.src_port), dst_addr, ntohs(tuple.dst_port), stat->pkts_injected);
if (packet_dump_pcap(pkt, file) == -1)
{
PCAP_IO_LOG_ERROR("unable to dump pcap file: %s", file);
}
else
{
PCAP_IO_LOG_FATAL("dump inject packet: %s", file);
}
send_packet(pcap_io, pkt, thr_idx);
}
pkts[i] = NULL;
}
}
void pcap_io_drop(void *handle, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts)
void pcap_io_drop(void *handle, uint16_t thr_idx, struct packet *pkts[], int nr_pkts)
{
struct packet *pkt = NULL;
struct packet *frag = NULL;
struct pcap_io *pcap_io = (struct pcap_io *)handle;
struct packet_io_stat *stat = &pcap_io->stat[thr_idx];
for (uint16_t i = 0; i < nr_pkts; i++)
for (int i = 0; i < nr_pkts; i++)
{
pkt = &pkts[i];
struct pcap_pkt *pcap_pkt = (struct pcap_pkt *)packet_get_origin_ctx(pkt);
if (pcap_pkt)
pkt = pkts[i];
if (packet_is_defraged(pkt))
{
stat->pkts_dropped++;
stat->bytes_dropped += packet_get_raw_len(pkt);
free(pcap_pkt);
while ((frag = packet_pop_frag(pkt)))
{
drop_packet(pcap_io, frag, thr_idx);
}
packet_free(pkt);
}
packet_free(pkt);
else
{
drop_packet(pcap_io, pkt, thr_idx);
}
pkts[i] = NULL;
}
}
@@ -497,9 +661,31 @@ void pcap_io_yield(void *handle __attribute__((unused)), uint16_t thr_idx __attr
return;
}
/*
 * Periodic maintenance hook: expire stale IP-fragment queues for this worker.
 * Each expired packet is either bypassed (sent onward) or dropped, according
 * to the configured ip_reassembly fail_action (0 = bypass, otherwise drop).
 */
void pcap_io_polling(void *handle, uint16_t thr_idx)
{
    struct pcap_io *io = (struct pcap_io *)handle;
    const int bypass = (io->cfg->fail_action == 0); /* invariant across the loop */
    uint64_t now_ms = clock_get_real_time_ms();
    struct packet *expired;

    while ((expired = ip_reassembly_clean(io->ip_reass[thr_idx], now_ms)) != NULL)
    {
        if (bypass)
        {
            send_packet(io, expired, thr_idx);
        }
        else
        {
            drop_packet(io, expired, thr_idx);
        }
    }
    // TODO
    // output stat
}
/*
 * Return the per-thread IO statistics block for worker `thr_idx`.
 * The returned pointer aliases internal state of the pcap_io handle and
 * remains valid until pcap_io_free().
 */
struct packet_io_stat *pcap_io_stat(void *handle, uint16_t thr_idx)
{
    struct pcap_io *pcap_io = (struct pcap_io *)handle;
    return &pcap_io->stat[thr_idx];
}

View File

@@ -7,15 +7,16 @@ extern "C"
#include "packet_io.h"
void *pcap_io_new(const struct packet_io_config *cfg);
void *pcap_io_new(const char *toml_file);
void pcap_io_free(void *handle);
int pcap_io_isbreak(void *handle);
int pcap_io_init(void *handle, uint16_t thr_idx);
uint16_t pcap_io_ingress(void *handle, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts);
void pcap_io_egress(void *handle, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts);
void pcap_io_drop(void *handle, uint16_t thr_idx, struct packet *pkts, uint16_t nr_pkts);
int pcap_io_recv(void *handle, uint16_t thr_idx, struct packet *pkts[], int nr_pkts);
void pcap_io_send(void *handle, uint16_t thr_idx, struct packet *pkts[], int nr_pkts);
void pcap_io_drop(void *handle, uint16_t thr_idx, struct packet *pkts[], int nr_pkts);
void pcap_io_yield(void *handle, uint16_t thr_idx);
void pcap_io_polling(void *handle, uint16_t thr_idx);
struct packet_io_stat *pcap_io_stat(void *handle, uint16_t thr_idx);
#ifdef __cplusplus

View File

@@ -1,6 +1,6 @@
add_library(packet_manager
packet_manager_runtime.c
packet_manager.c
packet_pool.c
packet_parser.c
packet_builder.c
packet_filter.c

View File

@@ -6,8 +6,7 @@
#include "packet_parser.h"
#include "packet_internal.h"
#define PACKET_CRAFT_LOG_DEBUG(format, ...) STELLAR_LOG_DEBUG(__thread_local_logger, "packet craft", format, ##__VA_ARGS__)
#define PACKET_CRAFT_LOG_ERROR(format, ...) STELLAR_LOG_ERROR(__thread_local_logger, "packet craft", format, ##__VA_ARGS__)
#define PACKET_BUILD_LOG_ERROR(format, ...) STELLAR_LOG_ERROR(__thread_local_logger, "packet build", format, ##__VA_ARGS__)
struct fingerprint
{
@@ -79,7 +78,7 @@ static void update_gtp1_hdr(struct gtp1_hdr *gtp, int trim_len)
gtp1_hdr_set_msg_len(gtp, msg_len - trim_len);
if (gtp1_hdr_get_seq_flag(gtp) && gtp1_hdr_get_seq(gtp))
{
PACKET_CRAFT_LOG_ERROR("build packets may be dropped by intermediate devices, the GTPv1 layer requires a sequence number");
PACKET_BUILD_LOG_ERROR("build packets may be dropped by intermediate devices, the GTPv1 layer requires a sequence number");
}
}
@@ -89,7 +88,7 @@ static void update_gtp2_hdr(struct gtp2_hdr *gtp, int trim_len)
gtp2_hdr_set_msg_len(gtp, msg_len - trim_len);
if (gtp2_hdr_get_seq(gtp))
{
PACKET_CRAFT_LOG_ERROR("build packets may be dropped by intermediate devices, the GTPv2 layer requires a sequence number");
PACKET_BUILD_LOG_ERROR("build packets may be dropped by intermediate devices, the GTPv2 layer requires a sequence number");
}
}
@@ -256,7 +255,7 @@ struct packet *packet_build_tcp(const struct packet *origin_pkt, uint32_t tcp_se
(tcp_payload == NULL && tcp_payload_len != 0) || (tcp_payload != NULL && tcp_payload_len == 0) ||
(tcp_options_len && tcp_options_len % 4 != 0))
{
PACKET_CRAFT_LOG_ERROR("craft TCP packet failed, invalid arguments");
PACKET_BUILD_LOG_ERROR("build TCP packet failed, invalid arguments");
return NULL;
}
@@ -265,7 +264,7 @@ struct packet *packet_build_tcp(const struct packet *origin_pkt, uint32_t tcp_se
const struct layer_private *tcp_layer = packet_get_layer(origin_pkt, layer_count - 1);
if (tcp_layer == NULL || tcp_layer->proto != LAYER_PROTO_TCP)
{
PACKET_CRAFT_LOG_ERROR("craft TCP packet failed, the innermost layer of the original packet is not TCP");
PACKET_BUILD_LOG_ERROR("build TCP packet failed, the innermost layer of the original packet is not TCP");
return NULL;
}
@@ -275,7 +274,7 @@ struct packet *packet_build_tcp(const struct packet *origin_pkt, uint32_t tcp_se
struct packet *new_pkt = packet_new(new_pkt_len);
if (new_pkt == NULL)
{
PACKET_CRAFT_LOG_ERROR("craft TCP packet failed, no space to allocate new packet");
PACKET_BUILD_LOG_ERROR("build TCP packet failed, no space to allocate new packet");
return NULL;
}
@@ -297,7 +296,14 @@ struct packet *packet_build_tcp(const struct packet *origin_pkt, uint32_t tcp_se
packet_parse(new_pkt, new_pkt_data, new_pkt_len);
memcpy(&new_pkt->meta, &origin_pkt->meta, sizeof(struct metadata));
new_pkt->meta.origin_ctx = NULL;
struct packet_origin origin = {
.type = ORIGIN_TYPE_USER,
.ctx = NULL,
.cb = NULL,
.args = NULL,
.thr_idx = -1,
};
packet_set_origin(new_pkt, &origin);
return new_pkt;
}
@@ -307,7 +313,7 @@ struct packet *packet_build_udp(const struct packet *origin_pkt, const char *udp
// check arguments
if (origin_pkt == NULL || (udp_payload == NULL && udp_payload_len != 0) || (udp_payload != NULL && udp_payload_len == 0))
{
PACKET_CRAFT_LOG_ERROR("craft UDP packet failed, invalid arguments");
PACKET_BUILD_LOG_ERROR("build UDP packet failed, invalid arguments");
return NULL;
}
@@ -316,7 +322,7 @@ struct packet *packet_build_udp(const struct packet *origin_pkt, const char *udp
const struct layer_private *udp_layer = packet_get_layer(origin_pkt, layer_count - 1);
if (udp_layer == NULL || udp_layer->proto != LAYER_PROTO_UDP)
{
PACKET_CRAFT_LOG_ERROR("craft UDP packet failed, the innermost layer of the original packet is not UDP");
PACKET_BUILD_LOG_ERROR("build UDP packet failed, the innermost layer of the original packet is not UDP");
return NULL;
}
@@ -326,7 +332,7 @@ struct packet *packet_build_udp(const struct packet *origin_pkt, const char *udp
struct packet *new_pkt = packet_new(new_pkt_len);
if (new_pkt == NULL)
{
PACKET_CRAFT_LOG_ERROR("craft UDP packet failed, no space to allocate new packet");
PACKET_BUILD_LOG_ERROR("build UDP packet failed, no space to allocate new packet");
return NULL;
}
@@ -339,7 +345,14 @@ struct packet *packet_build_udp(const struct packet *origin_pkt, const char *udp
packet_parse(new_pkt, new_pkt_data, new_pkt_len);
memcpy(&new_pkt->meta, &origin_pkt->meta, sizeof(struct metadata));
new_pkt->meta.origin_ctx = NULL;
struct packet_origin origin = {
.type = ORIGIN_TYPE_USER,
.ctx = NULL,
.cb = NULL,
.args = NULL,
.thr_idx = -1,
};
packet_set_origin(new_pkt, &origin);
return new_pkt;
}
@@ -348,7 +361,7 @@ struct packet *packet_build_l3(const struct packet *origin_pkt, uint8_t ip_proto
{
if (origin_pkt == NULL || (l3_payload == NULL && l3_payload_len != 0) || (l3_payload != NULL && l3_payload_len == 0))
{
PACKET_CRAFT_LOG_ERROR("craft L3 packet failed, invalid arguments");
PACKET_BUILD_LOG_ERROR("build L3 packet failed, invalid arguments");
return NULL;
}
@@ -369,7 +382,7 @@ struct packet *packet_build_l3(const struct packet *origin_pkt, uint8_t ip_proto
}
if (l3_layer == NULL)
{
PACKET_CRAFT_LOG_ERROR("craft L3 packet failed, the original packet does not contain an IP layer");
PACKET_BUILD_LOG_ERROR("build L3 packet failed, the original packet does not contain an IP layer");
return NULL;
}
@@ -382,7 +395,7 @@ struct packet *packet_build_l3(const struct packet *origin_pkt, uint8_t ip_proto
struct packet *new_pkt = packet_new(new_pkt_len);
if (new_pkt == NULL)
{
PACKET_CRAFT_LOG_ERROR("craft L3 packet failed, no space to allocate new packet");
PACKET_BUILD_LOG_ERROR("build L3 packet failed, no space to allocate new packet");
return NULL;
}
@@ -408,7 +421,14 @@ struct packet *packet_build_l3(const struct packet *origin_pkt, uint8_t ip_proto
packet_parse(new_pkt, new_pkt_data, new_pkt_len);
memcpy(&new_pkt->meta, &origin_pkt->meta, sizeof(struct metadata));
new_pkt->meta.origin_ctx = NULL;
struct packet_origin origin = {
.type = ORIGIN_TYPE_USER,
.ctx = NULL,
.cb = NULL,
.args = NULL,
.thr_idx = -1,
};
packet_set_origin(new_pkt, &origin);
return new_pkt;
}

View File

@@ -1,18 +1,9 @@
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <sys/time.h>
#include "utils.h"
#include "log_internal.h"
#include "packet_dump.h"
#include "packet_parser.h"
#include "packet_helper.h"
#include "packet_internal.h"
#define PACKET_DUMP_LOG_ERROR(format, ...) STELLAR_LOG_ERROR(__thread_local_logger, "packet dump", format, ##__VA_ARGS__)
struct pcap_pkt_hdr
{
unsigned int tv_sec; // time stamp
@@ -32,8 +23,6 @@ struct pcap_file_hdr
unsigned int linktype; // data link type (LINKTYPE_*)
};
// return 0: success
// return -1: failed
int packet_dump_pcap(const struct packet *pkt, const char *file)
{
const char *data = packet_get_raw_data(pkt);
@@ -49,16 +38,9 @@ int packet_dump_pcap(const struct packet *pkt, const char *file)
.snaplen = 0xFFFF,
.linktype = 1};
if (file == NULL || data == NULL || len == 0)
{
PACKET_DUMP_LOG_ERROR("invalid parameter, file: %p, data: %p, len: %d", file, data, len);
return -1;
}
FILE *fp = fopen(file, "w+");
if (fp == NULL)
{
PACKET_DUMP_LOG_ERROR("fopen %s failed, %s", file, strerror(errno));
return -1;
}

View File

@@ -5,13 +5,10 @@ extern "C"
{
#endif
struct packet;
#include "stellar/packet.h"
// return 0: success
// return -1: failed
int packet_dump_pcap(const struct packet *pkt, const char *file);
void packet_dump_hex(const struct packet *pkt, int fd);
// return number of bytes written
int packet_dump_str(const struct packet *pkt, char *buff, int size);
void packet_print(const struct packet *pkt);

View File

@@ -5,7 +5,7 @@ extern "C"
{
#endif
#include "utils.h"
#include "utils_internal.h"
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

View File

@@ -34,7 +34,25 @@ struct metadata
enum packet_direction direction;
enum packet_action action;
struct timeval tv;
const void *origin_ctx;
};
enum origin_type
{
ORIGIN_TYPE_MR,
ORIGIN_TYPE_PCAP,
ORIGIN_TYPE_USER,
};
typedef void origin_free(struct packet *pkt, void *args);
struct packet_origin
{
enum origin_type type;
void *ctx;
origin_free *cb;
void *args;
int thr_idx;
};
struct layer_private
@@ -50,6 +68,8 @@ struct layer_private
uint16_t hdr_offset; // header offset from data_ptr
};
TAILQ_HEAD(packet_queue, packet);
struct packet
{
void *user_data;
@@ -58,14 +78,19 @@ struct packet
int8_t layers_used;
int8_t layers_size;
int8_t need_free;
int8_t is_defraged;
const char *data_ptr;
uint16_t data_len;
uint16_t trim_len; // trim eth padding
TAILQ_ENTRY(packet) stage_tqe;
TAILQ_ENTRY(packet) pool_tqe; // for packet pool
TAILQ_ENTRY(packet) frag_tqe; // for IP reassembly
TAILQ_ENTRY(packet) stage_tqe; // for packet manager
struct packet_queue frag_list; // for defraged packet
struct metadata meta;
struct packet_origin origin;
};
enum packet_load_balance_method
@@ -84,8 +109,8 @@ enum packet_load_balance_method
void packet_set_route_ctx(struct packet *pkt, const struct route_ctx *ctx);
const struct route_ctx *packet_get_route_ctx(const struct packet *pkt);
void packet_set_origin_ctx(struct packet *pkt, void *ctx);
const void *packet_get_origin_ctx(const struct packet *pkt);
void packet_set_origin(struct packet *pkt, struct packet_origin *origin);
struct packet_origin *packet_get_origin(struct packet *pkt);
void packet_set_sids(struct packet *pkt, const struct sids *sids);
const struct sids *packet_get_sids(const struct packet *pkt);
@@ -152,6 +177,11 @@ struct packet *packet_dup(const struct packet *pkt);
void packet_free(struct packet *pkt);
int packet_is_fragment(const struct packet *pkt);
int packet_is_defraged(const struct packet *pkt);
void packet_set_defraged(struct packet *pkt);
void packet_push_frag(struct packet *pkt, struct packet *frag);
struct packet *packet_pop_frag(struct packet *pkt);
#ifdef __cplusplus
}

View File

@@ -1,15 +1,25 @@
#include <assert.h>
#include "utils.h"
#include "utils_internal.h"
#include "packet_internal.h"
#include "packet_manager_runtime.h"
#include "packet_manager_internal.h"
#define PACKET_MANAGER_MODULE_NAME "packet_manager_module"
struct packet_manager_config
#define PACKET_MANAGER_LOG_ERROR(format, ...) STELLAR_LOG_ERROR(__thread_local_logger, "packet manager", format, ##__VA_ARGS__)
#define PACKET_MANAGER_LOG_FATAL(format, ...) STELLAR_LOG_FATAL(__thread_local_logger, "packet manager", format, ##__VA_ARGS__)
#define PACKET_MANAGER_LOG_INFO(format, ...) STELLAR_LOG_INFO(__thread_local_logger, "packet manager", format, ##__VA_ARGS__)
struct packet_manager_runtime
{
uint16_t nr_worker_thread;
enum packet_stage curr_stage;
struct packet_queue queue[PACKET_QUEUE_MAX];
void *claim_arg;
on_packet_claimed_callback *claim_cb;
struct mq_runtime *mq;
struct packet_manager_stat stat;
};
struct packet_manager_schema
@@ -21,43 +31,63 @@ struct packet_manager_schema
struct packet_manager
{
struct packet_manager_config *cfg;
uint64_t thread_num;
struct packet_manager_schema *schema;
struct packet_manager_runtime *runtime[MAX_THREAD_NUM];
};
/******************************************************************************
* packet manager config
******************************************************************************/
static void packet_manager_config_free(struct packet_manager_config *cfg)
const char *packet_stage_to_str(enum packet_stage stage)
{
if (cfg)
switch (stage)
{
free(cfg);
cfg = NULL;
case PACKET_STAGE_PREROUTING:
return "PACKET_STAGE_PREROUTING";
case PACKET_STAGE_INPUT:
return "PACKET_STAGE_INPUT";
case PACKET_STAGE_FORWARD:
return "PACKET_STAGE_FORWARD";
case PACKET_STAGE_OUTPUT:
return "PACKET_STAGE_OUTPUT";
case PACKET_STAGE_POSTROUTING:
return "PACKET_STAGE_POSTROUTING";
default:
return "PACKET_STAGE_UNKNOWN";
}
}
static struct packet_manager_config *packet_manager_config_new(const char *toml_file)
/*
 * Destroy a per-thread runtime: drain and free every packet still sitting in
 * any stage queue, then release the runtime itself. Safe to call with NULL.
 *
 * NOTE(review): stray lines from the removed packet_manager_config_new()
 * were interleaved here; they have been removed as dead residue.
 */
void packet_manager_runtime_free(struct packet_manager_runtime *pkt_mgr_rt)
{
    if (pkt_mgr_rt)
    {
        for (int i = 0; i < PACKET_QUEUE_MAX; i++)
        {
            struct packet *pkt = NULL;
            while ((pkt = TAILQ_FIRST(&pkt_mgr_rt->queue[i])))
            {
                TAILQ_REMOVE(&pkt_mgr_rt->queue[i], pkt, stage_tqe);
                packet_free(pkt);
            }
        }
    }
    free(pkt_mgr_rt); /* free(NULL) is a no-op */
    pkt_mgr_rt = NULL;
}
/*
 * Allocate and initialize a per-thread runtime: zeroed stats/state and an
 * empty TAILQ for every stage queue. Returns NULL on allocation failure.
 *
 * NOTE(review): stray toml-loading lines from the removed
 * packet_manager_config_new() were interleaved here; removed as dead residue.
 */
struct packet_manager_runtime *packet_manager_runtime_new()
{
    struct packet_manager_runtime *pkt_mgr_rt = calloc(1, sizeof(struct packet_manager_runtime));
    if (pkt_mgr_rt == NULL)
    {
        PACKET_MANAGER_LOG_ERROR("failed to allocate memory for packet_manager_runtime");
        return NULL;
    }
    for (int i = 0; i < PACKET_QUEUE_MAX; i++)
    {
        TAILQ_INIT(&pkt_mgr_rt->queue[i]);
    }
    return pkt_mgr_rt;
}
/******************************************************************************
@@ -85,7 +115,7 @@ static void on_packet_stage_dispatch(int topic_id, void *msg, on_msg_cb_func *cb
((on_packet_stage_callback *)(void *)cb)(stage, pkt, cb_arg);
}
static void packet_manager_schema_free(struct packet_manager_schema *pkt_mgr_schema)
static void packet_schema_free(struct packet_manager_schema *pkt_mgr_schema)
{
if (pkt_mgr_schema)
{
@@ -110,12 +140,12 @@ static void packet_manager_schema_free(struct packet_manager_schema *pkt_mgr_sch
}
}
static struct packet_manager_schema *packet_manager_schema_new(struct mq_schema *mq)
static struct packet_manager_schema *packet_schema_new(struct mq_schema *mq)
{
struct packet_manager_schema *pkt_mgr_schema = calloc(1, sizeof(struct packet_manager_schema));
if (pkt_mgr_schema == NULL)
{
PACKET_MANAGER_LOG_ERROR("failed to allocate memory for packet_manager_schema");
PACKET_MANAGER_LOG_ERROR("failed to allocate memory for packet_schema");
return NULL;
}
@@ -140,7 +170,7 @@ static struct packet_manager_schema *packet_manager_schema_new(struct mq_schema
return pkt_mgr_schema;
error_out:
packet_manager_schema_free(pkt_mgr_schema);
packet_schema_free(pkt_mgr_schema);
return NULL;
}
@@ -148,7 +178,7 @@ error_out:
* packet manager
******************************************************************************/
struct packet_manager *packet_manager_new(struct mq_schema *mq, const char *toml_file)
struct packet_manager *packet_manager_new(struct mq_schema *mq_schema, uint64_t thread_num)
{
struct packet_manager *pkt_mgr = calloc(1, sizeof(struct packet_manager));
if (pkt_mgr == NULL)
@@ -157,21 +187,15 @@ struct packet_manager *packet_manager_new(struct mq_schema *mq, const char *toml
return NULL;
}
pkt_mgr->cfg = packet_manager_config_new(toml_file);
if (pkt_mgr->cfg == NULL)
{
PACKET_MANAGER_LOG_ERROR("failed to create packet_manager_config");
goto error_out;
}
pkt_mgr->schema = packet_manager_schema_new(mq);
pkt_mgr->thread_num = thread_num;
pkt_mgr->schema = packet_schema_new(mq_schema);
if (pkt_mgr->schema == NULL)
{
PACKET_MANAGER_LOG_ERROR("failed to create packet_manager_schema");
PACKET_MANAGER_LOG_ERROR("failed to create packet_schema");
goto error_out;
}
for (uint16_t i = 0; i < pkt_mgr->cfg->nr_worker_thread; i++)
for (uint16_t i = 0; i < pkt_mgr->thread_num; i++)
{
pkt_mgr->runtime[i] = packet_manager_runtime_new();
if (pkt_mgr->runtime[i] == NULL)
@@ -194,22 +218,18 @@ void packet_manager_free(struct packet_manager *pkt_mgr)
if (pkt_mgr)
{
if (pkt_mgr->cfg)
for (uint16_t i = 0; i < pkt_mgr->thread_num; i++)
{
for (uint16_t i = 0; i < pkt_mgr->cfg->nr_worker_thread; i++)
pkt_mgr_rt = pkt_mgr->runtime[i];
if (pkt_mgr_rt)
{
pkt_mgr_rt = pkt_mgr->runtime[i];
if (pkt_mgr_rt)
{
PACKET_MANAGER_LOG_INFO("runtime: %p, idx: %d, will be cleaned", pkt_mgr_rt, i);
packet_manager_runtime_print_stat(pkt_mgr_rt);
packet_manager_runtime_free(pkt_mgr_rt);
}
packet_manager_print_stat(pkt_mgr, i);
packet_manager_runtime_free(pkt_mgr_rt);
}
}
packet_manager_schema_free(pkt_mgr->schema);
packet_manager_config_free(pkt_mgr->cfg);
packet_schema_free(pkt_mgr->schema);
free(pkt_mgr);
pkt_mgr = NULL;
@@ -218,73 +238,158 @@ void packet_manager_free(struct packet_manager *pkt_mgr)
int packet_manager_new_packet_exdata_index(struct packet_manager *pkt_mgr, const char *name, exdata_free *func, void *arg)
{
assert(pkt_mgr);
return exdata_schema_new_index(pkt_mgr->schema->exdata, name, func, arg);
}
int packet_manager_subscribe(struct packet_manager *pkt_mgr, enum packet_stage stage, on_packet_stage_callback *cb, void *args)
{
return mq_schema_subscribe(pkt_mgr->schema->mq, pkt_mgr->schema->topic_id[stage], (on_msg_cb_func *)cb, args);
}
void packet_manager_init(struct packet_manager *pkt_mgr, uint16_t thread_id, struct mq_runtime *mq_rt)
int packet_manager_subscribe(struct packet_manager *pkt_mgr, enum packet_stage stage, on_packet_stage_callback *cb, void *arg)
{
assert(pkt_mgr);
assert(thread_id < pkt_mgr->cfg->nr_worker_thread);
return mq_schema_subscribe(pkt_mgr->schema->mq, pkt_mgr->schema->topic_id[stage], (on_msg_cb_func *)cb, arg);
}
int packet_manager_init(struct packet_manager *pkt_mgr, uint16_t thread_id, struct mq_runtime *mq_rt)
{
assert(pkt_mgr);
assert(thread_id < pkt_mgr->thread_num);
assert(mq_rt);
struct packet_manager_runtime *runtime = pkt_mgr->runtime[thread_id];
packet_manager_runtime_init(runtime, mq_rt);
runtime->mq = mq_rt;
return 0;
}
/*
 * Accept a packet into the pipeline for `thread_id`: attach a fresh exdata
 * runtime as per-packet user data, then queue it at the PREROUTING stage.
 *
 * Fix: a stale call to the removed packet_manager_runtime_ingress() was
 * still present alongside the direct enqueue, processing the packet twice;
 * the leftover call has been removed.
 */
void packet_manager_ingress(struct packet_manager *pkt_mgr, uint16_t thread_id, struct packet *pkt)
{
    struct packet_manager_runtime *runtime = pkt_mgr->runtime[thread_id];

    /* Per-packet extension-data storage, released again on egress. */
    struct exdata_runtime *exdata_rt = exdata_runtime_new(pkt_mgr->schema->exdata);
    packet_set_user_data(pkt, exdata_rt);

    runtime->stat.total.pkts_ingress++;
    runtime->stat.queue[PACKET_STAGE_PREROUTING].pkts_in++;
    TAILQ_INSERT_TAIL(&runtime->queue[PACKET_STAGE_PREROUTING], pkt, stage_tqe);
}
/*
 * Pop one fully-processed packet (if any) from the terminal stage queue for
 * `thread_id`. The exdata runtime attached on ingress is freed before the
 * packet is handed back. Returns NULL when the terminal queue is empty.
 *
 * Fix: the body contained an interleaved duplicate declaration of `pkt`
 * (via the removed packet_manager_runtime_egress()) and duplicate returns;
 * the stale lines have been removed.
 */
struct packet *packet_manager_egress(struct packet_manager *pkt_mgr, uint16_t thread_id)
{
    struct packet_manager_runtime *runtime = pkt_mgr->runtime[thread_id];
    struct packet *pkt = TAILQ_FIRST(&runtime->queue[PACKET_STAGE_MAX]);
    if (pkt)
    {
        runtime->stat.total.pkts_egress++;
        runtime->stat.queue[PACKET_STAGE_MAX].pkts_out++;
        TAILQ_REMOVE(&runtime->queue[PACKET_STAGE_MAX], pkt, stage_tqe);

        /* Release the per-packet exdata attached on ingress. */
        struct exdata_runtime *exdata_rt = packet_get_user_data(pkt);
        exdata_runtime_free(exdata_rt);
    }
    return pkt;
}
/*
 * Run every queued packet through the stage pipeline for `thread_id`.
 *
 * For each stage, packets are published on that stage's topic; a subscriber
 * may claim a packet (taking ownership, with an optional claimed-callback),
 * otherwise the packet advances to the next stage queue. On exit curr_stage
 * is reset to -1 (no stage active).
 *
 * Fix: a stale call to the removed packet_manager_runtime_dispatch() was
 * still present before the inline loop; the leftover call has been removed.
 */
void packet_manager_dispatch(struct packet_manager *pkt_mgr, uint16_t thread_id)
{
    struct packet_manager_runtime *runtime = pkt_mgr->runtime[thread_id];
    for (int i = 0; i < PACKET_STAGE_MAX; i++)
    {
        runtime->curr_stage = i;
        struct packet *pkt = NULL;
        while ((pkt = TAILQ_FIRST(&runtime->queue[runtime->curr_stage])))
        {
            /* Reset claim state before the subscribers see the packet. */
            packet_set_claim(pkt, false);
            runtime->claim_cb = NULL;
            runtime->claim_arg = NULL;
            TAILQ_REMOVE(&runtime->queue[runtime->curr_stage], pkt, stage_tqe);
            runtime->stat.queue[runtime->curr_stage].pkts_out++;
            mq_runtime_publish_message(runtime->mq, runtime->curr_stage, pkt);
            mq_runtime_dispatch(runtime->mq);
            if (packet_is_claim(pkt))
            {
                /* A subscriber took ownership; notify it and move on. */
                if (runtime->claim_cb)
                {
                    runtime->claim_cb(pkt, runtime->claim_arg);
                }
                continue;
            }
            TAILQ_INSERT_TAIL(&runtime->queue[runtime->curr_stage + 1], pkt, stage_tqe);
            runtime->stat.queue[runtime->curr_stage + 1].pkts_in++;
        }
    }
    runtime->curr_stage = -1;
}
/*
 * Claim `pkt` during dispatch of the current stage. On success the packet is
 * marked claimed, the caller's callback/arg are recorded so dispatch can hand
 * ownership over after subscribers run, and the stage's claim counter is
 * credited.
 *
 * Returns 0 on success, -1 if the packet was already claimed.
 *
 * Fix: a duplicate old-style signature and a stale return through the removed
 * packet_manager_runtime_claim_packet() were interleaved here; removed.
 */
int packet_manager_claim_packet(struct packet_manager *pkt_mgr, uint16_t thread_id, struct packet *pkt, on_packet_claimed_callback cb, void *arg)
{
    assert(pkt_mgr);
    struct packet_manager_runtime *runtime = pkt_mgr->runtime[thread_id];
    if (packet_is_claim(pkt))
    {
        PACKET_MANAGER_LOG_ERROR("packet is already claimed, cannot claim again");
        return -1;
    }
    runtime->claim_cb = cb;
    runtime->claim_arg = arg;
    packet_set_claim(pkt, true);
    runtime->stat.queue[runtime->curr_stage].pkts_claim++;
    return 0;
}
/*
 * Re-inject `pkt` at `stage` for `thread_id` (e.g. from a claimed-packet
 * callback). An out-of-range stage is a programming error: logged, asserted,
 * and ignored in release builds.
 *
 * Fix: a stale call to the removed packet_manager_runtime_schedule_packet()
 * was still present alongside the inline enqueue; removed.
 */
void packet_manager_schedule_packet(struct packet_manager *pkt_mgr, uint16_t thread_id, struct packet *pkt, enum packet_stage stage)
{
    assert(pkt_mgr);
    struct packet_manager_runtime *runtime = pkt_mgr->runtime[thread_id];
    if (stage >= PACKET_STAGE_MAX)
    {
        PACKET_MANAGER_LOG_ERROR("invalid stage %d", stage);
        assert(0);
        return;
    }
    runtime->stat.queue[stage].pkts_schedule++;
    runtime->stat.queue[stage].pkts_in++;
    TAILQ_INSERT_TAIL(&runtime->queue[stage], pkt, stage_tqe);
}
/*
 * Return the per-thread packet-manager statistics. The pointer aliases the
 * runtime's internal state and stays valid until packet_manager_free().
 *
 * Fix: a stale return through the removed packet_manager_runtime_get_stat()
 * was interleaved before the direct return; removed.
 */
struct packet_manager_stat *packet_manager_get_stat(struct packet_manager *pkt_mgr, uint16_t thread_id)
{
    struct packet_manager_runtime *runtime = pkt_mgr->runtime[thread_id];
    return &runtime->stat;
}
void packet_manager_print_stat(struct packet_manager *pkt_mgr, uint16_t thread_id)
{
struct packet_manager_runtime *runtime = pkt_mgr->runtime[thread_id];
packet_manager_runtime_print_stat(runtime);
PACKET_MANAGER_LOG_INFO("runtime: %p, pkts_ingress: %lu, pkts_egress: %lu",
runtime, runtime->stat.total.pkts_ingress,
runtime->stat.total.pkts_egress);
for (int i = 0; i < PACKET_QUEUE_MAX; i++)
{
PACKET_MANAGER_LOG_INFO("runtime: %p, %-24s stat => pkts_in: %lu, pkts_out: %lu, pkts_claim: %lu, pkts_schedule: %lu",
runtime,
packet_stage_to_str(i),
runtime->stat.queue[i].pkts_in,
runtime->stat.queue[i].pkts_out,
runtime->stat.queue[i].pkts_claim,
runtime->stat.queue[i].pkts_schedule);
}
}
/******************************************************************************
@@ -299,7 +404,7 @@ struct packet_manager *stellar_module_get_packet_manager(struct stellar_module_m
{
return NULL;
}
return (struct packet_manager*)stellar_module_get_ctx(pkt_mgr_mod);
return (struct packet_manager *)stellar_module_get_ctx(pkt_mgr_mod);
}
struct stellar_module *packet_manager_on_init(struct stellar_module_manager *mod_mgr)
@@ -307,10 +412,9 @@ struct stellar_module *packet_manager_on_init(struct stellar_module_manager *mod
assert(mod_mgr);
struct mq_schema *mq_schema = stellar_module_manager_get_mq_schema(mod_mgr);
assert(mq_schema);
const char *toml_file = stellar_module_manager_get_toml_path(mod_mgr);
assert(toml_file);
uint64_t thread_num = stellar_module_manager_get_max_thread_num(mod_mgr);
struct packet_manager *pkt_mgr = packet_manager_new(mq_schema, toml_file);
struct packet_manager *pkt_mgr = packet_manager_new(mq_schema, thread_num);
if (pkt_mgr == NULL)
{
return NULL;

View File

@@ -8,17 +8,36 @@ extern "C"
#include "stellar/mq.h"
#include "stellar/packet_manager.h"
struct packet_manager *packet_manager_new(struct mq_schema *mq_schema, const char *toml_file);
#define PACKET_QUEUE_MAX (PACKET_STAGE_MAX + 1)
struct packet_manager_stat
{
struct
{
uint64_t pkts_ingress;
uint64_t pkts_egress;
} total;
struct
{
uint64_t pkts_in; // include the packets that are scheduled
uint64_t pkts_out; // include the packets that are claimed
uint64_t pkts_claim;
uint64_t pkts_schedule;
} queue[PACKET_QUEUE_MAX]; // the last queue is for sending packets
};
struct packet_manager *packet_manager_new(struct mq_schema *mq_schema, uint64_t thread_num);
void packet_manager_free(struct packet_manager *pkt_mgr);
void packet_manager_init(struct packet_manager *pkt_mgr, uint16_t thread_id, struct mq_runtime *mq_rt);
int packet_manager_init(struct packet_manager *pkt_mgr, uint16_t thread_id, struct mq_runtime *mq_rt);
void packet_manager_ingress(struct packet_manager *pkt_mgr, uint16_t thread_id, struct packet *pkt);
struct packet *packet_manager_egress(struct packet_manager *pkt_mgr, uint16_t thread_id);
void packet_manager_dispatch(struct packet_manager *pkt_mgr, uint16_t thread_id);
struct packet_manager_stat *packet_manager_get_stat(struct packet_manager *pkt_mgr, uint16_t thread_id);
void packet_manager_print_stat(struct packet_manager *pkt_mgr, uint16_t thread_id);
const char *packet_stage_to_str(enum packet_stage stage);
#ifdef __cplusplus
}
#endif

View File

@@ -1,173 +0,0 @@
#include <assert.h>
#include <stdlib.h>
#include "packet_internal.h"
#include "packet_manager_runtime.h"
/*
 * Map a packet stage to its symbolic name for logging/diagnostics.
 * Any value outside the known stages (including PACKET_STAGE_MAX)
 * maps to "PACKET_STAGE_UNKNOWN".
 */
const char *packet_stage_to_str(enum packet_stage stage)
{
    if (stage == PACKET_STAGE_PREROUTING)
    {
        return "PACKET_STAGE_PREROUTING";
    }
    if (stage == PACKET_STAGE_INPUT)
    {
        return "PACKET_STAGE_INPUT";
    }
    if (stage == PACKET_STAGE_FORWARD)
    {
        return "PACKET_STAGE_FORWARD";
    }
    if (stage == PACKET_STAGE_OUTPUT)
    {
        return "PACKET_STAGE_OUTPUT";
    }
    if (stage == PACKET_STAGE_POSTROUTING)
    {
        return "PACKET_STAGE_POSTROUTING";
    }
    return "PACKET_STAGE_UNKNOWN";
}
/*
 * Allocate a zero-initialized per-thread runtime and prepare every
 * per-stage packet queue (including the trailing egress queue at
 * index PACKET_STAGE_MAX). Returns NULL on allocation failure.
 */
struct packet_manager_runtime *packet_manager_runtime_new()
{
    struct packet_manager_runtime *rt = calloc(1, sizeof(*rt));
    if (!rt)
    {
        PACKET_MANAGER_LOG_ERROR("failed to allocate memory for packet_manager_runtime");
        return NULL;
    }
    for (int q = 0; q < PACKET_QUEUE_MAX; q++)
    {
        TAILQ_INIT(&rt->queue[q]);
    }
    return rt;
}
/*
 * Release a per-thread runtime created by packet_manager_runtime_new().
 * Any packets still parked on a stage queue are drained and freed so
 * they are not leaked. Safe to call with NULL.
 *
 * Fix vs. original: the trailing `pkt_mgr_rt = NULL;` only nulled the
 * local parameter (no effect on the caller) and was removed.
 */
void packet_manager_runtime_free(struct packet_manager_runtime *pkt_mgr_rt)
{
    if (pkt_mgr_rt == NULL)
    {
        return;
    }
    for (int i = 0; i < PACKET_QUEUE_MAX; i++)
    {
        struct packet *pkt = NULL;
        while ((pkt = TAILQ_FIRST(&pkt_mgr_rt->queue[i])))
        {
            TAILQ_REMOVE(&pkt_mgr_rt->queue[i], pkt, stage_tqe);
            // TODO: also release the underlying mbuff; packet_free()
            // frees only the packet object itself.
            packet_free(pkt);
        }
    }
    free(pkt_mgr_rt);
}
/* Bind the per-thread MQ runtime that dispatch uses to publish stage messages. */
void packet_manager_runtime_init(struct packet_manager_runtime *pkt_mgr_rt, struct mq_runtime *mq_rt)
{
    pkt_mgr_rt->mq = mq_rt;
}
void packet_manager_runtime_ingress(struct packet_manager_runtime *pkt_mgr_rt, struct packet *pkt)
{
pkt_mgr_rt->stat.total.pkts_ingress++;
pkt_mgr_rt->stat.queue[PACKET_STAGE_PREROUTING].pkts_in++;
TAILQ_INSERT_TAIL(&pkt_mgr_rt->queue[PACKET_STAGE_PREROUTING], pkt, stage_tqe);
}
struct packet *packet_manager_runtime_egress(struct packet_manager_runtime *pkt_mgr_rt)
{
struct packet *pkt = TAILQ_FIRST(&pkt_mgr_rt->queue[PACKET_STAGE_MAX]);
if (pkt)
{
pkt_mgr_rt->stat.total.pkts_egress++;
pkt_mgr_rt->stat.queue[PACKET_STAGE_MAX].pkts_out++;
TAILQ_REMOVE(&pkt_mgr_rt->queue[PACKET_STAGE_MAX], pkt, stage_tqe);
}
return pkt;
}
/*
 * Walk every stage queue in order, publishing each packet to that
 * stage's subscribers via the MQ. A subscriber may claim the packet
 * (taking ownership) or schedule it to another stage; an unclaimed
 * packet advances to the next queue (the last queue, index
 * PACKET_STAGE_MAX, feeds egress). pkt_mgr_rt->stage records the stage
 * currently being dispatched so claim/schedule calls made from within
 * subscriber callbacks attribute their stats correctly.
 */
void packet_manager_runtime_dispatch(struct packet_manager_runtime *pkt_mgr_rt)
{
    for (int i = 0; i < PACKET_STAGE_MAX; i++)
    {
        pkt_mgr_rt->stage = i;
        struct packet *pkt = NULL;
        while ((pkt = TAILQ_FIRST(&pkt_mgr_rt->queue[pkt_mgr_rt->stage])))
        {
            // Reset per-packet claim state before publishing; callbacks
            // set it again via packet_manager_runtime_claim_packet().
            packet_set_claim(pkt, false);
            pkt_mgr_rt->claimed_cb = NULL;
            pkt_mgr_rt->cb_args = NULL;
            TAILQ_REMOVE(&pkt_mgr_rt->queue[pkt_mgr_rt->stage], pkt, stage_tqe);
            pkt_mgr_rt->stat.queue[pkt_mgr_rt->stage].pkts_out++;
            mq_runtime_publish_message(pkt_mgr_rt->mq, pkt_mgr_rt->stage, pkt);
            mq_runtime_dispatch(pkt_mgr_rt->mq);
            if (packet_is_claim(pkt))
            {
                // A subscriber took ownership: notify it and do not
                // advance the packet to the next stage.
                if (pkt_mgr_rt->claimed_cb)
                {
                    pkt_mgr_rt->claimed_cb(pkt, pkt_mgr_rt->cb_args);
                }
                packet_set_claim(pkt, false);
                continue;
            }
            // Unclaimed: hand the packet to the next stage's queue.
            TAILQ_INSERT_TAIL(&pkt_mgr_rt->queue[pkt_mgr_rt->stage + 1], pkt, stage_tqe);
            pkt_mgr_rt->stat.queue[pkt_mgr_rt->stage + 1].pkts_in++;
        }
    }
    // -1 marks "no dispatch pass in progress".
    pkt_mgr_rt->stage = -1;
}
/*
 * Claim a packet on behalf of the current stage's subscriber. After the
 * stage publish completes, dispatch invokes `cb` with `args` and drops
 * the packet from the pipeline. Returns 0 on success, -1 if the packet
 * was already claimed.
 */
int packet_manager_runtime_claim_packet(struct packet_manager_runtime *pkt_mgr_rt, struct packet *pkt, on_packet_claimed_callback cb, void *args)
{
    if (!packet_is_claim(pkt))
    {
        packet_set_claim(pkt, true);
        pkt_mgr_rt->claimed_cb = cb;
        pkt_mgr_rt->cb_args = args;
        pkt_mgr_rt->stat.queue[pkt_mgr_rt->stage].pkts_claim++;
        return 0;
    }
    PACKET_MANAGER_LOG_ERROR("packet is already claimed, cannot claim again");
    return -1;
}
/*
 * Re-queue a packet onto an explicit stage. Only real stages are valid
 * targets (the egress queue at PACKET_STAGE_MAX is not schedulable);
 * invalid stages are logged and trip an assert in debug builds.
 */
void packet_manager_runtime_schedule_packet(struct packet_manager_runtime *pkt_mgr_rt, struct packet *pkt, enum packet_stage stage)
{
    if (stage < PACKET_STAGE_MAX)
    {
        TAILQ_INSERT_TAIL(&pkt_mgr_rt->queue[stage], pkt, stage_tqe);
        pkt_mgr_rt->stat.queue[stage].pkts_schedule++;
        pkt_mgr_rt->stat.queue[stage].pkts_in++;
        return;
    }
    PACKET_MANAGER_LOG_ERROR("invalid stage %d", stage);
    assert(0);
}
/* Expose the runtime's stat block; the pointer stays valid for the runtime's lifetime. */
struct packet_manager_stat *packet_manager_runtime_get_stat(struct packet_manager_runtime *pkt_mgr_rt)
{
    return &pkt_mgr_rt->stat;
}
/* Log the runtime's total ingress/egress counters plus per-queue stats. */
void packet_manager_runtime_print_stat(struct packet_manager_runtime *pkt_mgr_rt)
{
    PACKET_MANAGER_LOG_INFO("runtime: %p, pkts_ingress: %lu, pkts_egress: %lu",
                            pkt_mgr_rt, pkt_mgr_rt->stat.total.pkts_ingress, pkt_mgr_rt->stat.total.pkts_egress);
    // One line per queue, including the trailing egress queue.
    for (int i = 0; i < PACKET_QUEUE_MAX; i++)
    {
        PACKET_MANAGER_LOG_INFO("runtime: %p, %-24s stat => pkts_in: %lu, pkts_out: %lu, pkts_claim: %lu, pkts_schedule: %lu",
                                pkt_mgr_rt,
                                packet_stage_to_str(i),
                                pkt_mgr_rt->stat.queue[i].pkts_in,
                                pkt_mgr_rt->stat.queue[i].pkts_out,
                                pkt_mgr_rt->stat.queue[i].pkts_claim,
                                pkt_mgr_rt->stat.queue[i].pkts_schedule);
    }
}

View File

@@ -1,69 +0,0 @@
#pragma once
#ifdef __cplusplus
extern "C"
{
#endif
#include <sys/queue.h>
#include "log_internal.h"
#include "stellar/mq.h"
#include "stellar/packet_manager.h"
// Logging shortcuts tagged with the "packet manager" component name.
#define PACKET_MANAGER_LOG_ERROR(format, ...) STELLAR_LOG_ERROR(__thread_local_logger, "packet manager", format, ##__VA_ARGS__)
#define PACKET_MANAGER_LOG_DEBUG(format, ...) STELLAR_LOG_DEBUG(__thread_local_logger, "packet manager", format, ##__VA_ARGS__)
#define PACKET_MANAGER_LOG_FATAL(format, ...) STELLAR_LOG_FATAL(__thread_local_logger, "packet manager", format, ##__VA_ARGS__)
#define PACKET_MANAGER_LOG_INFO(format, ...) STELLAR_LOG_INFO(__thread_local_logger, "packet manager", format, ##__VA_ARGS__)
// One queue per stage plus a trailing queue (index PACKET_STAGE_MAX)
// holding packets ready for egress.
#define PACKET_QUEUE_MAX (PACKET_STAGE_MAX + 1)
TAILQ_HEAD(packet_queue, packet);
// Per-runtime counters; "queue" entries are indexed by stage.
struct packet_manager_stat
{
    struct
    {
        uint64_t pkts_ingress;
        uint64_t pkts_egress;
    } total;
    struct
    {
        uint64_t pkts_in; // include the packets that are scheduled
        uint64_t pkts_out; // include the packets that are claimed
        uint64_t pkts_claim;
        uint64_t pkts_schedule;
    } queue[PACKET_QUEUE_MAX]; // the last queue is for sending packets
};
// Per-thread dispatch state. `stage` is the stage currently being
// dispatched (dispatch resets it to -1 when the pass finishes);
// claimed_cb/cb_args hold the claim notification registered by the
// current stage's subscriber.
struct packet_manager_runtime
{
    enum packet_stage stage;
    struct packet_queue queue[PACKET_QUEUE_MAX];
    void *cb_args;
    on_packet_claimed_callback *claimed_cb; // NOTE(review): declared as a pointer to the callback type while claim_packet assigns a plain `cb` — verify against the typedef of on_packet_claimed_callback
    struct mq_runtime *mq;
    struct packet_manager_stat stat;
};
struct packet_manager_runtime *packet_manager_runtime_new();
void packet_manager_runtime_free(struct packet_manager_runtime *pkt_mgr_rt);
void packet_manager_runtime_init(struct packet_manager_runtime *pkt_mgr_rt, struct mq_runtime *mq_rt);
void packet_manager_runtime_ingress(struct packet_manager_runtime *pkt_mgr_rt, struct packet *pkt);
struct packet *packet_manager_runtime_egress(struct packet_manager_runtime *pkt_mgr_rt);
void packet_manager_runtime_dispatch(struct packet_manager_runtime *pkt_mgr_rt);
int packet_manager_runtime_claim_packet(struct packet_manager_runtime *pkt_mgr_rt, struct packet *pkt, on_packet_claimed_callback cb, void *args);
void packet_manager_runtime_schedule_packet(struct packet_manager_runtime *pkt_mgr_rt, struct packet *pkt, enum packet_stage stage);
struct packet_manager_stat *packet_manager_runtime_get_stat(struct packet_manager_runtime *pkt_mgr_rt);
void packet_manager_runtime_print_stat(struct packet_manager_runtime *pkt_mgr_rt);
const char *packet_stage_to_str(enum packet_stage stage);
#ifdef __cplusplus
}
#endif

View File

@@ -0,0 +1,115 @@
#include <assert.h>
#include <stdlib.h>
#include "packet_pool.h"
#include "packet_internal.h"
// Fixed-capacity cache of struct packet objects. Pops beyond `capacity`
// fall back to the heap; pushes beyond `capacity` free the packet
// instead of caching it.
struct packet_pool
{
    uint64_t capacity; // upper bound on packets kept on free_list
    uint64_t used; // packets currently popped and held by callers
    uint64_t free; // packets currently cached on free_list
    struct packet_queue free_list; // reusable packet objects (pool_tqe linkage)
};
/*
 * Create a pool pre-populated with `capacity` zeroed packet objects.
 * Returns NULL if any allocation fails; packets allocated before the
 * failure are released via packet_pool_free().
 */
struct packet_pool *packet_pool_new(uint64_t capacity)
{
    struct packet_pool *pool = calloc(1, sizeof(*pool));
    if (pool == NULL)
    {
        return NULL;
    }
    // used/free start at 0 courtesy of calloc().
    pool->capacity = capacity;
    TAILQ_INIT(&pool->free_list);
    for (uint64_t i = 0; i < capacity; i++)
    {
        struct packet *pkt = calloc(1, sizeof(*pkt));
        if (pkt == NULL)
        {
            packet_pool_free(pool);
            return NULL;
        }
        TAILQ_INSERT_TAIL(&pool->free_list, pkt, pool_tqe);
        pool->free++;
    }
    return pool;
}
/*
 * Destroy a pool. All popped packets must have been pushed back first:
 * the asserts document that destroying a pool with outstanding (used)
 * packets is a caller bug. Safe to call with NULL.
 *
 * Fix vs. original: the trailing `pool = NULL;` only nulled the local
 * parameter (no effect on the caller) and was removed.
 */
void packet_pool_free(struct packet_pool *pool)
{
    struct packet *pkt;
    if (pool == NULL)
    {
        return;
    }
    while ((pkt = TAILQ_FIRST(&pool->free_list)))
    {
        TAILQ_REMOVE(&pool->free_list, pkt, pool_tqe);
        free(pkt);
        pool->free--;
    }
    assert(pool->used == 0);
    assert(pool->free == 0);
    free(pool);
}
/*
 * Take a packet from the pool. When the cache is empty the pool grows
 * past `capacity` via a heap allocation; such overflow packets are
 * freed rather than cached when pushed back (see packet_pool_push()).
 * Returns NULL on allocation failure or when `pool` is NULL.
 *
 * Fix vs. original: packet_pool_push() tolerates a NULL pool but pop
 * dereferenced it; add the matching guard for consistency.
 */
struct packet *packet_pool_pop(struct packet_pool *pool)
{
    if (pool == NULL)
    {
        return NULL;
    }
    struct packet *pkt = TAILQ_FIRST(&pool->free_list);
    if (pkt != NULL)
    {
        TAILQ_REMOVE(&pool->free_list, pkt, pool_tqe);
        pool->free--;
    }
    else
    {
        pkt = calloc(1, sizeof(*pkt));
        if (pkt == NULL)
        {
            return NULL;
        }
    }
    pool->used++;
    return pkt;
}
/*
 * Return a packet to the pool. The packet is cached on the free list
 * while the cache holds fewer than `capacity` entries; otherwise it is
 * released to the heap. A NULL pool or packet is a no-op.
 */
void packet_pool_push(struct packet_pool *pool, struct packet *pkt)
{
    if (pool == NULL || pkt == NULL)
    {
        return;
    }
    pool->used--;
    if (pool->free >= pool->capacity)
    {
        // Cache already full: drop the overflow packet.
        free(pkt);
        return;
    }
    TAILQ_INSERT_TAIL(&pool->free_list, pkt, pool_tqe);
    pool->free++;
}
/* Number of packets currently popped and held by callers. */
uint64_t packet_pool_get_used_num(const struct packet_pool *pool)
{
    return pool->used;
}
/* Number of packets currently cached on the pool's free list. */
uint64_t packet_pool_get_free_num(const struct packet_pool *pool)
{
    return pool->free;
}

View File

@@ -0,0 +1,22 @@
#pragma once
#ifdef __cplusplus
extern "C"
{
#endif
#include <stdint.h>
// Forward declarations: both types are opaque to users of this header.
// Fix vs. original: without "struct packet;" the struct's first
// appearance inside a prototype parameter list gives it prototype
// scope, which is incompatible with the definition elsewhere and
// triggers a compiler warning.
struct packet;
struct packet_pool;
/* Create a pool caching up to `capacity` packet objects; NULL on failure. */
struct packet_pool *packet_pool_new(uint64_t capacity);
/* Destroy a pool; all popped packets must be pushed back first. NULL ok. */
void packet_pool_free(struct packet_pool *pool);
/* Take a packet (heap fallback when the cache is empty); NULL on failure. */
struct packet *packet_pool_pop(struct packet_pool *pool);
/* Return a packet; cached up to `capacity`, freed beyond. NULL args ok. */
void packet_pool_push(struct packet_pool *pool, struct packet *pkt);
/* Number of packets currently handed out to callers. */
uint64_t packet_pool_get_used_num(const struct packet_pool *pool);
/* Number of packets currently cached on the free list. */
uint64_t packet_pool_get_free_num(const struct packet_pool *pool);
#ifdef __cplusplus
}
#endif

View File

@@ -1,4 +1,5 @@
#include <assert.h>
#include <sys/queue.h>
#include "tuple.h"
#include "uthash.h"
@@ -23,14 +24,14 @@ const struct route_ctx *packet_get_route_ctx(const struct packet *pkt)
return &pkt->meta.route_ctx;
}
void packet_set_origin_ctx(struct packet *pkt, void *ctx)
void packet_set_origin(struct packet *pkt, struct packet_origin *origin)
{
pkt->meta.origin_ctx = ctx;
pkt->origin = *origin;
}
const void *packet_get_origin_ctx(const struct packet *pkt)
struct packet_origin *packet_get_origin(struct packet *pkt)
{
return pkt->meta.origin_ctx;
return &pkt->origin;
}
void packet_set_sids(struct packet *pkt, const struct sids *sids)
@@ -925,7 +926,14 @@ struct packet *packet_dup(const struct packet *pkt)
dup_pkt->frag_layer = &dup_pkt->layers[pkt->frag_layer - pkt->layers];
}
memcpy(&dup_pkt->meta, &pkt->meta, sizeof(struct metadata));
packet_set_origin_ctx(dup_pkt, (void *)NULL);
struct packet_origin origin = {
.type = ORIGIN_TYPE_USER,
.ctx = NULL,
.cb = NULL,
.args = NULL,
.thr_idx = -1,
};
packet_set_origin(dup_pkt, &origin);
return dup_pkt;
}
@@ -941,6 +949,11 @@ void packet_free(struct packet *pkt)
return;
}
if (pkt->origin.cb)
{
pkt->origin.cb(pkt, pkt->origin.args);
}
if (pkt->need_free)
{
free((void *)pkt);
@@ -953,14 +966,54 @@ int packet_is_fragment(const struct packet *pkt)
return (pkt->frag_layer) ? 1 : 0;
}
int packet_is_defraged(const struct packet *pkt)
{
return pkt->is_defraged;
}
void packet_set_defraged(struct packet *pkt)
{
pkt->is_defraged = 1;
TAILQ_INIT(&pkt->frag_list);
}
void packet_push_frag(struct packet *pkt, struct packet *frag)
{
if (!packet_is_defraged(pkt))
{
assert(0);
return;
}
TAILQ_INSERT_TAIL(&pkt->frag_list, frag, frag_tqe);
}
struct packet *packet_pop_frag(struct packet *pkt)
{
if (!packet_is_defraged(pkt))
{
assert(0);
return NULL;
}
struct packet *frag = TAILQ_FIRST(&pkt->frag_list);
if (frag)
{
TAILQ_REMOVE(&pkt->frag_list, frag, frag_tqe);
}
return frag;
}
void packet_set_exdata(struct packet *pkt, int idx, void *ex_ptr)
{
struct exdata_runtime *exdata_rt = (struct exdata_runtime *)packet_get_user_data(pkt);
assert(exdata_rt);
exdata_set(exdata_rt, idx, ex_ptr);
}
void *packet_get_exdata(struct packet *pkt, int idx)
{
struct exdata_runtime *exdata_rt = (struct exdata_runtime *)packet_get_user_data(pkt);
assert(exdata_rt);
return exdata_get(exdata_rt, idx);
}

View File

@@ -55,6 +55,9 @@ target_link_libraries(gtest_packet_filter packet_manager gtest)
add_executable(gtest_packet_ldbc gtest_packet_ldbc.cpp)
target_link_libraries(gtest_packet_ldbc packet_manager gtest)
add_executable(gtest_packet_pool gtest_packet_pool.cpp)
target_link_libraries(gtest_packet_pool packet_manager gtest)
add_executable(gtest_packet_manager gtest_packet_manager.cpp)
target_link_libraries(gtest_packet_manager packet_manager gtest)
@@ -78,6 +81,7 @@ gtest_discover_tests(gtest_packet_parser)
gtest_discover_tests(gtest_packet_builder)
gtest_discover_tests(gtest_packet_filter)
gtest_discover_tests(gtest_packet_ldbc)
gtest_discover_tests(gtest_packet_pool)
gtest_discover_tests(gtest_packet_manager)
file(COPY ../../../conf/ DESTINATION ./conf/)

View File

@@ -2,7 +2,6 @@
#include "packet_parser.h"
#include "packet_internal.h"
#include "packet_manager_runtime.h"
#include "packet_manager_internal.h"
/******************************************************************************
@@ -90,7 +89,7 @@ TEST(PACKET_MANAGER, NEW_FREE)
struct mq_schema *mq_schema = mq_schema_new();
EXPECT_TRUE(mq_schema);
struct packet_manager *pkt_mgr = packet_manager_new(mq_schema, "./conf/stellar.toml");
struct packet_manager *pkt_mgr = packet_manager_new(mq_schema, 1);
EXPECT_TRUE(pkt_mgr);
packet_manager_free(pkt_mgr);
@@ -120,7 +119,7 @@ TEST(PACKET_MANAGER, SUBSCRIBER_PACKET_STAGE)
EXPECT_TRUE(mq_rt);
// module init
struct packet_manager *pkt_mgr = packet_manager_new(mq_schema, "./conf/stellar.toml");
struct packet_manager *pkt_mgr = packet_manager_new(mq_schema, 1);
EXPECT_TRUE(pkt_mgr);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_PREROUTING, on_packet_stage, NULL) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_INPUT, on_packet_stage, NULL) == 0);
@@ -178,11 +177,11 @@ static void packet_claimed(struct packet *pkt, void *args)
free(str);
}
static void on_packet_stage_claim_packet_success(enum packet_stage stage, struct packet *pkt, void *args)
static void claim_packet_success(enum packet_stage stage, struct packet *pkt, void *args)
{
struct packet_manager *pkt_mgr = (struct packet_manager *)args;
printf("on_packet_stage_claim_packet_success: %s\n", packet_stage_to_str(stage));
printf("claim_packet_success: %s\n", packet_stage_to_str(stage));
static int count = 0;
EXPECT_TRUE(count == 0);
@@ -193,11 +192,11 @@ static void on_packet_stage_claim_packet_success(enum packet_stage stage, struct
count++;
}
static void on_packet_stage_claim_packet_failed(enum packet_stage stage, struct packet *pkt, void *args)
static void claim_packet_failed(enum packet_stage stage, struct packet *pkt, void *args)
{
struct packet_manager *pkt_mgr = (struct packet_manager *)args;
printf("on_packet_stage_claim_packet_failed: %s\n", packet_stage_to_str(stage));
printf("claim_packet_failed: %s\n", packet_stage_to_str(stage));
static int count = 0;
EXPECT_TRUE(count == 0);
@@ -217,19 +216,19 @@ TEST(PACKET_MANAGER, CLAIM_PACKET)
EXPECT_TRUE(mq_rt);
// module init
struct packet_manager *pkt_mgr = packet_manager_new(mq_schema, "./conf/stellar.toml");
struct packet_manager *pkt_mgr = packet_manager_new(mq_schema, 1);
EXPECT_TRUE(pkt_mgr);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_PREROUTING, on_packet_stage_claim_packet_success, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_INPUT, on_packet_stage_claim_packet_success, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_FORWARD, on_packet_stage_claim_packet_success, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_OUTPUT, on_packet_stage_claim_packet_success, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_POSTROUTING, on_packet_stage_claim_packet_success, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_PREROUTING, claim_packet_success, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_INPUT, claim_packet_success, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_FORWARD, claim_packet_success, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_OUTPUT, claim_packet_success, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_POSTROUTING, claim_packet_success, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_PREROUTING, on_packet_stage_claim_packet_failed, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_INPUT, on_packet_stage_claim_packet_failed, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_FORWARD, on_packet_stage_claim_packet_failed, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_OUTPUT, on_packet_stage_claim_packet_failed, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_POSTROUTING, on_packet_stage_claim_packet_failed, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_PREROUTING, claim_packet_failed, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_INPUT, claim_packet_failed, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_FORWARD, claim_packet_failed, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_OUTPUT, claim_packet_failed, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_POSTROUTING, claim_packet_failed, pkt_mgr) == 0);
// per-thread init
packet_manager_init(pkt_mgr, thread_id, mq_rt);
@@ -296,7 +295,7 @@ TEST(PACKET_MANAGER, SCHEDULE_PACKET)
EXPECT_TRUE(mq_rt);
// module init
struct packet_manager *pkt_mgr = packet_manager_new(mq_schema, "./conf/stellar.toml");
struct packet_manager *pkt_mgr = packet_manager_new(mq_schema, 1);
EXPECT_TRUE(pkt_mgr);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_PREROUTING, on_packet_stage_schedule_packet, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_INPUT, on_packet_stage_schedule_packet, pkt_mgr) == 0);
@@ -398,7 +397,7 @@ TEST(PACKET_MANAGER, SCHEDULE_CLAIMED_PACKET)
EXPECT_TRUE(mq_rt);
// module init
struct packet_manager *pkt_mgr = packet_manager_new(mq_schema, "./conf/stellar.toml");
struct packet_manager *pkt_mgr = packet_manager_new(mq_schema, 1);
EXPECT_TRUE(pkt_mgr);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_PREROUTING, on_packet_stage_claim_packet_to_schedule, pkt_mgr) == 0);
EXPECT_TRUE(packet_manager_subscribe(pkt_mgr, PACKET_STAGE_INPUT, on_packet_stage_claim_packet_to_schedule, pkt_mgr) == 0);

View File

@@ -0,0 +1,64 @@
#include <gtest/gtest.h>
#include "packet_pool.h"
// Exercises the full packet_pool lifecycle with capacity 3: counters
// after construction, pops (including the heap-fallback pop past
// capacity), pushes (including the overflow push that frees instead of
// caching), and teardown.
TEST(PACKET_POOL, TEST)
{
    struct packet *pkt1 = NULL;
    struct packet *pkt2 = NULL;
    struct packet *pkt3 = NULL;
    struct packet *pkt4 = NULL;
    // new
    struct packet_pool *pool = packet_pool_new(3);
    EXPECT_TRUE(pool != NULL);
    EXPECT_TRUE(packet_pool_get_used_num(pool) == 0);
    EXPECT_TRUE(packet_pool_get_free_num(pool) == 3);
    // pop
    pkt1 = packet_pool_pop(pool);
    EXPECT_TRUE(pkt1 != NULL);
    EXPECT_TRUE(packet_pool_get_used_num(pool) == 1);
    EXPECT_TRUE(packet_pool_get_free_num(pool) == 2);
    pkt2 = packet_pool_pop(pool);
    EXPECT_TRUE(pkt2 != NULL);
    EXPECT_TRUE(packet_pool_get_used_num(pool) == 2);
    EXPECT_TRUE(packet_pool_get_free_num(pool) == 1);
    pkt3 = packet_pool_pop(pool);
    EXPECT_TRUE(pkt3 != NULL);
    EXPECT_TRUE(packet_pool_get_used_num(pool) == 3);
    EXPECT_TRUE(packet_pool_get_free_num(pool) == 0);
    // cache exhausted: this pop falls back to the heap, so used (4)
    // exceeds capacity (3) while free stays 0
    pkt4 = packet_pool_pop(pool);
    EXPECT_TRUE(pkt4 != NULL);
    EXPECT_TRUE(packet_pool_get_used_num(pool) == 4);
    EXPECT_TRUE(packet_pool_get_free_num(pool) == 0);
    // push
    packet_pool_push(pool, pkt1);
    EXPECT_TRUE(packet_pool_get_used_num(pool) == 3);
    EXPECT_TRUE(packet_pool_get_free_num(pool) == 1);
    packet_pool_push(pool, pkt2);
    EXPECT_TRUE(packet_pool_get_used_num(pool) == 2);
    EXPECT_TRUE(packet_pool_get_free_num(pool) == 2);
    packet_pool_push(pool, pkt3);
    EXPECT_TRUE(packet_pool_get_used_num(pool) == 1);
    EXPECT_TRUE(packet_pool_get_free_num(pool) == 3);
    // cache already at capacity: pkt4 is freed, free stays at 3
    packet_pool_push(pool, pkt4);
    EXPECT_TRUE(packet_pool_get_used_num(pool) == 0);
    EXPECT_TRUE(packet_pool_get_free_num(pool) == 3);
    // free
    packet_pool_free(pool);
}
// Standard GoogleTest entry point.
int main(int argc, char **argv)
{
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}

View File

@@ -14,15 +14,13 @@ extern "C"
#include "stellar/session.h"
#include "tcp_reassembly.h"
#define EX_DATA_MAX_COUNT 4
// output format: "${src_addr}:${src_port}-${dst_addr}:${dst_port}-${ip_proto}-${domain}"
// output max len: (46 + 1 + 5) + 1 + (46 + 1 + 5) + 1 + 1 + 1 + 20 = 129
#define TUPLE6_STR_SIZE 130
struct tcp_half
{
struct tcp_reassembly *assembler;
struct tcp_reassembly *tcp_reass;
struct tcp_segment in_order; // current packet in order segment
uint32_t in_order_ref; // reference count of current packet in order segment
@@ -62,7 +60,6 @@ struct session
struct route_ctx route_ctx[MAX_FLOW_TYPE];
const struct packet *first_pkt[MAX_FLOW_TYPE];
const struct packet *curr_pkt;
void *ex_data[EX_DATA_MAX_COUNT];
void *user_data;
int is_symmetric;
int dup;
@@ -75,7 +72,7 @@ struct session
struct session_manager_stat *sess_mgr_stat;
};
TAILQ_HEAD(session_list, session);
TAILQ_HEAD(session_queue, session);
void session_init(struct session *sess);

View File

@@ -6,7 +6,7 @@
#include "stellar/session_manager.h"
#include "stellar/module_manager.h"
#include "utils.h"
#include "utils_internal.h"
#include "session_internal.h"
#include "session_manager_runtime.h"
@@ -15,7 +15,6 @@
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wunused-function"
struct session_manager_schema
{
struct exdata_schema *exdata;
@@ -133,6 +132,10 @@ fast_path:
static void on_packet_output(enum packet_stage stage, struct packet *pkt, void *args)
{
struct session_manager *sess_mgr = (struct session_manager *)args;
struct stellar_module_manager *mod_mgr = sess_mgr->mod_mgr;
int thread_id = stellar_module_manager_get_thread_id(mod_mgr);
struct session_manager_runtime *sess_mgr_rt = sess_mgr->runtime[thread_id];
struct session *sess = (struct session *)packet_get_exdata(pkt, sess_mgr->schema->pkt_exdata_idx);
if (sess)
{
@@ -157,11 +160,17 @@ static void on_packet_output(enum packet_stage stage, struct packet *pkt, void *
session_set_current_packet(sess, NULL);
session_set_flow_type(sess, FLOW_TYPE_NONE);
}
if (packet_get_origin(pkt) == NULL)
{
session_manager_runtime_record_duplicated_packet(sess_mgr_rt, pkt);
}
}
static void clean_session(struct session_manager_runtime *sess_mgr_rt, uint64_t now_ms)
{
#define MAX_CLEANED_SESS 1024
char buffer[4096] = {0};
struct session *sess = NULL;
struct session *cleaned_sess[MAX_CLEANED_SESS] = {NULL};
@@ -169,6 +178,16 @@ static void clean_session(struct session_manager_runtime *sess_mgr_rt, uint64_t
for (uint64_t j = 0; j < used; j++)
{
sess = cleaned_sess[j];
session_to_str(sess, 0, buffer, sizeof(buffer));
SESSION_MANAGER_LOG_INFO("session free: %s", buffer);
// TODO publish session free msg
// TODO mq_runtime_dispatch_immediate()
struct exdata_runtime *exdata_rt = (struct exdata_runtime *)session_get_user_data(sess);
exdata_runtime_free(exdata_rt);
session_manager_runtime_free_session(sess_mgr_rt, sess);
}
}
@@ -236,8 +255,6 @@ struct session_manager_schema *session_manager_schema_new(struct packet_manager
goto error_out;
}
// TODO register polling
sess_mgr_schema->mq = mq;
sess_mgr_schema->pkt_exdata_idx = packet_manager_new_packet_exdata_index(pkt_mgr, "session_manager", NULL, NULL);
if (sess_mgr_schema->pkt_exdata_idx == -1)
@@ -316,19 +333,16 @@ void session_manager_free(struct session_manager *sess_mgr)
struct session_manager *session_manager_new(struct stellar_module_manager *mod_mgr, struct packet_manager *pkt_mgr, struct mq_schema *mq_schema, const char *toml_file)
{
assert(pkt_mgr);
assert(mq_schema);
assert(toml_file);
uint64_t thread_num;
uint64_t instance_id;
uint64_t now_ms = clock_get_real_time_ms();
if (load_and_validate_toml_integer_config(toml_file, "instance.id", (uint64_t *)&instance_id, 0, 4095))
if (load_toml_integer_config(toml_file, "instance.id", (uint64_t *)&instance_id, 0, 4095))
{
return NULL;
}
if (load_and_validate_toml_integer_config(toml_file, "packet_io.nr_worker_thread", (uint64_t *)&thread_num, 0, MAX_THREAD_NUM))
if (load_toml_integer_config(toml_file, "packet_io.thread_num", (uint64_t *)&thread_num, 0, MAX_THREAD_NUM))
{
return NULL;
}

View File

@@ -3,7 +3,7 @@
#include <assert.h>
#include <errno.h>
#include "utils.h"
#include "utils_internal.h"
#include "packet_helper.h"
#include "packet_filter.h"
#include "session_internal.h"
@@ -28,7 +28,7 @@ struct snowflake
struct session_manager_runtime
{
struct session_list evicte_list;
struct session_queue evicte_list;
struct session_pool *sess_pool;
struct session_timer *sess_timer;
struct session_table *tcp_sess_table;
@@ -167,30 +167,30 @@ static uint64_t snowflake_generate(struct snowflake *sf, uint64_t now_sec)
static void tcp_clean(struct session_manager_runtime *sess_mgr_rt, struct session *sess)
{
struct tcp_reassembly *c2s_ssembler = sess->tcp_halfs[FLOW_TYPE_C2S].assembler;
struct tcp_reassembly *s2c_ssembler = sess->tcp_halfs[FLOW_TYPE_S2C].assembler;
struct tcp_reassembly *c2s_tcp_reass = sess->tcp_halfs[FLOW_TYPE_C2S].tcp_reass;
struct tcp_reassembly *s2c_tcp_reass = sess->tcp_halfs[FLOW_TYPE_S2C].tcp_reass;
struct tcp_segment *seg;
if (c2s_ssembler)
if (c2s_tcp_reass)
{
while ((seg = tcp_reassembly_expire(c2s_ssembler, UINT64_MAX)))
while ((seg = tcp_reassembly_expire(c2s_tcp_reass, UINT64_MAX)))
{
session_inc_stat(sess, FLOW_TYPE_C2S, STAT_TCP_SEGMENTS_RELEASED, 1);
session_inc_stat(sess, FLOW_TYPE_C2S, STAT_TCP_PAYLOADS_RELEASED, seg->len);
sess_mgr_rt->stat.tcp_segs_freed++;
tcp_segment_free(seg);
}
tcp_reassembly_free(c2s_ssembler);
tcp_reassembly_free(c2s_tcp_reass);
}
if (s2c_ssembler)
if (s2c_tcp_reass)
{
while ((seg = tcp_reassembly_expire(s2c_ssembler, UINT64_MAX)))
while ((seg = tcp_reassembly_expire(s2c_tcp_reass, UINT64_MAX)))
{
session_inc_stat(sess, FLOW_TYPE_S2C, STAT_TCP_SEGMENTS_RELEASED, 1);
session_inc_stat(sess, FLOW_TYPE_S2C, STAT_TCP_PAYLOADS_RELEASED, seg->len);
sess_mgr_rt->stat.tcp_segs_freed++;
tcp_segment_free(seg);
}
tcp_reassembly_free(s2c_ssembler);
tcp_reassembly_free(s2c_tcp_reass);
}
}
@@ -201,18 +201,18 @@ static int tcp_init(struct session_manager_runtime *sess_mgr_rt, struct session
return 0;
}
sess->tcp_halfs[FLOW_TYPE_C2S].assembler = tcp_reassembly_new(sess_mgr_rt->cfg.tcp_reassembly.timeout_ms, sess_mgr_rt->cfg.tcp_reassembly.buffered_segments_max);
sess->tcp_halfs[FLOW_TYPE_S2C].assembler = tcp_reassembly_new(sess_mgr_rt->cfg.tcp_reassembly.timeout_ms, sess_mgr_rt->cfg.tcp_reassembly.buffered_segments_max);
if (sess->tcp_halfs[FLOW_TYPE_C2S].assembler == NULL || sess->tcp_halfs[FLOW_TYPE_S2C].assembler == NULL)
sess->tcp_halfs[FLOW_TYPE_C2S].tcp_reass = tcp_reassembly_new(sess_mgr_rt->cfg.tcp_reassembly.timeout_ms, sess_mgr_rt->cfg.tcp_reassembly.buffered_segments_max);
sess->tcp_halfs[FLOW_TYPE_S2C].tcp_reass = tcp_reassembly_new(sess_mgr_rt->cfg.tcp_reassembly.timeout_ms, sess_mgr_rt->cfg.tcp_reassembly.buffered_segments_max);
if (sess->tcp_halfs[FLOW_TYPE_C2S].tcp_reass == NULL || sess->tcp_halfs[FLOW_TYPE_S2C].tcp_reass == NULL)
{
tcp_clean(sess_mgr_rt, sess);
return -1;
}
SESSION_MANAGER_LOG_DEBUG("session %lu %s new c2s tcp assembler %p, s2c tcp assembler %p",
SESSION_MANAGER_LOG_DEBUG("session %lu %s new c2s tcp tcp_reass %p, s2c tcp tcp_reass %p",
session_get_id(sess), session_get0_readable_addr(sess),
sess->tcp_halfs[FLOW_TYPE_C2S].assembler,
sess->tcp_halfs[FLOW_TYPE_S2C].assembler);
sess->tcp_halfs[FLOW_TYPE_C2S].tcp_reass,
sess->tcp_halfs[FLOW_TYPE_S2C].tcp_reass);
return 0;
}
@@ -257,10 +257,10 @@ static void tcp_update(struct session_manager_runtime *sess_mgr_rt, struct sessi
if (unlikely(flags & TH_SYN))
{
// len > 0 is SYN with data (TCP Fast Open)
tcp_reassembly_set_recv_next(half->assembler, len ? half->seq : half->seq + 1);
tcp_reassembly_set_recv_next(half->tcp_reass, len ? half->seq : half->seq + 1);
}
seg = tcp_reassembly_expire(half->assembler, sess_mgr_rt->now_ms);
seg = tcp_reassembly_expire(half->tcp_reass, sess_mgr_rt->now_ms);
if (seg)
{
session_inc_stat(sess, type, STAT_TCP_SEGMENTS_EXPIRED, 1);
@@ -280,7 +280,7 @@ static void tcp_update(struct session_manager_runtime *sess_mgr_rt, struct sessi
session_inc_stat(sess, type, STAT_TCP_PAYLOADS_RECEIVED, len);
sess_mgr_rt->stat.tcp_segs_input++;
uint32_t rcv_nxt = tcp_reassembly_get_recv_next(half->assembler);
uint32_t rcv_nxt = tcp_reassembly_get_recv_next(half->tcp_reass);
// in order
if (half->seq == rcv_nxt)
{
@@ -291,7 +291,7 @@ static void tcp_update(struct session_manager_runtime *sess_mgr_rt, struct sessi
half->in_order.data = tcp_layer->pld_ptr;
half->in_order.len = len;
half->in_order_ref = 0;
tcp_reassembly_inc_recv_next(half->assembler, len);
tcp_reassembly_inc_recv_next(half->tcp_reass, len);
}
// retransmission
else if (uint32_before(uint32_add(half->seq, len), rcv_nxt))
@@ -302,7 +302,7 @@ static void tcp_update(struct session_manager_runtime *sess_mgr_rt, struct sessi
}
else if ((seg = tcp_segment_new(half->seq, tcp_layer->pld_ptr, len)))
{
switch (tcp_reassembly_push(half->assembler, seg, sess_mgr_rt->now_ms))
switch (tcp_reassembly_push(half->tcp_reass, seg, sess_mgr_rt->now_ms))
{
case -2:
session_inc_stat(sess, type, STAT_TCP_SEGMENTS_RETRANSMIT, 1);
@@ -481,39 +481,39 @@ struct session_manager_config *session_manager_config_new(const char *toml_file)
}
int ret = 0;
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_session_max", (uint64_t *)&sess_mgr_cfg->tcp_session_max, EVICTE_SESSION_BURST * 2, UINT64_MAX);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.udp_session_max", (uint64_t *)&sess_mgr_cfg->udp_session_max, EVICTE_SESSION_BURST * 2, UINT64_MAX);
ret += load_toml_integer_config(toml_file, "session_manager.tcp_session_max", (uint64_t *)&sess_mgr_cfg->tcp_session_max, EVICTE_SESSION_BURST * 2, UINT64_MAX);
ret += load_toml_integer_config(toml_file, "session_manager.udp_session_max", (uint64_t *)&sess_mgr_cfg->udp_session_max, EVICTE_SESSION_BURST * 2, UINT64_MAX);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.evict_old_on_tcp_table_limit", (uint64_t *)&sess_mgr_cfg->evict_old_on_tcp_table_limit, 0, 1);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.evict_old_on_udp_table_limit", (uint64_t *)&sess_mgr_cfg->evict_old_on_udp_table_limit, 0, 1);
ret += load_toml_integer_config(toml_file, "session_manager.evict_old_on_tcp_table_limit", (uint64_t *)&sess_mgr_cfg->evict_old_on_tcp_table_limit, 0, 1);
ret += load_toml_integer_config(toml_file, "session_manager.evict_old_on_udp_table_limit", (uint64_t *)&sess_mgr_cfg->evict_old_on_udp_table_limit, 0, 1);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.expire_period_ms", (uint64_t *)&sess_mgr_cfg->expire_period_ms, 0, 60000);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.expire_batch_max", (uint64_t *)&sess_mgr_cfg->expire_batch_max, 1, 1024);
ret += load_toml_integer_config(toml_file, "session_manager.expire_period_ms", (uint64_t *)&sess_mgr_cfg->expire_period_ms, 0, 60000);
ret += load_toml_integer_config(toml_file, "session_manager.expire_batch_max", (uint64_t *)&sess_mgr_cfg->expire_batch_max, 1, 1024);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.init", (uint64_t *)&sess_mgr_cfg->tcp_timeout_ms.init, 1, 60000);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.handshake", (uint64_t *)&sess_mgr_cfg->tcp_timeout_ms.handshake, 1, 60000);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.data", (uint64_t *)&sess_mgr_cfg->tcp_timeout_ms.data, 1, 15999999000);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.half_closed", (uint64_t *)&sess_mgr_cfg->tcp_timeout_ms.half_closed, 1, 604800000);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.time_wait", (uint64_t *)&sess_mgr_cfg->tcp_timeout_ms.time_wait, 1, 60000);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.discard_default", (uint64_t *)&sess_mgr_cfg->tcp_timeout_ms.discard_default, 1, 15999999000);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.unverified_rst", (uint64_t *)&sess_mgr_cfg->tcp_timeout_ms.unverified_rst, 1, 60000);
ret += load_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.init", (uint64_t *)&sess_mgr_cfg->tcp_timeout_ms.init, 1, 60000);
ret += load_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.handshake", (uint64_t *)&sess_mgr_cfg->tcp_timeout_ms.handshake, 1, 60000);
ret += load_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.data", (uint64_t *)&sess_mgr_cfg->tcp_timeout_ms.data, 1, 15999999000);
ret += load_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.half_closed", (uint64_t *)&sess_mgr_cfg->tcp_timeout_ms.half_closed, 1, 604800000);
ret += load_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.time_wait", (uint64_t *)&sess_mgr_cfg->tcp_timeout_ms.time_wait, 1, 60000);
ret += load_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.discard_default", (uint64_t *)&sess_mgr_cfg->tcp_timeout_ms.discard_default, 1, 15999999000);
ret += load_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.unverified_rst", (uint64_t *)&sess_mgr_cfg->tcp_timeout_ms.unverified_rst, 1, 60000);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.udp_timeout_ms.data", (uint64_t *)&sess_mgr_cfg->udp_timeout_ms.data, 1, 15999999000);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.udp_timeout_ms.discard_default", (uint64_t *)&sess_mgr_cfg->udp_timeout_ms.discard_default, 1, 15999999000);
ret += load_toml_integer_config(toml_file, "session_manager.udp_timeout_ms.data", (uint64_t *)&sess_mgr_cfg->udp_timeout_ms.data, 1, 15999999000);
ret += load_toml_integer_config(toml_file, "session_manager.udp_timeout_ms.discard_default", (uint64_t *)&sess_mgr_cfg->udp_timeout_ms.discard_default, 1, 15999999000);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.duplicated_packet_bloom_filter.enable", (uint64_t *)&sess_mgr_cfg->duplicated_packet_bloom_filter.enable, 0, 1);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.duplicated_packet_bloom_filter.capacity", (uint64_t *)&sess_mgr_cfg->duplicated_packet_bloom_filter.capacity, 1, 4294967295);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.duplicated_packet_bloom_filter.time_window_ms", (uint64_t *)&sess_mgr_cfg->duplicated_packet_bloom_filter.time_window_ms, 1, 60000);
ret += load_and_validate_toml_double_config(toml_file, "session_manager.duplicated_packet_bloom_filter.error_rate", (double *)&sess_mgr_cfg->duplicated_packet_bloom_filter.error_rate, 0.0, 1.0);
ret += load_toml_integer_config(toml_file, "session_manager.duplicated_packet_bloom_filter.enable", (uint64_t *)&sess_mgr_cfg->duplicated_packet_bloom_filter.enable, 0, 1);
ret += load_toml_integer_config(toml_file, "session_manager.duplicated_packet_bloom_filter.capacity", (uint64_t *)&sess_mgr_cfg->duplicated_packet_bloom_filter.capacity, 1, 4294967295);
ret += load_toml_integer_config(toml_file, "session_manager.duplicated_packet_bloom_filter.time_window_ms", (uint64_t *)&sess_mgr_cfg->duplicated_packet_bloom_filter.time_window_ms, 1, 60000);
ret += load_toml_double_config(toml_file, "session_manager.duplicated_packet_bloom_filter.error_rate", (double *)&sess_mgr_cfg->duplicated_packet_bloom_filter.error_rate, 0.0, 1.0);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.evicted_session_bloom_filter.enable", (uint64_t *)&sess_mgr_cfg->evicted_session_bloom_filter.enable, 0, 1);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.evicted_session_bloom_filter.capacity", (uint64_t *)&sess_mgr_cfg->evicted_session_bloom_filter.capacity, 1, 4294967295);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.evicted_session_bloom_filter.time_window_ms", (uint64_t *)&sess_mgr_cfg->evicted_session_bloom_filter.time_window_ms, 1, 60000);
ret += load_and_validate_toml_double_config(toml_file, "session_manager.evicted_session_bloom_filter.error_rate", (double *)&sess_mgr_cfg->evicted_session_bloom_filter.error_rate, 0.0, 1.0);
ret += load_toml_integer_config(toml_file, "session_manager.evicted_session_bloom_filter.enable", (uint64_t *)&sess_mgr_cfg->evicted_session_bloom_filter.enable, 0, 1);
ret += load_toml_integer_config(toml_file, "session_manager.evicted_session_bloom_filter.capacity", (uint64_t *)&sess_mgr_cfg->evicted_session_bloom_filter.capacity, 1, 4294967295);
ret += load_toml_integer_config(toml_file, "session_manager.evicted_session_bloom_filter.time_window_ms", (uint64_t *)&sess_mgr_cfg->evicted_session_bloom_filter.time_window_ms, 1, 60000);
ret += load_toml_double_config(toml_file, "session_manager.evicted_session_bloom_filter.error_rate", (double *)&sess_mgr_cfg->evicted_session_bloom_filter.error_rate, 0.0, 1.0);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_reassembly.enable", (uint64_t *)&sess_mgr_cfg->tcp_reassembly.enable, 0, 1);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_reassembly.timeout_ms", (uint64_t *)&sess_mgr_cfg->tcp_reassembly.timeout_ms, 1, 60000);
ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_reassembly.buffered_segments_max", (uint64_t *)&sess_mgr_cfg->tcp_reassembly.buffered_segments_max, 1, 512);
ret += load_toml_integer_config(toml_file, "session_manager.tcp_reassembly.enable", (uint64_t *)&sess_mgr_cfg->tcp_reassembly.enable, 0, 1);
ret += load_toml_integer_config(toml_file, "session_manager.tcp_reassembly.timeout_ms", (uint64_t *)&sess_mgr_cfg->tcp_reassembly.timeout_ms, 1, 60000);
ret += load_toml_integer_config(toml_file, "session_manager.tcp_reassembly.buffered_segments_max", (uint64_t *)&sess_mgr_cfg->tcp_reassembly.buffered_segments_max, 1, 512);
if (ret != 0)
{
@@ -1066,9 +1066,6 @@ void session_manager_runtime_free_session(struct session_manager_runtime *sess_m
{
if (sess)
{
struct exdata_runtime *exdata_rt = (struct exdata_runtime *)session_get_user_data(sess);
exdata_runtime_free(exdata_rt);
SESSION_MANAGER_RUNTIME_LOG_DEBUG("session %lu closed (%s)", session_get_id(sess), closing_reason_to_str(session_get_closing_reason(sess)));
SESSION_MANAGER_LOG_DEBUG("session %lu closed (%s)", session_get_id(sess), closing_reason_to_str(session_get_closing_reason(sess)));
@@ -1353,7 +1350,7 @@ uint64_t session_manager_runtime_scan(const struct session_manager_runtime *sess
{
return mached_sess_num;
}
capacity = session_pool_capacity_size(sess_mgr_rt->sess_pool);
capacity = sess_mgr_rt->cfg.tcp_session_max + sess_mgr_rt->cfg.udp_session_max;
if (opts->cursor >= capacity)
{
return mached_sess_num;

View File

@@ -1,3 +1,4 @@
#include <assert.h>
#include <stdlib.h>
#include "session_internal.h"
@@ -6,8 +7,9 @@
struct session_pool
{
uint64_t capacity;
uint64_t available;
struct session_list free_list;
uint64_t used;
uint64_t free;
struct session_queue free_list;
};
struct session_pool *session_pool_new(uint64_t capacity)
@@ -17,7 +19,8 @@ struct session_pool *session_pool_new(uint64_t capacity)
{
return NULL;
}
pool->available = 0;
pool->used = 0;
pool->free = 0;
pool->capacity = capacity;
TAILQ_INIT(&pool->free_list);
@@ -26,7 +29,7 @@ struct session_pool *session_pool_new(uint64_t capacity)
{
struct session *sess = &array[i];
TAILQ_INSERT_TAIL(&pool->free_list, sess, free_tqe);
pool->available++;
pool->free++;
}
return pool;
@@ -40,8 +43,10 @@ void session_pool_free(struct session_pool *pool)
while ((sess = TAILQ_FIRST(&pool->free_list)))
{
TAILQ_REMOVE(&pool->free_list, sess, free_tqe);
pool->available--;
pool->free--;
}
assert(pool->free == 0);
assert(pool->used == 0);
free(pool);
pool = NULL;
@@ -59,7 +64,8 @@ struct session *session_pool_pop(struct session_pool *pool)
if (sess)
{
TAILQ_REMOVE(&pool->free_list, sess, free_tqe);
pool->available--;
pool->free--;
pool->used++;
}
return sess;
@@ -73,7 +79,8 @@ void session_pool_push(struct session_pool *pool, struct session *sess)
}
TAILQ_INSERT_TAIL(&pool->free_list, sess, free_tqe);
pool->available++;
pool->free++;
pool->used--;
}
const struct session *session_pool_get0(const struct session_pool *pool, uint64_t idx)
@@ -87,22 +94,12 @@ const struct session *session_pool_get0(const struct session_pool *pool, uint64_
return &array[idx];
}
uint64_t session_pool_available_num(const struct session_pool *pool)
uint64_t session_pool_get_free_num(const struct session_pool *pool)
{
if (pool == NULL)
{
return 0;
}
return pool->available;
return pool->free;
}
uint64_t session_pool_capacity_size(const struct session_pool *pool)
uint64_t session_pool_get_used_num(const struct session_pool *pool)
{
if (pool == NULL)
{
return 0;
}
return pool->capacity;
return pool->used;
}

View File

@@ -15,8 +15,8 @@ struct session *session_pool_pop(struct session_pool *pool);
void session_pool_push(struct session_pool *pool, struct session *sess);
const struct session *session_pool_get0(const struct session_pool *pool, uint64_t idx);
uint64_t session_pool_available_num(const struct session_pool *pool);
uint64_t session_pool_capacity_size(const struct session_pool *pool);
uint64_t session_pool_get_free_num(const struct session_pool *pool);
uint64_t session_pool_get_used_num(const struct session_pool *pool);
#ifdef __cplusplus
}

View File

@@ -15,7 +15,7 @@ struct session_table
void *arg;
uint64_t count;
struct session_list lru_list;
struct session_queue lru_list;
};
/******************************************************************************

View File

@@ -214,7 +214,7 @@ struct tcp_segment *session_get_tcp_segment(struct session *sess)
}
else
{
struct tcp_segment *seg = tcp_reassembly_pop(half->assembler);
struct tcp_segment *seg = tcp_reassembly_pop(half->tcp_reass);
if (seg)
{
session_inc_stat(sess, type, STAT_TCP_SEGMENTS_REORDERED, 1);

View File

@@ -1,6 +1,6 @@
#include <gtest/gtest.h>
#include "utils.h"
#include "utils_internal.h"
#include "packet_internal.h"
#include "packet_parser.h"
#include "session_internal.h"

View File

@@ -1,6 +1,6 @@
#include <gtest/gtest.h>
#include "utils.h"
#include "utils_internal.h"
#include "packet_internal.h"
#include "packet_parser.h"
#include "session_internal.h"

View File

@@ -10,49 +10,47 @@ TEST(SESSION_POOL, POP_PUSH)
struct session *sess4 = NULL;
struct session_pool *sess_pool = NULL;
// new
sess_pool = session_pool_new(3);
EXPECT_TRUE(sess_pool != NULL);
EXPECT_TRUE(session_pool_available_num(sess_pool) == 3);
EXPECT_TRUE(session_pool_capacity_size(sess_pool) == 3);
EXPECT_TRUE(session_pool_get_free_num(sess_pool) == 3);
EXPECT_TRUE(session_pool_get_used_num(sess_pool) == 0);
// pop
sess1 = session_pool_pop(sess_pool);
EXPECT_TRUE(sess1 != NULL);
EXPECT_TRUE(session_pool_available_num(sess_pool) == 2);
EXPECT_TRUE(session_pool_get_free_num(sess_pool) == 2);
EXPECT_TRUE(session_pool_get_used_num(sess_pool) == 1);
sess2 = session_pool_pop(sess_pool);
EXPECT_TRUE(sess2 != NULL);
EXPECT_TRUE(session_pool_available_num(sess_pool) == 1);
EXPECT_TRUE(session_pool_get_free_num(sess_pool) == 1);
EXPECT_TRUE(session_pool_get_used_num(sess_pool) == 2);
sess3 = session_pool_pop(sess_pool);
EXPECT_TRUE(sess3 != NULL);
EXPECT_TRUE(session_pool_available_num(sess_pool) == 0);
EXPECT_TRUE(session_pool_get_free_num(sess_pool) == 0);
EXPECT_TRUE(session_pool_get_used_num(sess_pool) == 3);
sess4 = session_pool_pop(sess_pool);
EXPECT_TRUE(sess4 == NULL);
EXPECT_TRUE(session_pool_get_free_num(sess_pool) == 0);
EXPECT_TRUE(session_pool_get_used_num(sess_pool) == 3);
// push
session_pool_push(sess_pool, sess1);
EXPECT_TRUE(session_pool_available_num(sess_pool) == 1);
EXPECT_TRUE(session_pool_get_free_num(sess_pool) == 1);
EXPECT_TRUE(session_pool_get_used_num(sess_pool) == 2);
session_pool_push(sess_pool, sess2);
EXPECT_TRUE(session_pool_available_num(sess_pool) == 2);
EXPECT_TRUE(session_pool_get_free_num(sess_pool) == 2);
EXPECT_TRUE(session_pool_get_used_num(sess_pool) == 1);
session_pool_push(sess_pool, sess3);
EXPECT_TRUE(session_pool_available_num(sess_pool) == 3);
sess1 = session_pool_pop(sess_pool);
EXPECT_TRUE(sess1 != NULL);
EXPECT_TRUE(session_pool_available_num(sess_pool) == 2);
sess2 = session_pool_pop(sess_pool);
EXPECT_TRUE(sess2 != NULL);
EXPECT_TRUE(session_pool_available_num(sess_pool) == 1);
sess3 = session_pool_pop(sess_pool);
EXPECT_TRUE(sess3 != NULL);
EXPECT_TRUE(session_pool_available_num(sess_pool) == 0);
sess4 = session_pool_pop(sess_pool);
EXPECT_TRUE(sess4 == NULL);
session_pool_push(sess_pool, sess1);
EXPECT_TRUE(session_pool_available_num(sess_pool) == 1);
session_pool_push(sess_pool, sess2);
EXPECT_TRUE(session_pool_available_num(sess_pool) == 2);
session_pool_push(sess_pool, sess3);
EXPECT_TRUE(session_pool_available_num(sess_pool) == 3);
EXPECT_TRUE(session_pool_get_free_num(sess_pool) == 3);
EXPECT_TRUE(session_pool_get_used_num(sess_pool) == 0);
// free
session_pool_free(sess_pool);
}

View File

@@ -2,11 +2,14 @@
#include <pthread.h>
#include <sys/prctl.h>
#include "packet_io.h"
#include "packet_internal.h"
#include "packet_manager_internal.h"
#include "stellar/stellar.h"
#include "stellar/module_manager.h"
#include "packet_io.h"
#include "log_internal.h"
#include "packet_internal.h"
#include "utils_internal.h"
#include "packet_manager_internal.h"
#include "module_manager_interna.h"
#define CORE_LOG_FATAL(format, ...) STELLAR_LOG_FATAL(__thread_local_logger, "core", format, ##__VA_ARGS__)
@@ -19,7 +22,7 @@ static __attribute__((__used__)) const char *version = STELLAR_GIT_VERSION;
static __attribute__((__used__)) const char *version = "Unknown";
#endif
struct stellar_thread
struct thread
{
pthread_t tid;
uint16_t idx;
@@ -35,16 +38,16 @@ struct stellar
struct packet_io *pkt_io;
struct mq_schema *mq_schema;
struct stellar_module_manager *mod_mgr;
struct stellar_thread threads[MAX_THREAD_NUM];
struct thread threads[MAX_THREAD_NUM];
};
static void *worker_thread(void *arg)
{
int nr_pkt_rcv = 0;
int nr_recv = 0;
char thread_name[16] = {0};
struct packet *pkt = NULL;
struct packet packets[RX_BURST_MAX];
struct stellar_thread *thread = (struct stellar_thread *)arg;
struct packet *pkts[RX_BURST_MAX] = {NULL};
struct thread *thread = (struct thread *)arg;
uint16_t thread_id = thread->idx;
struct stellar *st = thread->st;
struct packet_io *pkt_io = st->pkt_io;
@@ -58,63 +61,44 @@ static void *worker_thread(void *arg)
__thread_local_logger = st->logger;
stellar_module_manager_register_thread(mod_mgr, thread_id, mq_rt);
if (packet_io_init(pkt_io, thread_id) != 0)
if (packet_manager_init(pkt_mgr, thread_id, mq_rt) != 0)
{
CORE_LOG_ERROR("unable to init packet io");
CORE_LOG_ERROR("unable to init packet manager");
return NULL;
}
packet_manager_init(pkt_mgr, thread_id, mq_rt);
ATOMIC_SET(&thread->is_runing, 1);
CORE_LOG_FATAL("worker thread %d runing", thread_id);
while (ATOMIC_READ(&st->need_exit) == 0)
{
// TODO
memset(packets, 0, sizeof(packets));
nr_pkt_rcv = packet_io_ingress(pkt_io, thread_id, packets, RX_BURST_MAX);
if (nr_pkt_rcv == 0)
nr_recv = packet_io_recv(pkt_io, thread_id, pkts, RX_BURST_MAX);
for (int i = 0; i < nr_recv; i++)
{
goto idle_tasks;
packet_manager_ingress(pkt_mgr, thread_id, pkts[i]);
}
for (int i = 0; i < nr_pkt_rcv; i++)
packet_manager_dispatch(pkt_mgr, thread_id);
while ((pkt = packet_manager_egress(pkt_mgr, thread_id)))
{
// TODO alloc struct packet from packet pool
pkt = calloc(1, sizeof(struct packet));
memcpy(pkt, &packets[i], sizeof(struct packet));
pkt->need_free = 1;
packet_manager_ingress(pkt_mgr, thread_id, pkt);
packet_manager_dispatch(pkt_mgr, thread_id);
pkt = packet_manager_egress(pkt_mgr, thread_id);
if (pkt == NULL)
{
continue;
}
if (packet_get_action(pkt) == PACKET_ACTION_DROP)
{
packet_io_drop(pkt_io, thread_id, pkt, 1);
packet_free(pkt);
packet_io_drop(pkt_io, thread_id, &pkt, 1);
}
else
{
packet_io_egress(pkt_io, thread_id, pkt, 1);
packet_free(pkt);
packet_io_send(pkt_io, thread_id, &pkt, 1);
}
stellar_polling_dispatch(mod_mgr);
}
idle_tasks:
stellar_polling_dispatch(mod_mgr);
if (nr_pkt_rcv == 0)
packet_io_polling(pkt_io, thread_id);
if (nr_recv == 0)
{
packet_io_yield(pkt_io, thread_id);
}
}
stellar_module_manager_unregister_thread(mod_mgr, thread_id);
mq_runtime_free(mq_rt);
@@ -126,9 +110,9 @@ static void *worker_thread(void *arg)
static int stellar_thread_run(struct stellar *st)
{
for (uint16_t i = 0; i < st->thread_num; i++)
for (uint64_t i = 0; i < st->thread_num; i++)
{
struct stellar_thread *thread = &st->threads[i];
struct thread *thread = &st->threads[i];
thread->idx = i;
thread->is_runing = 0;
thread->st = st;
@@ -144,14 +128,14 @@ static int stellar_thread_run(struct stellar *st)
static void stellar_thread_join(struct stellar *st)
{
for (uint16_t i = 0; i < st->thread_num; i++)
for (uint64_t i = 0; i < st->thread_num; i++)
{
if (st->threads[i].is_runing == 0)
{
continue;
}
struct stellar_thread *thread = &st->threads[i];
struct thread *thread = &st->threads[i];
pthread_join(thread->tid, NULL);
}
}
@@ -180,7 +164,7 @@ struct stellar *stellar_new(const char *toml_file)
__thread_local_logger = st->logger;
CORE_LOG_FATAL("stellar start (version: %s)", version);
if (load_and_validate_toml_integer_config(toml_file, "packet_io.nr_worker_thread", (uint64_t *)&st->thread_num, 1, MAX_THREAD_NUM) != 0)
if (load_toml_integer_config(toml_file, "packet_io.thread_num", (uint64_t *)&st->thread_num, 1, MAX_THREAD_NUM) != 0)
{
CORE_LOG_ERROR("unable to get thread number from config file");
goto error_out;

View File

@@ -1,608 +0,0 @@
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "log_internal.h"
#include "stellar_stat.h"
#include "fieldstat/fieldstat_easy.h"
#include "fieldstat/fieldstat_exporter.h"
#define STAT_LOG_ERROR(format, ...) STELLAR_LOG_ERROR(__thread_local_logger, "stat", format, ##__VA_ARGS__)
#define STAT_LOG_INFO(format, ...) STELLAR_LOG_INFO(__thread_local_logger, "stat", format, ##__VA_ARGS__)
#define IS_FREE 0
#define IS_BUSY 0xf
// Flat index space for all per-thread statistics counters.
// NOTE: the order here is position-coupled to the name[] table below and to
// the stat_idx[] / stat_val[] arrays in struct stellar_stat -- append new
// entries immediately before STAT_TYPE_MAX and keep name[] in lock-step.
enum stat_type
{
// device packet
STAT_TYPE_PKTS_RX,
STAT_TYPE_BYTES_RX,
STAT_TYPE_PKTS_TX,
STAT_TYPE_BYTES_TX,
// keep-alive packet
STAT_TYPE_KEEP_ALIVE_PKTS,
STAT_TYPE_KEEP_ALIVE_BYTES,
// raw packet
STAT_TYPE_RAW_PKTS_RX,
STAT_TYPE_RAW_BYTES_RX,
STAT_TYPE_RAW_PKTS_TX,
STAT_TYPE_RAW_BYTES_TX,
// drop packet
STAT_TYPE_PKTS_DROPPED,
STAT_TYPE_BYTES_DROPPED,
// inject packet
STAT_TYPE_PKTS_INJECTED,
STAT_TYPE_BYTES_INJECTED,
// ctrl packet
STAT_TYPE_CTRL_PKTS_RX,
STAT_TYPE_CTRL_BYTES_RX,
STAT_TYPE_CTRL_PKTS_TX,
STAT_TYPE_CTRL_BYTES_TX,
// ipv4 reassembly
STAT_TYPE_IP4_DEFRAGS_EXPECTED,
STAT_TYPE_IP4_DEFRAGS_SUCCEED,
STAT_TYPE_IP4_DEFRAGS_FAILED_TIMEOUT,
STAT_TYPE_IP4_DEFRAGS_FAILED_INVALID_LENGTH,
STAT_TYPE_IP4_DEFRAGS_FAILED_OVERLAP,
STAT_TYPE_IP4_DEFRAGS_FAILED_TOO_MANY_FRAG,
STAT_TYPE_IP4_FRAGS,
STAT_TYPE_IP4_FRAGS_FREED,
STAT_TYPE_IP4_FRAGS_BUFFERED,
STAT_TYPE_IP4_FRAGS_BYPASS_NO_BUFFER,
// NOTE(review): "FIST" looks like a typo for "FIRST"; the exported metric
// string in name[] matches it, so renaming would change emitted metric
// names downstream -- confirm with consumers before fixing.
STAT_TYPE_IP4_FRAGS_BYPASS_DUP_FIST_FRAG,
STAT_TYPE_IP4_FRAGS_BYPASS_DUP_LAST_FRAG,
// ipv6 reassembly
STAT_TYPE_IP6_DEFRAGS_EXPECTED,
STAT_TYPE_IP6_DEFRAGS_SUCCEED,
STAT_TYPE_IP6_DEFRAGS_FAILED_TIMEOUT,
STAT_TYPE_IP6_DEFRAGS_FAILED_INVALID_LENGTH,
STAT_TYPE_IP6_DEFRAGS_FAILED_OVERLAP,
STAT_TYPE_IP6_DEFRAGS_FAILED_TOO_MANY_FRAG,
STAT_TYPE_IP6_FRAGS,
STAT_TYPE_IP6_FRAGS_FREED,
STAT_TYPE_IP6_FRAGS_BUFFERED,
STAT_TYPE_IP6_FRAGS_BYPASS_NO_BUFFER,
STAT_TYPE_IP6_FRAGS_BYPASS_DUP_FIST_FRAG,
STAT_TYPE_IP6_FRAGS_BYPASS_DUP_LAST_FRAG,
// TCP session
STAT_TYPE_HISTORY_TCP_SESSIONS,
STAT_TYPE_TCP_SESS_USED,
STAT_TYPE_TCP_SESS_OPENING,
STAT_TYPE_TCP_SESS_ACTIVE,
STAT_TYPE_TCP_SESS_CLOSING,
STAT_TYPE_TCP_SESS_DISCARD,
STAT_TYPE_TCP_SESS_CLOSED,
// UDP session
STAT_TYPE_HISTORY_UDP_SESSIONS,
STAT_TYPE_UDP_SESS_USED,
STAT_TYPE_UDP_SESS_OPENING,
STAT_TYPE_UDP_SESS_ACTIVE,
STAT_TYPE_UDP_SESS_CLOSING,
STAT_TYPE_UDP_SESS_DISCARD,
STAT_TYPE_UDP_SESS_CLOSED,
// Evicted session
STAT_TYPE_TCP_SESS_EVICTED,
STAT_TYPE_UDP_SESS_EVICTED,
// Packet
STAT_TYPE_UDP_PKTS_BYPASS_TABLE_FULL,
STAT_TYPE_TCP_PKTS_BYPASS_TABLE_FULL,
STAT_TYPE_TCP_PKTS_BYPASS_SESSION_NOT_FOUND,
STAT_TYPE_TCP_PKTS_BYPASS_DUPLICATED,
STAT_TYPE_UDP_PKTS_BYPASS_DUPLICATED,
STAT_TYPE_UDP_PKTS_BYPASS_SESSION_EVICTED,
// TCP segments
STAT_TYPE_TCP_SEGS_INPUT,
STAT_TYPE_TCP_SEGS_CONSUMED,
STAT_TYPE_TCP_SEGS_TIMEOUT,
STAT_TYPE_TCP_SEGS_RETRANSMITED,
STAT_TYPE_TCP_SEGS_OVERLAPPED,
STAT_TYPE_TCP_SEGS_OMITTED_TOO_MANY,
STAT_TYPE_TCP_SEGS_INORDER,
STAT_TYPE_TCP_SEGS_REORDERED,
STAT_TYPE_TCP_SEGS_BUFFERED,
STAT_TYPE_TCP_SEGS_FREED,
// end
STAT_TYPE_MAX,
};
// Human-readable metric names, indexed by enum stat_type -- every entry must
// stay at the same position as its STAT_TYPE_* counterpart above.
// NOTE(review): "fist" in the dup-first-frag entries looks like a typo for
// "first", but these strings are the exported metric names; renaming would
// break any dashboard/consumer keyed on them -- confirm before changing.
// NOTE(review): this table has external linkage under the very generic name
// `name`; consider `static const char *` if nothing outside this file uses it.
const char *name[] = {
// device packet
"pkts_rx",
"bytes_rx",
"pkts_tx",
"bytes_tx",
// keep-alive packet
"keep_alive_pkts",
"keep_alive_bytes",
// raw packet
"raw_pkts_rx",
"raw_bytes_rx",
"raw_pkts_tx",
"raw_bytes_tx",
// drop packet
"pkts_dropped",
"bytes_dropped",
// inject packet
"pkts_injected",
"bytes_injected",
// ctrl packet
"ctrl_pkts_rx",
"ctrl_bytes_rx",
"ctrl_pkts_tx",
"ctrl_bytes_tx",
// ipv4 reassembly
"ip4_defrags_expected",
"ip4_defrags_succeed",
"ip4_defrags_failed_timeout",
"ip4_defrags_failed_invalid_length",
"ip4_defrags_failed_overlap",
"ip4_defrags_failed_too_many_frag",
"ip4_frags",
"ip4_frags_freed",
"ip4_frags_buffered",
"ip4_frags_bypass_no_buffer",
"ip4_frags_bypass_dup_fist_frag",
"ip4_frags_bypass_dup_last_frag",
// ipv6 reassembly
"ip6_defrags_expected",
"ip6_defrags_succeed",
"ip6_defrags_failed_timeout",
"ip6_defrags_failed_invalid_length",
"ip6_defrags_failed_overlap",
"ip6_defrags_failed_too_many_frag",
"ip6_frags",
"ip6_frags_freed",
"ip6_frags_buffered",
"ip6_frags_bypass_no_buffer",
"ip6_frags_bypass_dup_fist_frag",
"ip6_frags_bypass_dup_last_frag",
// TCP session
"history_tcp_sessions",
"tcp_sess_used",
"tcp_sess_opening",
"tcp_sess_active",
"tcp_sess_closing",
"tcp_sess_discard",
"tcp_sess_closed",
// UDP session
"history_udp_sessions",
"udp_sess_used",
"udp_sess_opening",
"udp_sess_active",
"udp_sess_closing",
"udp_sess_discard",
"udp_sess_closed",
// Evicted session
"tcp_sess_evicted",
"udp_sess_evicted",
// Packet
"udp_pkts_bypass_table_full",
"tcp_pkts_bypass_table_full",
"tcp_pkts_bypass_session_not_found",
"tcp_pkts_bypass_duplicated",
"udp_pkts_bypass_duplicated",
"udp_pkts_bypass_session_evicted",
// TCP segments
"tcp_segs_input",
"tcp_segs_consumed",
"tcp_segs_timeout",
"tcp_segs_retransmited",
"tcp_segs_overlapped",
"tcp_segs_omitted_too_many",
"tcp_segs_inorder",
"tcp_segs_reordered",
"tcp_segs_buffered",
"tcp_segs_freed",
};
/*
* This naming convention maintains consistency by using a clear, structured pattern:
* • tcp_sessions_ or ipv6_frags_ as the prefix to indicate the type of data.
* • Action or status (e.g., bypassed, active, dropped) as the middle part.
* • Cause or condition (e.g., full_table, buffer_limit) as the suffix for additional clarity.
*/
// Aggregation state for all worker-thread statistics.
struct stellar_stat
{
struct stellar_stat_config cfg;              // merge/output intervals etc. (loaded from toml)
struct fieldstat_easy *fs;                   // exporter handle (fieldstat library)
uint64_t last_merge_stat_ts;                 // timestamp of the last merge pass
uint64_t last_output_stat_ts;                // timestamp of the last output pass
int flag[MAX_THREAD_NUM]; // IS_FREE or IS_BUSY
struct thread_stat thr_stat[MAX_THREAD_NUM]; // per-thread counter snapshots
uint64_t stat_idx[MAX]; // assumes parallel to enum stat_type -- see NOTE below
uint64_t stat_val[STAT_TYPE_MAX];            // merged value per stat_type
};
// Map a flat stat_type index onto the matching counter inside one thread's
// stat snapshot (pkt_io / ip_reass / sess_mgr sub-structures are project
// types defined elsewhere).
//
// thr_stat: per-thread counter snapshot; must be non-NULL with valid
//           sub-pointers (not checked here).
// idx:      a STAT_TYPE_* value from enum stat_type.
// Returns the raw counter value.
// NOTE(review): for an out-of-range idx this asserts in debug builds, but
// with NDEBUG the assert compiles out and 0 is silently returned.
uint64_t get_stat_value_by_idx(const struct thread_stat *thr_stat, size_t idx)
{
switch (idx)
{
// device packet
case STAT_TYPE_PKTS_RX:
return thr_stat->pkt_io->pkts_rx;
case STAT_TYPE_BYTES_RX:
return thr_stat->pkt_io->bytes_rx;
case STAT_TYPE_PKTS_TX:
return thr_stat->pkt_io->pkts_tx;
case STAT_TYPE_BYTES_TX:
return thr_stat->pkt_io->bytes_tx;
// keep-alive packet
case STAT_TYPE_KEEP_ALIVE_PKTS:
return thr_stat->pkt_io->keep_alive_pkts;
case STAT_TYPE_KEEP_ALIVE_BYTES:
return thr_stat->pkt_io->keep_alive_bytes;
// raw packet
case STAT_TYPE_RAW_PKTS_RX:
return thr_stat->pkt_io->raw_pkts_rx;
case STAT_TYPE_RAW_BYTES_RX:
return thr_stat->pkt_io->raw_bytes_rx;
case STAT_TYPE_RAW_PKTS_TX:
return thr_stat->pkt_io->raw_pkts_tx;
case STAT_TYPE_RAW_BYTES_TX:
return thr_stat->pkt_io->raw_bytes_tx;
// drop packet
case STAT_TYPE_PKTS_DROPPED:
return thr_stat->pkt_io->pkts_dropped;
case STAT_TYPE_BYTES_DROPPED:
return thr_stat->pkt_io->bytes_dropped;
// inject packet
case STAT_TYPE_PKTS_INJECTED:
return thr_stat->pkt_io->pkts_injected;
case STAT_TYPE_BYTES_INJECTED:
return thr_stat->pkt_io->bytes_injected;
// ctrl packet
case STAT_TYPE_CTRL_PKTS_RX:
return thr_stat->pkt_io->ctrl_pkts_rx;
case STAT_TYPE_CTRL_BYTES_RX:
return thr_stat->pkt_io->ctrl_bytes_rx;
case STAT_TYPE_CTRL_PKTS_TX:
return thr_stat->pkt_io->ctrl_pkts_tx;
case STAT_TYPE_CTRL_BYTES_TX:
return thr_stat->pkt_io->ctrl_bytes_tx;
// ipv4 reassembly
case STAT_TYPE_IP4_DEFRAGS_EXPECTED:
return thr_stat->ip_reass->ip4_defrags_expected;
case STAT_TYPE_IP4_DEFRAGS_SUCCEED:
return thr_stat->ip_reass->ip4_defrags_succeed;
case STAT_TYPE_IP4_DEFRAGS_FAILED_TIMEOUT:
return thr_stat->ip_reass->ip4_defrags_failed_timeout;
case STAT_TYPE_IP4_DEFRAGS_FAILED_INVALID_LENGTH:
return thr_stat->ip_reass->ip4_defrags_failed_invalid_length;
case STAT_TYPE_IP4_DEFRAGS_FAILED_OVERLAP:
return thr_stat->ip_reass->ip4_defrags_failed_overlap;
case STAT_TYPE_IP4_DEFRAGS_FAILED_TOO_MANY_FRAG:
return thr_stat->ip_reass->ip4_defrags_failed_too_many_frag;
case STAT_TYPE_IP4_FRAGS:
return thr_stat->ip_reass->ip4_frags;
case STAT_TYPE_IP4_FRAGS_FREED:
return thr_stat->ip_reass->ip4_frags_freed;
case STAT_TYPE_IP4_FRAGS_BUFFERED:
return thr_stat->ip_reass->ip4_frags_buffered;
case STAT_TYPE_IP4_FRAGS_BYPASS_NO_BUFFER:
return thr_stat->ip_reass->ip4_frags_bypass_no_buffer;
case STAT_TYPE_IP4_FRAGS_BYPASS_DUP_FIST_FRAG:
return thr_stat->ip_reass->ip4_frags_bypass_dup_fist_frag;
case STAT_TYPE_IP4_FRAGS_BYPASS_DUP_LAST_FRAG:
return thr_stat->ip_reass->ip4_frags_bypass_dup_last_frag;
// ipv6 reassembly
case STAT_TYPE_IP6_DEFRAGS_EXPECTED:
return thr_stat->ip_reass->ip6_defrags_expected;
case STAT_TYPE_IP6_DEFRAGS_SUCCEED:
return thr_stat->ip_reass->ip6_defrags_succeed;
case STAT_TYPE_IP6_DEFRAGS_FAILED_TIMEOUT:
return thr_stat->ip_reass->ip6_defrags_failed_timeout;
case STAT_TYPE_IP6_DEFRAGS_FAILED_INVALID_LENGTH:
return thr_stat->ip_reass->ip6_defrags_failed_invalid_length;
case STAT_TYPE_IP6_DEFRAGS_FAILED_OVERLAP:
return thr_stat->ip_reass->ip6_defrags_failed_overlap;
case STAT_TYPE_IP6_DEFRAGS_FAILED_TOO_MANY_FRAG:
return thr_stat->ip_reass->ip6_defrags_failed_too_many_frag;
case STAT_TYPE_IP6_FRAGS:
return thr_stat->ip_reass->ip6_frags;
case STAT_TYPE_IP6_FRAGS_FREED:
return thr_stat->ip_reass->ip6_frags_freed;
case STAT_TYPE_IP6_FRAGS_BUFFERED:
return thr_stat->ip_reass->ip6_frags_buffered;
case STAT_TYPE_IP6_FRAGS_BYPASS_NO_BUFFER:
return thr_stat->ip_reass->ip6_frags_bypass_no_buffer;
case STAT_TYPE_IP6_FRAGS_BYPASS_DUP_FIST_FRAG:
return thr_stat->ip_reass->ip6_frags_bypass_dup_fist_frag;
case STAT_TYPE_IP6_FRAGS_BYPASS_DUP_LAST_FRAG:
return thr_stat->ip_reass->ip6_frags_bypass_dup_last_frag;
// TCP session
case STAT_TYPE_HISTORY_TCP_SESSIONS:
return thr_stat->sess_mgr->history_tcp_sessions;
case STAT_TYPE_TCP_SESS_USED:
return thr_stat->sess_mgr->tcp_sess_used;
case STAT_TYPE_TCP_SESS_OPENING:
return thr_stat->sess_mgr->tcp_sess_opening;
case STAT_TYPE_TCP_SESS_ACTIVE:
return thr_stat->sess_mgr->tcp_sess_active;
case STAT_TYPE_TCP_SESS_CLOSING:
return thr_stat->sess_mgr->tcp_sess_closing;
case STAT_TYPE_TCP_SESS_DISCARD:
return thr_stat->sess_mgr->tcp_sess_discard;
case STAT_TYPE_TCP_SESS_CLOSED:
return thr_stat->sess_mgr->tcp_sess_closed;
// UDP session
case STAT_TYPE_HISTORY_UDP_SESSIONS:
return thr_stat->sess_mgr->history_udp_sessions;
case STAT_TYPE_UDP_SESS_USED:
return thr_stat->sess_mgr->udp_sess_used;
case STAT_TYPE_UDP_SESS_OPENING:
return thr_stat->sess_mgr->udp_sess_opening;
case STAT_TYPE_UDP_SESS_ACTIVE:
return thr_stat->sess_mgr->udp_sess_active;
case STAT_TYPE_UDP_SESS_CLOSING:
return thr_stat->sess_mgr->udp_sess_closing;
case STAT_TYPE_UDP_SESS_DISCARD:
return thr_stat->sess_mgr->udp_sess_discard;
case STAT_TYPE_UDP_SESS_CLOSED:
return thr_stat->sess_mgr->udp_sess_closed;
// Evicted session
case STAT_TYPE_TCP_SESS_EVICTED:
return thr_stat->sess_mgr->tcp_sess_evicted;
case STAT_TYPE_UDP_SESS_EVICTED:
return thr_stat->sess_mgr->udp_sess_evicted;
// Packet
case STAT_TYPE_UDP_PKTS_BYPASS_TABLE_FULL:
return thr_stat->sess_mgr->udp_pkts_bypass_table_full;
case STAT_TYPE_TCP_PKTS_BYPASS_TABLE_FULL:
return thr_stat->sess_mgr->tcp_pkts_bypass_table_full;
case STAT_TYPE_TCP_PKTS_BYPASS_SESSION_NOT_FOUND:
return thr_stat->sess_mgr->tcp_pkts_bypass_session_not_found;
case STAT_TYPE_TCP_PKTS_BYPASS_DUPLICATED:
return thr_stat->sess_mgr->tcp_pkts_bypass_duplicated;
case STAT_TYPE_UDP_PKTS_BYPASS_DUPLICATED:
return thr_stat->sess_mgr->udp_pkts_bypass_duplicated;
case STAT_TYPE_UDP_PKTS_BYPASS_SESSION_EVICTED:
return thr_stat->sess_mgr->udp_pkts_bypass_session_evicted;
// TCP segments
case STAT_TYPE_TCP_SEGS_INPUT:
return thr_stat->sess_mgr->tcp_segs_input;
case STAT_TYPE_TCP_SEGS_CONSUMED:
return thr_stat->sess_mgr->tcp_segs_consumed;
case STAT_TYPE_TCP_SEGS_TIMEOUT:
return thr_stat->sess_mgr->tcp_segs_timeout;
case STAT_TYPE_TCP_SEGS_RETRANSMITED:
return thr_stat->sess_mgr->tcp_segs_retransmited;
case STAT_TYPE_TCP_SEGS_OVERLAPPED:
return thr_stat->sess_mgr->tcp_segs_overlapped;
case STAT_TYPE_TCP_SEGS_OMITTED_TOO_MANY:
return thr_stat->sess_mgr->tcp_segs_omitted_too_many;
case STAT_TYPE_TCP_SEGS_INORDER:
return thr_stat->sess_mgr->tcp_segs_inorder;
case STAT_TYPE_TCP_SEGS_REORDERED:
return thr_stat->sess_mgr->tcp_segs_reordered;
case STAT_TYPE_TCP_SEGS_BUFFERED:
return thr_stat->sess_mgr->tcp_segs_buffered;
case STAT_TYPE_TCP_SEGS_FREED:
return thr_stat->sess_mgr->tcp_segs_freed;
default:
assert(0);
return 0;
}
}
/**
 * Load the stat-module configuration from a TOML file.
 *
 * @param toml_file  path to the TOML config file; must not be NULL
 * @return newly allocated config on success (caller frees with
 *         stellar_stat_config_free()), NULL on any load/validation failure
 */
struct stellar_stat_config *stellar_stat_config_new(const char *toml_file)
{
    if (toml_file == NULL)
    {
        return NULL;
    }
    struct stellar_stat_config *cfg = (struct stellar_stat_config *)calloc(1, sizeof(struct stellar_stat_config));
    if (cfg == NULL)
    {
        return NULL;
    }
    int ret = 0;
    /* nr_worker_thread is a uint16_t. The previous code cast its address to
     * (uint64_t *), letting the loader write 8 bytes into a 2-byte field and
     * clobber the neighbouring members. Load into a temporary instead. */
    uint64_t nr_worker_thread = 0;
    /* NOTE(review): the shipped TOML appears to rename this key to
     * packet_io.thread_num — confirm which spelling the deployed config uses. */
    ret += load_and_validate_toml_integer_config(toml_file, "packet_io.nr_worker_thread", &nr_worker_thread, 1, MAX_THREAD_NUM);
    cfg->nr_worker_thread = (uint16_t)nr_worker_thread;
    /* merge/output intervals are uint64_t fields; no cast needed. */
    ret += load_and_validate_toml_integer_config(toml_file, "stat.merge_interval_ms", &cfg->merge_interval_ms, 0, 60000);
    ret += load_and_validate_toml_integer_config(toml_file, "stat.output_interval_ms", &cfg->output_interval_ms, 0, 60000);
    if (ret != 0)
    {
        stellar_stat_config_free(cfg);
        return NULL;
    }
    return cfg;
}
/**
 * Release a config allocated by stellar_stat_config_new().
 *
 * @param cfg  config to free; NULL is allowed (no-op)
 *
 * free(NULL) is defined as a no-op, so no guard is needed; the old
 * `cfg = NULL` only cleared the local copy of the pointer and had no
 * effect on the caller.
 */
void stellar_stat_config_free(struct stellar_stat_config *cfg)
{
    free(cfg);
}
void stellar_stat_config_print(const struct stellar_stat_config *cfg)
{
if (cfg)
{
STAT_LOG_INFO("stat.merge_interval_ms : %lu", cfg->merge_interval_ms);
STAT_LOG_INFO("stat.output_interval_ms : %lu", cfg->output_interval_ms);
}
}
// python3 -m pip install prettytable
// python3 -m pip install jinja2
// /opt/MESA/bin/fieldstat_exporter.py local -j log/stellar_fs4.json -e -l --clear-screen
/**
 * Create the global stat aggregator.
 *
 * @param cfg     validated config (copied into the object); must not be NULL
 * @param now_ms  current time in ms, used to seed merge/output timestamps
 * @return new aggregator on success (free with stellar_stat_free()),
 *         NULL on allocation or fieldstat setup failure
 */
struct stellar_stat *stellar_stat_new(const struct stellar_stat_config *cfg, uint64_t now_ms)
{
    /* Guard against a NULL config: the memcpy below would otherwise crash. */
    if (cfg == NULL)
    {
        return NULL;
    }
    struct stellar_stat *stat = (struct stellar_stat *)calloc(1, sizeof(struct stellar_stat));
    if (stat == NULL)
    {
        return NULL;
    }
    memcpy(&stat->cfg, cfg, sizeof(struct stellar_stat_config));
    stat->fs = fieldstat_easy_new(1, "stellar", NULL, 0);
    if (stat->fs == NULL)
    {
        STAT_LOG_ERROR("failed to create fieldstat_easy");
        goto error_out;
    }
    /* All per-thread handoff slots start out free (no pending snapshot). */
    for (int i = 0; i < MAX_THREAD_NUM; i++)
    {
        stat->flag[i] = IS_FREE;
    }
    /* Register one counter per stat type; indices are reused by output(). */
    for (size_t i = 0; i < STAT_TYPE_MAX; i++)
    {
        stat->stat_idx[i] = fieldstat_easy_register_counter(stat->fs, name[i]);
    }
    stat->last_merge_stat_ts = now_ms;
    stat->last_output_stat_ts = now_ms;
    return stat;
error_out:
    stellar_stat_free(stat);
    return NULL;
}
/**
 * Destroy the stat aggregator and its fieldstat handle.
 *
 * @param stat  aggregator to free; NULL is allowed (no-op)
 */
void stellar_stat_free(struct stellar_stat *stat)
{
    if (stat == NULL)
    {
        return;
    }
    if (stat->fs)
    {
        fieldstat_easy_free(stat->fs);
        stat->fs = NULL;
    }
    free(stat);
}
/**
 * Periodically fold per-thread snapshots into global counters and dump
 * them as JSON to ./log/stellar_fs4.json.
 *
 * Rate-limited to one output per cfg.output_interval_ms. Each worker slot
 * marked IS_BUSY holds a snapshot published by stellar_stat_merge(); after
 * draining it, the slot is zeroed and flipped back to IS_FREE.
 *
 * @param stat    aggregator (must not be NULL)
 * @param now_ms  current time in ms
 */
void stellar_stat_output(struct stellar_stat *stat, uint64_t now_ms)
{
    if (now_ms - stat->last_output_stat_ts < stat->cfg.output_interval_ms)
    {
        return;
    }
    stat->last_output_stat_ts = now_ms;
    for (uint16_t i = 0; i < stat->cfg.nr_worker_thread; i++)
    {
        if (ATOMIC_READ(&(stat->flag[i])) == IS_BUSY)
        {
            struct thread_stat *thr_stat = &stat->thr_stat[i];
            for (size_t j = 0; j < STAT_TYPE_MAX; j++)
            {
                stat->stat_val[j] += get_stat_value_by_idx(thr_stat, j);
            }
            memset(thr_stat, 0, sizeof(struct thread_stat));
            ATOMIC_SET(&(stat->flag[i]), IS_FREE);
        }
    }
    for (size_t j = 0; j < STAT_TYPE_MAX; j++)
    {
        fieldstat_easy_counter_set(stat->fs, 0, stat->stat_idx[j], NULL, 0, stat->stat_val[j]);
    }
    /* Initialize: if fieldstat_easy_output() fails without touching its
     * out-params, reading an indeterminate pointer/length would be UB. */
    char *buff = NULL;
    size_t len = 0;
    fieldstat_easy_output(stat->fs, &buff, &len);
    if (buff)
    {
        FILE *fp = fopen("./log/stellar_fs4.json", "w+");
        if (fp == NULL)
        {
            STAT_LOG_ERROR("failed to open file: ./log/stellar_fs4.json, %s", strerror(errno));
        }
        else
        {
            /* fwrite may fail (e.g. disk full); the old code ignored it. */
            if (len > 0 && fwrite(buff, len, 1, fp) != 1)
            {
                STAT_LOG_ERROR("failed to write file: ./log/stellar_fs4.json, %s", strerror(errno));
            }
            fflush(fp);
            fclose(fp);
        }
        free(buff);
    }
    /* Counters are cumulative in fieldstat; reset the local accumulators. */
    for (size_t j = 0; j < STAT_TYPE_MAX; j++)
    {
        stat->stat_val[j] = 0;
    }
}
/**
 * Publish a worker thread's stat snapshot to the aggregator.
 *
 * Called from the worker side; hands the snapshot to stellar_stat_output()
 * via a per-thread slot guarded by an atomic IS_FREE/IS_BUSY flag.
 *
 * @param stat      aggregator
 * @param thr_stat  worker-local stats to copy
 * @param thr_idx   worker slot index
 * @param now_ms    current time in ms
 */
void stellar_stat_merge(struct stellar_stat *stat, const struct thread_stat *thr_stat, uint16_t thr_idx, uint64_t now_ms)
{
    /* Rate-limit: at most one merge per merge_interval_ms. */
    if (now_ms - stat->last_merge_stat_ts < stat->cfg.merge_interval_ms)
    {
        return;
    }
    /* Only hand off when the slot is free; the output side flips it back. */
    if (ATOMIC_READ(&(stat->flag[thr_idx])) != IS_FREE)
    {
        return;
    }
    memcpy(&stat->thr_stat[thr_idx], thr_stat, sizeof(struct thread_stat));
    ATOMIC_SET(&(stat->flag[thr_idx]), IS_BUSY);
    stat->last_merge_stat_ts = now_ms;
}
/**
 * Log every stat counter of one worker thread.
 *
 * @param stat      unused (kept for API symmetry)
 * @param thr_stat  worker-local stats to print
 * @param thr_idx   worker index, for the log prefix
 */
void stellar_stat_print(struct stellar_stat *stat __attribute__((unused)), const struct thread_stat *thr_stat, uint16_t thr_idx)
{
    for (size_t i = 0; i < STAT_TYPE_MAX; i++)
    {
        /* thr_idx is uint16_t (promoted to int in varargs): %u, not %lu —
         * a mismatched printf specifier/argument pair is undefined behavior. */
        STAT_LOG_INFO("worker thread %u => %-34s: %lu", thr_idx, name[i], get_stat_value_by_idx(thr_stat, i));
    }
}

View File

@@ -1,39 +0,0 @@
#pragma once
#ifdef __cplusplus
extern "C"
{
#endif
#include "packet_io.h"
#include "ip_reassembly.h"
#include "session_manager_runtime.h"
struct thread_stat
{
struct packet_io_stat *pkt_io;
struct ip_reassembly_stat *ip_reass;
struct session_manager_stat *sess_mgr;
};
struct stellar_stat_config
{
uint16_t nr_worker_thread; // range [1, MAX_THREAD_NUM]
uint64_t merge_interval_ms; // range: [0, 60000] (ms)
uint64_t output_interval_ms; // range: [0, 60000] (ms)
};
struct stellar_stat_config *stellar_stat_config_new(const char *toml_file);
void stellar_stat_config_free(struct stellar_stat_config *cfg);
void stellar_stat_config_print(const struct stellar_stat_config *cfg);
struct stellar_stat;
struct stellar_stat *stellar_stat_new(const struct stellar_stat_config *cfg, uint64_t now_ms);
void stellar_stat_free(struct stellar_stat *stat);
void stellar_stat_output(struct stellar_stat *stat, uint64_t now_ms);
void stellar_stat_merge(struct stellar_stat *stat, const struct thread_stat *thr_stat, uint16_t thr_idx, uint64_t now_ms);
void stellar_stat_print(struct stellar_stat *stat, const struct thread_stat *thr_stat, uint16_t thr_idx);
#ifdef __cplusplus
}
#endif

View File

@@ -104,7 +104,7 @@ static inline void hexdump_to_fd(int fd, uint32_t idx, const char *data, uint16_
}
// key: "a.b.c"
static inline const char *get_toml_value_from_hierarchical_key(toml_table_t *root, const char *key)
static inline const char *get_toml_raw_by_hierarchical_key(toml_table_t *root, const char *key)
{
toml_table_t *table = root;
@@ -135,7 +135,38 @@ static inline const char *get_toml_value_from_hierarchical_key(toml_table_t *roo
return toml_raw_in(root, key);
}
static inline int load_and_validate_toml_integer_config(const char *toml_file, const char *key, uint64_t *val, uint64_t min, uint64_t max)
/* Resolve a hierarchical key like "a.b.c" to the TOML array c inside table
 * a.b. Returns NULL if any intermediate table or the array is missing. */
static inline toml_array_t *get_toml_array_by_hierarchical_key(toml_table_t *root, const char *key)
{
    /* A key with no '.' names an array directly in the root table. The
     * descend loop below would treat it as a table name via toml_table_in()
     * and always fail, making the old trailing fallback unreachable. */
    if (strchr(key, '.') == NULL)
    {
        return toml_array_in(root, key);
    }
    toml_table_t *table = root;
    char *saveptr;
    char *dup_key = strdup(key);
    if (dup_key == NULL) /* strdup can fail; old code dereferenced NULL */
    {
        return NULL;
    }
    char *token = strtok_r(dup_key, ".", &saveptr);
    while (token != NULL)
    {
        table = toml_table_in(table, token);
        if (table == NULL)
        {
            free(dup_key);
            return NULL;
        }
        /* saveptr holds the remaining components; when only one is left,
         * it is the array name inside the table we just descended into. */
        if (strchr(saveptr, '.') == NULL)
        {
            toml_array_t *arr = toml_array_in(table, saveptr);
            free(dup_key);
            return arr;
        }
        token = strtok_r(NULL, ".", &saveptr);
    }
    free(dup_key);
    return toml_array_in(root, key);
}
static inline int load_toml_integer_config(const char *toml_file, const char *key, uint64_t *val, uint64_t min, uint64_t max)
{
int ret = -1;
char errbuf[200];
@@ -157,7 +188,7 @@ static inline int load_and_validate_toml_integer_config(const char *toml_file, c
goto error_out;
}
ptr = get_toml_value_from_hierarchical_key(root, key);
ptr = get_toml_raw_by_hierarchical_key(root, key);
if (ptr == NULL)
{
STELLAR_LOG_ERROR(__thread_local_logger, "config", "config file missing %s", key);
@@ -184,7 +215,7 @@ error_out:
return ret;
}
static inline int load_and_validate_toml_double_config(const char *toml_file, const char *key, double *val, double min, double max)
static inline int load_toml_double_config(const char *toml_file, const char *key, double *val, double min, double max)
{
int ret = -1;
char errbuf[200];
@@ -206,7 +237,7 @@ static inline int load_and_validate_toml_double_config(const char *toml_file, co
goto error_out;
}
ptr = get_toml_value_from_hierarchical_key(root, key);
ptr = get_toml_raw_by_hierarchical_key(root, key);
if (ptr == NULL)
{
STELLAR_LOG_ERROR(__thread_local_logger, "config", "config file missing %s", key);
@@ -233,6 +264,104 @@ error_out:
return ret;
}
/* Load a quoted TOML string value into val (quotes stripped).
 * Returns 0 on success, -1 on any open/parse/lookup failure.
 * NOTE(review): val's capacity is not passed in — callers must size it at
 * least strlen(raw) - 1 bytes; consider adding a size parameter. */
static inline int load_toml_str_config(const char *toml_file, const char *key, char *val)
{
    int ret = -1;
    char errbuf[200];
    const char *ptr = NULL;
    FILE *fp = NULL;
    toml_table_t *root = NULL;
    fp = fopen(toml_file, "r");
    if (fp == NULL)
    {
        STELLAR_LOG_ERROR(__thread_local_logger, "config", "config file %s open failed, %s", toml_file, strerror(errno));
        return -1;
    }
    root = toml_parse_file(fp, errbuf, sizeof(errbuf));
    if (root == NULL)
    {
        STELLAR_LOG_ERROR(__thread_local_logger, "config", "config file %s parse failed, %s", toml_file, errbuf);
        goto error_out;
    }
    ptr = get_toml_raw_by_hierarchical_key(root, key);
    if (ptr == NULL)
    {
        STELLAR_LOG_ERROR(__thread_local_logger, "config", "config file missing %s", key);
        goto error_out;
    }
    /* The raw form of a TOML string keeps its surrounding quotes; require
     * at least the two quote characters before stripping them, otherwise
     * strlen(ptr) - 2 underflows. */
    size_t raw_len = strlen(ptr);
    if (raw_len < 2)
    {
        STELLAR_LOG_ERROR(__thread_local_logger, "config", "config file missing %s", key);
        goto error_out;
    }
    memcpy(val, ptr + 1, raw_len - 2);
    val[raw_len - 2] = '\0'; /* old code left val unterminated */
    ret = 0;
error_out:
    if (root != NULL)
    {
        toml_free(root);
    }
    if (fp)
    {
        fclose(fp);
    }
    return ret;
}
/* Load up to `size` integers from the TOML array named by `key` into val.
 * Returns the number of elements loaded on success, -1 on failure. */
static inline int load_toml_array_config(const char *toml_file, const char *key, uint64_t val[], uint64_t size)
{
    /* Start at -1 so every error path (not just fopen) reports failure;
     * the old code returned 0 from post-parse errors, which is
     * indistinguishable from an empty array. */
    int ret = -1;
    char errbuf[200];
    const char *ptr = NULL;
    toml_array_t *arr = NULL;
    FILE *fp = NULL;
    toml_table_t *root = NULL;
    uint64_t loop = 0;
    fp = fopen(toml_file, "r");
    if (fp == NULL)
    {
        STELLAR_LOG_ERROR(__thread_local_logger, "config", "config file %s open failed, %s", toml_file, strerror(errno));
        return -1;
    }
    root = toml_parse_file(fp, errbuf, sizeof(errbuf));
    if (root == NULL)
    {
        STELLAR_LOG_ERROR(__thread_local_logger, "config", "config file %s parse failed, %s", toml_file, errbuf);
        goto error_out;
    }
    arr = get_toml_array_by_hierarchical_key(root, key);
    if (arr == NULL) /* old code was missing this check, so the error block always ran */
    {
        STELLAR_LOG_ERROR(__thread_local_logger, "config", "config file missing %s", key);
        goto error_out;
    }
    loop = MIN((uint64_t)toml_array_nelem(arr), size);
    for (uint64_t i = 0; i < loop; i++)
    {
        ptr = toml_raw_at(arr, i);
        if (ptr == NULL)
        {
            /* %lu for the uint64_t index; %d mismatched the argument type. */
            STELLAR_LOG_ERROR(__thread_local_logger, "config", "config file missing %s[%lu]", key, i);
            goto error_out;
        }
        val[i] = (uint64_t)atoll(ptr);
    }
    ret = (int)loop;
error_out:
    if (root != NULL)
    {
        toml_free(root);
    }
    if (fp)
    {
        fclose(fp);
    }
    return ret;
}
#ifdef __cplusplus
}
#endif

View File

@@ -36,18 +36,8 @@ global:
session_get_stat;
session_get0_readable_addr;
session_set_discard;
session_manager_new_session_exdata_index;
session_get_exdata;
session_set_exdata;
session_manager_subscribe_tcp;
session_manager_subscribe_udp;
session_manager_subscribe_tcp_stream;
stellar_session_plugin_register;
stellar_session_plugin_register_with_hooks;
stellar_session_plugin_dettach_current_session;
stellar_packet_plugin_register;
stellar_polling_plugin_register;
stellar_new;
stellar_run;
@@ -67,9 +57,18 @@ global:
packet_manager_on_init;
packet_manager_on_exit;
packet_manager_new_packet_exdata_index;
packet_manager_subscribe;
packet_manager_claim_packet;
packet_manager_schedule_packet;
session_manager_on_init;
session_manager_on_exit;
session_manager_new_session_exdata_index;
session_manager_subscribe_tcp;
session_manager_subscribe_udp;
session_manager_subscribe_control_packet;
session_manager_subscribe_tcp_stream;
http_message_*;
http_decoder_init;

View File

@@ -6,8 +6,8 @@
#include <limits.h>
#include <pthread.h>
#include "utils.h"
#include "packet_dump.h"
#include "utils_internal.h"
#include "session_internal.h"
#include "stellar/log.h"
@@ -287,7 +287,6 @@ struct stellar_module *session_debugger_on_init(struct stellar_module_manager *m
{
assert(mod_mgr);
struct session_manager *sess_mgr = stellar_module_get_session_manager(mod_mgr);
assert(sess_mgr);
struct logger *logger = stellar_module_manager_get_logger(mod_mgr);