TSG-18285 TFE的Packet IO模块支持重复流量识别
This commit is contained in:
712
common/src/dablooms.cpp
Normal file
712
common/src/dablooms.cpp
Normal file
@@ -0,0 +1,712 @@
|
||||
/* Copyright @2012 by Justin Hines at Bitly under a very liberal license. See LICENSE in the source distribution. */
|
||||
|
||||
#include <sys/stat.h>
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
#include <stdarg.h>
|
||||
#include <stdlib.h>
|
||||
#include <fcntl.h>
|
||||
#include <math.h>
|
||||
#include <string.h>
|
||||
#include <sys/mman.h>
|
||||
#include <unistd.h>
|
||||
#include <errno.h>
|
||||
#include <time.h>
|
||||
#include "murmur.h"
|
||||
#include "dablooms.h"
|
||||
|
||||
#define DABLOOMS_VERSION "0.9.1"
|
||||
|
||||
#define ERROR_TIGHTENING_RATIO 0.5
|
||||
#define SALT_CONSTANT 0x97c29b3a
|
||||
|
||||
#define ALLOC(type, number) ((type *)calloc(sizeof(type), number))
|
||||
#define FREE(p) \
|
||||
{ \
|
||||
free(*p); \
|
||||
*p = NULL; \
|
||||
}
|
||||
|
||||
/* Return the compile-time dablooms library version string (DABLOOMS_VERSION). */
const char *dablooms_version(void)
{
    return DABLOOMS_VERSION;
}
|
||||
|
||||
/*
 * Release a bitmap: frees the counter array and the bitmap struct itself.
 * free(NULL) is a no-op, so a bitmap whose array was never allocated is safe.
 * The disabled mmap/munmap variant (dead `#if 0` branch) has been removed:
 * this build stores counters in plain heap memory only.
 */
void free_bitmap(bitmap_t *bitmap)
{
    free(bitmap->array);
    free(bitmap);
}
|
||||
|
||||
/*
 * Resize the counter array to new_size bytes; bytes added beyond old_size
 * are zeroed.  On allocation failure the bitmap is destroyed and NULL is
 * returned (original contract: caller must not reuse the bitmap afterwards).
 *
 * Fixes vs. original:
 *  - realloc result goes through a temporary; the original did
 *    `bitmap->array = realloc(bitmap->array, …)`, which leaked the old
 *    buffer on failure (free_bitmap then freed a NULL array).
 *  - memset is guarded so shrinking (new_size < old_size) cannot wrap
 *    the size_t length argument.
 * The disabled `#if 0` mmap/mremap path has been removed (heap-only build).
 */
bitmap_t *bitmap_resize(bitmap_t *bitmap, size_t old_size, size_t new_size)
{
    if (bitmap->array != NULL)
    {
        char *resized = (char *)realloc(bitmap->array, new_size);
        if (resized == NULL)
        {
            perror("Error resizing memory");
            /* bitmap->array still points at the old buffer, so
             * free_bitmap releases it instead of leaking it. */
            free_bitmap(bitmap);
            return NULL;
        }
        bitmap->array = resized;
        if (new_size > old_size)
        {
            memset(bitmap->array + old_size, 0, new_size - old_size);
        }
    }
    else
    {
        /* First allocation: calloc zero-fills, replacing malloc+memset. */
        bitmap->array = (char *)calloc(1, new_size);
        if (bitmap->array == NULL)
        {
            perror("Error init memory");
            free_bitmap(bitmap);
            return NULL;
        }
    }
    bitmap->bytes = new_size;
    return bitmap;
}
|
||||
|
||||
/* Create a new bitmap, not full featured, simple to give
|
||||
* us a means of interacting with the 4 bit counters */
|
||||
bitmap_t *new_bitmap(size_t bytes)
|
||||
{
|
||||
bitmap_t *bitmap;
|
||||
|
||||
if ((bitmap = (bitmap_t *)malloc(sizeof(bitmap_t))) == NULL)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bitmap->bytes = bytes;
|
||||
bitmap->array = NULL;
|
||||
|
||||
if ((bitmap = bitmap_resize(bitmap, 0, bytes)) == NULL)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return bitmap;
|
||||
}
|
||||
|
||||
/*
 * Increment the 4-bit counter at `index` (two counters per byte: even
 * index = high nibble, odd index = low nibble).  `offset` is a byte
 * offset into the array, used to skip per-filter headers.
 * Returns 0 on success, -1 if the counter is already saturated at 0xf
 * (the stored byte is then left untouched).
 */
int bitmap_increment(bitmap_t *bitmap, unsigned int index, long offset)
{
    long access = index / 2 + offset;
    uint8_t temp;
    /* Hint the upcoming read of the target byte. */
    __builtin_prefetch(&(bitmap->array[access]), 0, 1);
    uint8_t n = bitmap->array[access];
    if (index % 2 != 0)
    {
        /* Odd index: low nibble.  temp = current value before increment. */
        temp = (n & 0x0f);
        n = (n & 0xf0) + ((n & 0x0f) + 0x01);
    }
    else
    {
        /* Even index: high nibble. */
        temp = (n & 0xf0) >> 4;
        n = (n & 0x0f) + ((n & 0xf0) + 0x10);
    }

    if (temp == 0x0f)
    {
        /* Counter would overflow past 15; refuse and leave memory as-is. */
        // fprintf(stderr, "Error, 4 bit int Overflow\n");
        return -1;
    }

    /* Hint the upcoming write back of the updated byte. */
    __builtin_prefetch(&(bitmap->array[access]), 1, 1);
    bitmap->array[access] = n;
    return 0;
}
|
||||
|
||||
/* Decrement the 4-bit counter at `index` (+ byte `offset`); returns -1 and
 * leaves memory untouched if the counter is already zero, else 0.
 * (The original comment here said "increments" — it belonged to the
 * function above.) */
int bitmap_decrement(bitmap_t *bitmap, unsigned int index, long offset)
{
    long access = index / 2 + offset;
    uint8_t temp;
    uint8_t n = bitmap->array[access];

    if (index % 2 != 0)
    {
        /* Odd index: low nibble.  temp = current value before decrement. */
        temp = (n & 0x0f);
        n = (n & 0xf0) + ((n & 0x0f) - 0x01);
    }
    else
    {
        /* Even index: high nibble. */
        temp = (n & 0xf0) >> 4;
        n = (n & 0x0f) + ((n & 0xf0) - 0x10);
    }

    if (temp == 0x00)
    {
        /* Would underflow below zero; refuse. */
        // fprintf(stderr, "Error, Decrementing zero\n");
        return -1;
    }

    bitmap->array[access] = n;
    return 0;
}
|
||||
|
||||
/* Read the 4-bit counter at `index` (+ byte `offset`).  Returns non-zero
 * iff the counter is > 0; callers only test truthiness, so the even-index
 * (high-nibble) value is returned un-shifted.
 * (The original comment here said "decrements" — it belonged to the
 * function above.) */
int bitmap_check(bitmap_t *bitmap, unsigned int index, long offset)
{
    long access = index / 2 + offset;
    if (index % 2 != 0)
    {
        return bitmap->array[access] & 0x0f;
    }
    else
    {
        return bitmap->array[access] & 0xf0;
    }
}
|
||||
|
||||
/*
 * Flush the bitmap to stable storage.  The msync-backed variant (dead
 * `#if 0` branch in the original) is disabled in this heap-only build, so
 * this is a successful no-op kept for API compatibility.
 * Always returns 0.
 */
int bitmap_flush(bitmap_t *bitmap)
{
    (void)bitmap; /* unused in the heap-only build */
    return 0;
}
|
||||
|
||||
/*
|
||||
* Perform the actual hashing for `key`
|
||||
*
|
||||
* Only call the hash once to get a pair of initial values (h1 and
|
||||
* h2). Use these values to generate all hashes in a quick loop.
|
||||
*
|
||||
* See paper by Kirsch, Mitzenmacher [2006]
|
||||
* http://www.eecs.harvard.edu/~michaelm/postscripts/rsa2008.pdf
|
||||
*/
|
||||
void hash_func(counting_bloom_t *bloom, const char *key, size_t key_len, uint32_t *hashes)
|
||||
{
|
||||
uint32_t checksum[4];
|
||||
|
||||
MurmurHash3_x64_128(key, key_len, SALT_CONSTANT, checksum);
|
||||
uint32_t h1 = checksum[0];
|
||||
uint32_t h2 = checksum[1];
|
||||
|
||||
for (size_t i = 0; i < bloom->nfuncs; i++)
|
||||
{
|
||||
hashes[i] = (h1 + i * h2) % bloom->counts_per_func;
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Release a standalone counting bloom filter: its hash scratch array, its
 * bitmap, and the struct itself.  NULL-safe.  Always returns 0.
 */
int free_counting_bloom(counting_bloom_t *bloom)
{
    if (bloom == NULL)
    {
        return 0;
    }

    free(bloom->hashes);
    bloom->hashes = NULL;
    free_bitmap(bloom->bitmap);
    free(bloom);
    return 0;
}
|
||||
|
||||
counting_bloom_t *counting_bloom_init(unsigned int capacity, double error_rate, long offset)
|
||||
{
|
||||
counting_bloom_t *bloom;
|
||||
|
||||
if ((bloom = (counting_bloom_t *)malloc(sizeof(counting_bloom_t))) == NULL)
|
||||
{
|
||||
fprintf(stderr, "Error, could not realloc a new bloom filter\n");
|
||||
return NULL;
|
||||
}
|
||||
bloom->bitmap = NULL;
|
||||
bloom->capacity = capacity;
|
||||
bloom->error_rate = error_rate;
|
||||
bloom->offset = offset + sizeof(counting_bloom_header_t);
|
||||
bloom->nfuncs = (int)ceil(log(1 / error_rate) / log(2));
|
||||
bloom->counts_per_func = (int)ceil(capacity * fabs(log(error_rate)) / (bloom->nfuncs * pow(log(2), 2)));
|
||||
bloom->size = bloom->nfuncs * bloom->counts_per_func;
|
||||
/* rounding-up integer divide by 2 of bloom->size */
|
||||
bloom->num_bytes = ((bloom->size + 1) / 2) + sizeof(counting_bloom_header_t);
|
||||
bloom->hashes = (uint32_t *)calloc(bloom->nfuncs, sizeof(uint32_t));
|
||||
|
||||
return bloom;
|
||||
}
|
||||
|
||||
counting_bloom_t *new_counting_bloom(unsigned int capacity, double error_rate)
|
||||
{
|
||||
counting_bloom_t *cur_bloom;
|
||||
|
||||
cur_bloom = counting_bloom_init(capacity, error_rate, 0);
|
||||
cur_bloom->bitmap = new_bitmap(cur_bloom->num_bytes);
|
||||
cur_bloom->header = (counting_bloom_header_t *)(cur_bloom->bitmap->array);
|
||||
return cur_bloom;
|
||||
}
|
||||
|
||||
/*
 * Insert `s` (length `len`): increment one 4-bit counter per hash
 * function, then bump the element count in the header.
 * Always returns 0; per-counter saturation from bitmap_increment is
 * deliberately ignored, matching the original behavior.
 */
int counting_bloom_add(counting_bloom_t *bloom, const char *s, size_t len)
{
    unsigned int *hv = bloom->hashes;

    hash_func(bloom, s, len, hv);

    for (size_t fn = 0; fn < bloom->nfuncs; fn++)
    {
        unsigned int slot = hv[fn] + fn * bloom->counts_per_func;
        bitmap_increment(bloom->bitmap, slot, bloom->offset);
    }
    bloom->header->count++;

    return 0;
}
|
||||
|
||||
/*
 * Remove `s` (length `len`): decrement one 4-bit counter per hash
 * function, then lower the element count in the header.
 * Always returns 0; underflow reports from bitmap_decrement are
 * deliberately ignored, matching the original behavior.
 */
int counting_bloom_remove(counting_bloom_t *bloom, const char *s, size_t len)
{
    unsigned int *hv = bloom->hashes;

    hash_func(bloom, s, len, hv);

    for (size_t fn = 0; fn < bloom->nfuncs; fn++)
    {
        unsigned int slot = hv[fn] + fn * bloom->counts_per_func;
        bitmap_decrement(bloom->bitmap, slot, bloom->offset);
    }
    bloom->header->count--;

    return 0;
}
|
||||
|
||||
/*
 * Membership test: returns 1 when every counter addressed by the hashes
 * is non-zero (possible member, false positives allowed), 0 as soon as
 * any counter is zero (definitely absent).
 */
int counting_bloom_check(counting_bloom_t *bloom, const char *s, size_t len)
{
    unsigned int *hv = bloom->hashes;

    hash_func(bloom, s, len, hv);

    for (size_t fn = 0; fn < bloom->nfuncs; fn++)
    {
        unsigned int slot = hv[fn] + fn * bloom->counts_per_func;
        if (bitmap_check(bloom->bitmap, slot, bloom->offset) == 0)
        {
            return 0;
        }
    }
    return 1;
}
|
||||
|
||||
/*
 * Release a scaling bloom filter.  Sub-blooms share the parent's bitmap,
 * so only their hash scratch and struct are freed per sub-bloom; the
 * shared bitmap is released once at the end.  Always returns 0.
 */
int free_scaling_bloom(scaling_bloom_t *bloom)
{
    int i;
    for (i = bloom->num_blooms - 1; i >= 0; i--)
    {
        counting_bloom_t *cb = bloom->blooms[i];
        free(cb->hashes);
        cb->hashes = NULL;
        free(cb);
        bloom->blooms[i] = NULL;
    }
    free(bloom->blooms);
    free_bitmap(bloom->bitmap);
    free(bloom);
    return 0;
}
|
||||
|
||||
/* creates a new counting bloom filter from a given scaling bloom filter, with count and id */
|
||||
counting_bloom_t *new_counting_bloom_from_scale(scaling_bloom_t *bloom)
|
||||
{
|
||||
long offset;
|
||||
double error_rate;
|
||||
counting_bloom_t *cur_bloom;
|
||||
|
||||
error_rate = bloom->error_rate * (pow(ERROR_TIGHTENING_RATIO, bloom->num_blooms + 1));
|
||||
|
||||
if ((bloom->blooms = (counting_bloom_t **)realloc(bloom->blooms, (bloom->num_blooms + 1) * sizeof(counting_bloom_t *))) == NULL)
|
||||
{
|
||||
fprintf(stderr, "Error, could not realloc a new bloom filter\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
cur_bloom = counting_bloom_init(bloom->capacity, error_rate, bloom->num_bytes);
|
||||
bloom->blooms[bloom->num_blooms] = cur_bloom;
|
||||
|
||||
bloom->bitmap = bitmap_resize(bloom->bitmap, bloom->num_bytes, bloom->num_bytes + cur_bloom->num_bytes);
|
||||
|
||||
/* reset header pointer, as mmap may have moved */
|
||||
bloom->header = (scaling_bloom_header_t *)bloom->bitmap->array;
|
||||
|
||||
/* Set the pointers for these header structs to the right location since mmap may have moved */
|
||||
bloom->num_blooms++;
|
||||
for (unsigned int i = 0; i < bloom->num_blooms; i++)
|
||||
{
|
||||
offset = bloom->blooms[i]->offset - sizeof(counting_bloom_header_t);
|
||||
bloom->blooms[i]->header = (counting_bloom_header_t *)(bloom->bitmap->array + offset);
|
||||
}
|
||||
|
||||
bloom->num_bytes += cur_bloom->num_bytes;
|
||||
cur_bloom->bitmap = bloom->bitmap;
|
||||
|
||||
return cur_bloom;
|
||||
}
|
||||
|
||||
/*
 * Invalidate both sequence numbers ahead of a mutation and hand back the
 * previous mem_seqnum so the caller can re-arm it once the change lands.
 */
uint64_t scaling_bloom_clear_seqnums(scaling_bloom_t *bloom)
{
    if (bloom->header->disk_seqnum != 0)
    {
        /* disk_seqnum must be cleared (and flushed) before any other change. */
        bloom->header->disk_seqnum = 0;
        bitmap_flush(bloom->bitmap);
    }

    uint64_t previous = bloom->header->mem_seqnum;
    bloom->header->mem_seqnum = 0;
    return previous;
}
|
||||
|
||||
/*
 * Add `s` under monotonically increasing `id`: the newest sub-bloom whose
 * base id is <= id receives the insert; when the current sub-bloom is at
 * capacity a fresh one is grown.
 * Returns 1 on success, -1 on failure — the original dereferenced a NULL
 * cur_bloom both when num_blooms == 0 and when
 * new_counting_bloom_from_scale() failed.
 */
int scaling_bloom_add(scaling_bloom_t *bloom, const char *s, size_t len, uint64_t id)
{
    int i;
    uint64_t seqnum;

    counting_bloom_t *cur_bloom = NULL;
    for (i = bloom->num_blooms - 1; i >= 0; i--)
    {
        cur_bloom = bloom->blooms[i];
        if (id >= cur_bloom->header->id)
        {
            break;
        }
    }
    if (cur_bloom == NULL)
    {
        /* No sub-blooms exist; nothing to insert into. */
        return -1;
    }

    seqnum = scaling_bloom_clear_seqnums(bloom);

    if ((id > bloom->header->max_id) && (cur_bloom->header->count >= cur_bloom->capacity - 1))
    {
        cur_bloom = new_counting_bloom_from_scale(bloom);
        if (cur_bloom == NULL)
        {
            return -1;
        }
        cur_bloom->header->count = 0;
        cur_bloom->header->id = bloom->header->max_id + 1;
    }
    if (bloom->header->max_id < id)
    {
        bloom->header->max_id = id;
    }
    counting_bloom_add(cur_bloom, s, len);

    bloom->header->mem_seqnum = seqnum + 1;

    return 1;
}
|
||||
|
||||
/*
 * Remove `s` from the newest sub-bloom whose base id is <= `id`.
 * Returns 1 when a removal was issued, 0 when no sub-bloom matched.
 */
int scaling_bloom_remove(scaling_bloom_t *bloom, const char *s, size_t len, uint64_t id)
{
    int i;

    for (i = bloom->num_blooms - 1; i >= 0; i--)
    {
        counting_bloom_t *cb = bloom->blooms[i];
        if (id < cb->header->id)
        {
            continue;
        }

        uint64_t seqnum = scaling_bloom_clear_seqnums(bloom);

        counting_bloom_remove(cb, s, len);

        bloom->header->mem_seqnum = seqnum + 1;
        return 1;
    }
    return 0;
}
|
||||
|
||||
/*
 * Membership test across all sub-blooms, newest first.
 * Returns 1 if any sub-bloom may contain `s`, else 0.
 */
int scaling_bloom_check(scaling_bloom_t *bloom, const char *s, size_t len)
{
    int i;
    for (i = bloom->num_blooms - 1; i >= 0; i--)
    {
        if (counting_bloom_check(bloom->blooms[i], s, len) != 0)
        {
            return 1;
        }
    }
    return 0;
}
|
||||
|
||||
/*
 * Flush the shared bitmap, then publish disk_seqnum once all data has
 * been written (flush → set seqnum → flush again preserves the ordering
 * contract even though bitmap_flush is a no-op in this build).
 * Returns 0 on success, -1 if a flush fails.
 */
int scaling_bloom_flush(scaling_bloom_t *bloom)
{
    if (bitmap_flush(bloom->bitmap) != 0)
    {
        return -1;
    }
    // all changes written to disk before disk_seqnum set
    if (bloom->header->disk_seqnum != 0)
    {
        return 0;
    }
    bloom->header->disk_seqnum = bloom->header->mem_seqnum;
    return bitmap_flush(bloom->bitmap);
}
|
||||
|
||||
/* Return the in-memory change sequence number from the filter header. */
uint64_t scaling_bloom_mem_seqnum(scaling_bloom_t *bloom)
{
    return bloom->header->mem_seqnum;
}
|
||||
|
||||
/* Return the last sequence number known to be flushed (0 = dirty). */
uint64_t scaling_bloom_disk_seqnum(scaling_bloom_t *bloom)
{
    return bloom->header->disk_seqnum;
}
|
||||
|
||||
scaling_bloom_t *scaling_bloom_init(unsigned int capacity, double error_rate)
|
||||
{
|
||||
scaling_bloom_t *bloom;
|
||||
|
||||
if ((bloom = (scaling_bloom_t *)malloc(sizeof(scaling_bloom_t))) == NULL)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
if ((bloom->bitmap = new_bitmap(sizeof(scaling_bloom_header_t))) == NULL)
|
||||
{
|
||||
fprintf(stderr, "Error, Could not create bitmap with file\n");
|
||||
free_scaling_bloom(bloom);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bloom->header = (scaling_bloom_header_t *)bloom->bitmap->array;
|
||||
bloom->capacity = capacity;
|
||||
bloom->error_rate = error_rate;
|
||||
bloom->num_blooms = 0;
|
||||
bloom->num_bytes = sizeof(scaling_bloom_header_t);
|
||||
bloom->blooms = NULL;
|
||||
|
||||
return bloom;
|
||||
}
|
||||
|
||||
scaling_bloom_t *new_scaling_bloom(unsigned int capacity, double error_rate)
|
||||
{
|
||||
|
||||
scaling_bloom_t *bloom;
|
||||
counting_bloom_t *cur_bloom;
|
||||
|
||||
bloom = scaling_bloom_init(capacity, error_rate);
|
||||
|
||||
if (!(cur_bloom = new_counting_bloom_from_scale(bloom)))
|
||||
{
|
||||
fprintf(stderr, "Error, Could not create counting bloom\n");
|
||||
free_scaling_bloom(bloom);
|
||||
return NULL;
|
||||
}
|
||||
cur_bloom->header->count = 0;
|
||||
cur_bloom->header->id = 0;
|
||||
|
||||
bloom->header->mem_seqnum = 1;
|
||||
return bloom;
|
||||
}
|
||||
|
||||
/*
 * Time-expiring bloom filter: a "current" scaling bloom serves all
 * lookups/inserts, while a "next" bloom is warmed up during the final
 * expiry window so recent history survives the rotation.
 */
struct expiry_dablooms_handle
{
    scaling_bloom_t *cur_bloom;   /* bloom serving lookups and inserts */
    scaling_bloom_t *next_bloom;  /* pre-warmed successor, NULL until needed */
    time_t cur_bloom_start;       /* creation time of cur_bloom */
    time_t next_bloom_start;      /* creation time of next_bloom */
    time_t last_bloom_check;      /* last second at which expiry was evaluated */
    uint64_t cur_bloom_inc_id;    /* monotonically increasing insert id (= element count) */
    uint64_t next_bloom_inc_id;   /* insert id for next_bloom */
    unsigned int capacity;        /* per-bloom element capacity */
    int expiry_time;              /* rotation period in seconds */
    time_t cur_time;              /* most recent timestamp seen */
    double error_rate;            /* target false-positive rate */
};
|
||||
|
||||
char *expiry_dablooms_errno_trans(enum expiry_dablooms_errno _errno)
|
||||
{
|
||||
switch (_errno)
|
||||
{
|
||||
case EXPIRY_DABLOOMS_ERRNO_BLOOM_NULL:
|
||||
return (char *)"scaling_bloom_null";
|
||||
case EXPIRY_DABLOOMS_ERRNO_NEW_BLOOM_FAIL:
|
||||
return (char *)"new_scaling_bloom_fail";
|
||||
default:
|
||||
return (char *)"unknown";
|
||||
}
|
||||
}
|
||||
|
||||
void expiry_dablooms_destroy(struct expiry_dablooms_handle *handle)
|
||||
{
|
||||
if (handle != NULL)
|
||||
{
|
||||
if (handle->cur_bloom != NULL)
|
||||
{
|
||||
free_scaling_bloom(handle->cur_bloom);
|
||||
}
|
||||
if (handle->next_bloom != NULL)
|
||||
{
|
||||
free_scaling_bloom(handle->next_bloom);
|
||||
}
|
||||
FREE(&handle);
|
||||
}
|
||||
}
|
||||
|
||||
struct expiry_dablooms_handle *expiry_dablooms_init(unsigned int capacity, double error_rate, time_t cur_time, int expiry_time)
|
||||
{
|
||||
struct expiry_dablooms_handle *handle = ALLOC(struct expiry_dablooms_handle, 1);
|
||||
scaling_bloom_t *cur_bloom = new_scaling_bloom(capacity, error_rate);
|
||||
if (cur_bloom == NULL)
|
||||
{
|
||||
goto error_out;
|
||||
}
|
||||
handle->cur_bloom = cur_bloom;
|
||||
handle->cur_bloom_inc_id = 0;
|
||||
handle->cur_bloom_start = cur_time;
|
||||
handle->capacity = capacity;
|
||||
handle->error_rate = error_rate;
|
||||
handle->expiry_time = expiry_time;
|
||||
handle->cur_time = cur_time;
|
||||
return handle;
|
||||
|
||||
error_out:
|
||||
expiry_dablooms_destroy(handle);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int expiry_dablooms_element_count_get(struct expiry_dablooms_handle *handle, uint64_t *count)
|
||||
{
|
||||
if (handle == NULL || handle->cur_bloom == NULL)
|
||||
{
|
||||
return EXPIRY_DABLOOMS_ERRNO_BLOOM_NULL;
|
||||
}
|
||||
*count = handle->cur_bloom_inc_id;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * Rotate the blooms once the current one has lived past expiry_time: the
 * current bloom is destroyed and replaced either by the pre-warmed next
 * bloom (if one exists) or by a fresh empty one.
 * Returns 0 on success or a *positive* EXPIRY_DABLOOMS_ERRNO_* code —
 * note that callers testing "< 0" will never see these errors.
 */
static int bloom_expired_check(struct expiry_dablooms_handle *handle, time_t cur_time)
{
    if (handle == NULL || handle->cur_bloom == NULL)
    {
        return EXPIRY_DABLOOMS_ERRNO_BLOOM_NULL;
    }
    /* Evaluate expiry at most once per advancing second. */
    if (cur_time <= handle->last_bloom_check)
    {
        return 0;
    }
    time_t delta_time = cur_time - handle->cur_bloom_start;
    handle->cur_time = cur_time;
    if (delta_time >= handle->expiry_time)
    {
        free_scaling_bloom(handle->cur_bloom);
        if (handle->next_bloom != NULL)
        {
            /* Promote the pre-warmed successor built by expiry_dablooms_add. */
            handle->cur_bloom = handle->next_bloom;
            handle->cur_bloom_start = handle->next_bloom_start;
            handle->cur_bloom_inc_id = handle->next_bloom_inc_id;
            handle->next_bloom = NULL;
            handle->last_bloom_check = 0;
        }
        else
        {
            /* No successor exists; start over with an empty bloom.
             * NOTE(review): if this allocation fails, cur_bloom was
             * already freed above yet still points at freed memory —
             * confirm every caller bails out on a non-zero return. */
            scaling_bloom_t *cur_bloom = new_scaling_bloom(handle->capacity, handle->error_rate);
            if (cur_bloom == NULL)
            {
                return EXPIRY_DABLOOMS_ERRNO_NEW_BLOOM_FAIL;
            }
            handle->cur_bloom = cur_bloom;
            handle->cur_bloom_inc_id = 0;
            handle->cur_bloom_start = cur_time;
            handle->last_bloom_check = 0;
        }
    }
    else
    {
        handle->last_bloom_check = cur_time;
    }
    return 0;
}
|
||||
|
||||
int expiry_dablooms_add(struct expiry_dablooms_handle *handle, const char *key, size_t len, time_t cur_time)
|
||||
{
|
||||
if (key == NULL || len == 0 || handle == NULL)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
int ret = bloom_expired_check(handle, cur_time);
|
||||
if (ret < 0)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
|
||||
scaling_bloom_add(handle->cur_bloom, key, len, handle->cur_bloom_inc_id);
|
||||
handle->cur_bloom_inc_id++;
|
||||
time_t delta_time = cur_time - handle->cur_bloom_start;
|
||||
handle->cur_time = cur_time;
|
||||
if (delta_time >= handle->expiry_time)
|
||||
{
|
||||
if (handle->next_bloom == NULL)
|
||||
{
|
||||
scaling_bloom_t *next_bloom = new_scaling_bloom(handle->capacity, handle->error_rate);
|
||||
if (next_bloom == NULL)
|
||||
{
|
||||
return EXPIRY_DABLOOMS_ERRNO_NEW_BLOOM_FAIL;
|
||||
}
|
||||
handle->next_bloom = next_bloom;
|
||||
handle->next_bloom_inc_id = 0;
|
||||
handle->next_bloom_start = cur_time;
|
||||
}
|
||||
scaling_bloom_add(handle->next_bloom, key, len, handle->next_bloom_inc_id);
|
||||
handle->next_bloom_inc_id++;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int expiry_dablooms_search(struct expiry_dablooms_handle *handle, const char *key, size_t len, time_t cur_time)
|
||||
{
|
||||
if (key == NULL || len == 0 || handle == NULL)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
int ret = bloom_expired_check(handle, cur_time);
|
||||
if (ret < 0)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
int bloom_hit = scaling_bloom_check(handle->cur_bloom, key, len);
|
||||
return bloom_hit;
|
||||
}
|
||||
153
common/src/murmur.cpp
Normal file
153
common/src/murmur.cpp
Normal file
@@ -0,0 +1,153 @@
|
||||
//-----------------------------------------------------------------------------
|
||||
// MurmurHash3 was written by Austin Appleby, and is placed in the public
|
||||
// domain. The author hereby disclaims copyright to this source code.
|
||||
|
||||
// Note - The x86 and x64 versions do _not_ produce the same results, as the
|
||||
// algorithms are optimized for their respective platforms. You can still
|
||||
// compile and run any of them on any platform, but your performance with the
|
||||
// non-native version will be less than optimal.
|
||||
|
||||
#include "murmur.h"
|
||||
|
||||
#define FORCE_INLINE inline static
|
||||
|
||||
/* Rotate x left by r bits.
 * NOTE(review): r must be in 1..63 — r == 0 would shift right by 64,
 * which is undefined behavior; all call sites here pass 27..33. */
FORCE_INLINE uint64_t rotl64(uint64_t x, int8_t r)
{
    return (x << r) | (x >> (64 - r));
}
|
||||
|
||||
#define ROTL64(x, y) rotl64(x, y)
|
||||
|
||||
#define BIG_CONSTANT(x) (x##LLU)
|
||||
|
||||
#define getblock(x, i) (x[i])
|
||||
|
||||
//-----------------------------------------------------------------------------
// Finalization mix - force all bits of a hash block to avalanche
// (xor-shift / multiply constants from the MurmurHash3 reference).

FORCE_INLINE uint64_t fmix64(uint64_t k)
{
    k ^= k >> 33;
    k *= BIG_CONSTANT(0xff51afd7ed558ccd);
    k ^= k >> 33;
    k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
    k ^= k >> 33;

    return k;
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
/*
 * 128-bit MurmurHash3, x64 variant.  Writes two 64-bit words to `out`
 * (caller supplies at least 16 bytes).  Results intentionally differ from
 * the x86 variant.
 * NOTE(review): the body reads `data` through a uint64_t* cast, which
 * assumes the platform tolerates unaligned / type-punned loads — true on
 * x86/x64, verify before porting elsewhere.
 */
void MurmurHash3_x64_128(const void *key, const int len, const uint32_t seed, void *out)
{
    const uint8_t *data = (const uint8_t *)key;
    const int nblocks = len / 16;

    uint64_t h1 = seed;
    uint64_t h2 = seed;

    uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
    uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

    int i;

    //----------
    // body: mix 16 bytes (two 64-bit lanes) per iteration

    const uint64_t *blocks = (const uint64_t *)(data);

    for (i = 0; i < nblocks; i++)
    {
        uint64_t k1 = getblock(blocks, i * 2 + 0);
        uint64_t k2 = getblock(blocks, i * 2 + 1);

        k1 *= c1;
        k1 = ROTL64(k1, 31);
        k1 *= c2;
        h1 ^= k1;

        h1 = ROTL64(h1, 27);
        h1 += h2;
        h1 = h1 * 5 + 0x52dce729;

        k2 *= c2;
        k2 = ROTL64(k2, 33);
        k2 *= c1;
        h2 ^= k2;

        h2 = ROTL64(h2, 31);
        h2 += h1;
        h2 = h2 * 5 + 0x38495ab5;
    }

    //----------
    // tail: the remaining 0..15 bytes; every case falls through by design

    const uint8_t *tail = (const uint8_t *)(data + nblocks * 16);

    uint64_t k1 = 0;
    uint64_t k2 = 0;

    switch (len & 15)
    {
    case 15:
        k2 ^= ((uint64_t)tail[14]) << 48;
        /* fallthrough */
    case 14:
        k2 ^= ((uint64_t)tail[13]) << 40;
        /* fallthrough */
    case 13:
        k2 ^= ((uint64_t)tail[12]) << 32;
        /* fallthrough */
    case 12:
        k2 ^= ((uint64_t)tail[11]) << 24;
        /* fallthrough */
    case 11:
        k2 ^= ((uint64_t)tail[10]) << 16;
        /* fallthrough */
    case 10:
        k2 ^= ((uint64_t)tail[9]) << 8;
        /* fallthrough */
    case 9:
        k2 ^= ((uint64_t)tail[8]) << 0;
        k2 *= c2;
        k2 = ROTL64(k2, 33);
        k2 *= c1;
        h2 ^= k2;
        /* fallthrough */
    case 8:
        k1 ^= ((uint64_t)tail[7]) << 56;
        /* fallthrough */
    case 7:
        k1 ^= ((uint64_t)tail[6]) << 48;
        /* fallthrough */
    case 6:
        k1 ^= ((uint64_t)tail[5]) << 40;
        /* fallthrough */
    case 5:
        k1 ^= ((uint64_t)tail[4]) << 32;
        /* fallthrough */
    case 4:
        k1 ^= ((uint64_t)tail[3]) << 24;
        /* fallthrough */
    case 3:
        k1 ^= ((uint64_t)tail[2]) << 16;
        /* fallthrough */
    case 2:
        k1 ^= ((uint64_t)tail[1]) << 8;
        /* fallthrough */
    case 1:
        k1 ^= ((uint64_t)tail[0]) << 0;
        k1 *= c1;
        k1 = ROTL64(k1, 31);
        k1 *= c2;
        h1 ^= k1;
    }

    //----------
    // finalization

    h1 ^= len;
    h2 ^= len;

    h1 += h2;
    h2 += h1;

    h1 = fmix64(h1);
    h2 = fmix64(h2);

    h1 += h2;
    h2 += h1;

    ((uint64_t *)out)[0] = h1;
    ((uint64_t *)out)[1] = h2;
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
1475
common/src/packet.cpp
Normal file
1475
common/src/packet.cpp
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,57 +0,0 @@
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <arpa/inet.h>
|
||||
|
||||
#include "tfe_addr_tuple4.h"
|
||||
|
||||
/*
 * Format a 4-tuple as "src_ip src_port dst_ip dst_port" (ports converted
 * from network to host order).  Returns a heap-allocated string the
 * caller must free(), or NULL for an unrecognized address type.
 * NOTE(review): asprintf's return value is unchecked — per its man page,
 * on failure the output pointer's contents are undefined on some libcs;
 * confirm callers tolerate a possible NULL/garbage on OOM.
 */
char *addr_tuple4_to_str(const struct addr_tuple4 *addr)
{
    char *str_ret = NULL;

    if (addr->addr_type == ADDR_TUPLE4_TYPE_V4)
    {
        char src_addr[INET_ADDRSTRLEN] = {0};
        char dst_addr[INET_ADDRSTRLEN] = {0};
        uint16_t src_port = ntohs((uint16_t)addr->src_port);
        uint16_t dst_port = ntohs((uint16_t)addr->dst_port);
        inet_ntop(AF_INET, &addr->addr_v4.src_addr, src_addr, sizeof(src_addr));
        inet_ntop(AF_INET, &addr->addr_v4.dst_addr, dst_addr, sizeof(dst_addr));
        asprintf(&str_ret, "%s %u %s %u", src_addr, src_port, dst_addr, dst_port);
    }

    if (addr->addr_type == ADDR_TUPLE4_TYPE_V6)
    {
        char src_addr[INET6_ADDRSTRLEN] = {0};
        char dst_addr[INET6_ADDRSTRLEN] = {0};
        uint16_t src_port = ntohs((uint16_t)addr->src_port);
        uint16_t dst_port = ntohs((uint16_t)addr->dst_port);
        inet_ntop(AF_INET6, &addr->addr_v6.src_addr, src_addr, sizeof(src_addr));
        inet_ntop(AF_INET6, &addr->addr_v6.dst_addr, dst_addr, sizeof(dst_addr));
        asprintf(&str_ret, "%s %u %s %u", src_addr, src_port, dst_addr, dst_port);
    }

    return str_ret;
}
|
||||
|
||||
/*
 * Fill `out` with `orin`'s tuple with source and destination (addresses
 * and ports) swapped.  `out` is zeroed first, so an unrecognized address
 * type leaves `out` all-zero.
 */
void addr_tuple4_reverse(const struct addr_tuple4 *orin, struct addr_tuple4 *out)
{
    memset(out, 0, sizeof(struct addr_tuple4));

    if (orin->addr_type == ADDR_TUPLE4_TYPE_V4)
    {
        out->addr_type = ADDR_TUPLE4_TYPE_V4;
        out->addr_v4.src_addr = orin->addr_v4.dst_addr;
        out->addr_v4.dst_addr = orin->addr_v4.src_addr;
        out->src_port = orin->dst_port;
        out->dst_port = orin->src_port;
    }

    if (orin->addr_type == ADDR_TUPLE4_TYPE_V6)
    {
        out->addr_type = ADDR_TUPLE4_TYPE_V6;
        out->addr_v6.src_addr = orin->addr_v6.dst_addr;
        out->addr_v6.dst_addr = orin->addr_v6.src_addr;
        out->src_port = orin->dst_port;
        out->dst_port = orin->src_port;
    }
}
|
||||
@@ -1,7 +1,6 @@
|
||||
#include <assert.h>
|
||||
#include <netinet/ip.h>
|
||||
#include <netinet/udp.h>
|
||||
#include <netinet/tcp.h>
|
||||
#include <netinet/ether.h>
|
||||
#include <linux/if_tun.h>
|
||||
#include <sys/eventfd.h>
|
||||
@@ -18,7 +17,9 @@
|
||||
#include <time.h>
|
||||
|
||||
#include "tfe_ctrl_packet.h"
|
||||
#include "tfe_raw_packet.h"
|
||||
#include "packet.h"
|
||||
#include "ipv4_helpers.h"
|
||||
#include "tcp_helpers.h"
|
||||
#include "io_uring.h"
|
||||
#include "tfe_packet_io_fs.h"
|
||||
#include "tfe_cmsg.h"
|
||||
@@ -31,7 +32,8 @@
|
||||
#include "tfe_session_table.h"
|
||||
#include "tfe_packet_io.h"
|
||||
#include "tfe_fieldstat.h"
|
||||
|
||||
#include "dablooms.h"
|
||||
#include "timestamp.h"
|
||||
|
||||
/******************************************************************************
|
||||
* Struct
|
||||
@@ -134,10 +136,87 @@ struct metadata
|
||||
struct route_ctx route_ctx;
|
||||
};
|
||||
|
||||
/*
 * Fixed-layout fingerprint of a TCP/IPv4 packet used as the key for the
 * duplicate-traffic bloom filter.  Packed so the raw struct bytes form a
 * padding-free, stable hash key.
 */
struct packet_identify
{
    // TCP
    uint32_t tcp_seq;
    uint32_t tcp_ack;
    uint16_t sport;
    uint16_t dport;
    uint16_t tcp_checksum;

    // IPv4
    uint16_t ip_id;
    uint32_t ip_src;
    uint32_t ip_dst;
} __attribute__((__packed__));
|
||||
|
||||
extern int tcp_policy_enforce(struct tcp_policy_enforcer *tcp_enforcer, struct tfe_cmsg *cmsg);
|
||||
extern int tfe_proxy_fds_accept(struct tfe_proxy * ctx, int fd_downstream, int fd_upstream, int fd_fake_c, int fd_fake_s, struct tfe_cmsg * cmsg);
|
||||
extern void chaining_policy_enforce(struct chaining_policy_enforcer *enforcer, struct tfe_cmsg *cmsg, uint64_t rule_id);
|
||||
|
||||
/******************************************************************************
|
||||
* dup packet filter
|
||||
******************************************************************************/
|
||||
|
||||
// return 0: success — `key` is filled with the duplicate-detection
//           fingerprint (TCP seq/ack/ports/checksum + IPv4 id/src/dst)
// return -1: error — packet has no innermost IPv4 or TCP layer
static int get_packet_identify(struct packet *packet, struct packet_identify *key)
{
    const struct layer_record *l3_layer_record = packet_get_innermost_layer(packet, LAYER_TYPE_IPV4);
    if (l3_layer_record == NULL)
    {
        return -1;
    }
    const struct layer_record *l4_layer_record = packet_get_innermost_layer(packet, LAYER_TYPE_TCP);
    if (l4_layer_record == NULL)
    {
        return -1;
    }

    const struct ip *iphdr = (const struct ip *)l3_layer_record->hdr_ptr;
    const struct tcphdr *tcphdr = (const struct tcphdr *)l4_layer_record->hdr_ptr;
    /* Zero first so struct padding can never leak into the hash key
     * (the struct is packed, but this keeps the key fully deterministic). */
    memset(key, 0, sizeof(struct packet_identify));
    key->tcp_seq = tcp_hdr_get_seq(tcphdr);
    key->tcp_ack = tcp_hdr_get_ack(tcphdr);
    key->sport = tcp_hdr_get_sport(tcphdr);
    key->dport = tcp_hdr_get_dport(tcphdr);
    key->tcp_checksum = tcp_hdr_get_checksum(tcphdr);
    key->ip_id = ipv4_hdr_get_ipid(iphdr);
    key->ip_src = ipv4_hdr_get_src(iphdr);
    key->ip_dst = ipv4_hdr_get_dst(iphdr);
    return 0;
}
|
||||
|
||||
// Fingerprint `packet` and insert it into the expiring duplicate filter,
// timestamped with the current second.  Packets without an IPv4+TCP
// layer are silently skipped.
static void add_packet_to_dablooms(struct packet *packet, struct expiry_dablooms_handle *handle)
{
    struct packet_identify identify;
    if (get_packet_identify(packet, &identify) == -1)
    {
        return;
    }

    expiry_dablooms_add(handle, (const char *)&identify, sizeof(struct packet_identify), timestamp_get_sec());
}
|
||||
|
||||
// return 1: hit — the packet's fingerprint was seen before (possible duplicate)
// return 0: no hit, or the packet has no IPv4+TCP layer to fingerprint
static int search_packet_from_dablooms(struct packet *packet, struct expiry_dablooms_handle *handle)
{
    struct packet_identify identify;
    if (get_packet_identify(packet, &identify) == -1)
    {
        return 0;
    }

    if (expiry_dablooms_search(handle, (const char *)&identify, sizeof(struct packet_identify), timestamp_get_sec()) == 1)
    {
        return 1;
    }

    return 0;
}
|
||||
|
||||
/******************************************************************************
|
||||
* STATIC
|
||||
******************************************************************************/
|
||||
@@ -224,12 +303,6 @@ static void session_ctx_free(struct session_ctx *ctx)
|
||||
{
|
||||
if (ctx)
|
||||
{
|
||||
if (ctx->session_addr)
|
||||
{
|
||||
free(ctx->session_addr);
|
||||
ctx->session_addr = NULL;
|
||||
}
|
||||
|
||||
if (ctx->cmsg)
|
||||
{
|
||||
tfe_cmsg_destroy(&ctx->cmsg);
|
||||
@@ -449,9 +522,9 @@ static int tcp_restore_set_from_cmsg(struct tfe_cmsg *cmsg, struct tcp_restore_i
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int tcp_restore_set_from_pkg(struct addr_tuple4 *tuple4, struct tcp_restore_info *restore_info)
|
||||
static int tcp_restore_set_from_pkg(struct tuple4 *tuple4, struct tcp_restore_info *restore_info)
|
||||
{
|
||||
if (tuple4->addr_type == ADDR_TUPLE4_TYPE_V4)
|
||||
if (tuple4->ip_type == IP_TYPE_V4)
|
||||
{
|
||||
struct sockaddr_in *in_addr_client;
|
||||
struct sockaddr_in *in_addr_server;
|
||||
@@ -468,15 +541,15 @@ static int tcp_restore_set_from_pkg(struct addr_tuple4 *tuple4, struct tcp_resto
|
||||
}
|
||||
|
||||
in_addr_client->sin_family = AF_INET;
|
||||
in_addr_client->sin_addr = tuple4->addr_v4.src_addr;
|
||||
in_addr_client->sin_addr = tuple4->src_addr.v4;
|
||||
in_addr_client->sin_port = tuple4->src_port;
|
||||
|
||||
in_addr_server->sin_family = AF_INET;
|
||||
in_addr_server->sin_addr = tuple4->addr_v4.dst_addr;
|
||||
in_addr_server->sin_addr = tuple4->dst_addr.v4;
|
||||
in_addr_server->sin_port = tuple4->dst_port;
|
||||
}
|
||||
|
||||
if (tuple4->addr_type == ADDR_TUPLE4_TYPE_V6)
|
||||
if (tuple4->ip_type == IP_TYPE_V6)
|
||||
{
|
||||
struct sockaddr_in6 *in6_addr_client;
|
||||
struct sockaddr_in6 *in6_addr_server;
|
||||
@@ -493,11 +566,11 @@ static int tcp_restore_set_from_pkg(struct addr_tuple4 *tuple4, struct tcp_resto
|
||||
}
|
||||
|
||||
in6_addr_client->sin6_family = AF_INET6;
|
||||
in6_addr_client->sin6_addr = tuple4->addr_v6.src_addr;
|
||||
in6_addr_client->sin6_addr = tuple4->src_addr.v6;
|
||||
in6_addr_client->sin6_port = tuple4->src_port;
|
||||
|
||||
in6_addr_server->sin6_family = AF_INET6;
|
||||
in6_addr_server->sin6_addr = tuple4->addr_v6.dst_addr;
|
||||
in6_addr_server->sin6_addr = tuple4->dst_addr.v6;
|
||||
in6_addr_server->sin6_port = tuple4->dst_port;
|
||||
}
|
||||
|
||||
@@ -997,6 +1070,34 @@ static void packet_io_send_fake_pkt(struct packet_io_thread_ctx *thread, struct
|
||||
marsio_send_burst_with_options(packet_io->dev_nf_interface.mr_path, thread->thread_index, tx_buffs, 3, MARSIO_SEND_OPT_REHASH);
|
||||
}
|
||||
|
||||
int raw_traffic_decapsulate(struct packet *handler, const char *raw_data, int raw_len, char **header, int *header_len, int *is_ipv4)
|
||||
{
|
||||
const struct layer_record *l2_tun_layer_record = NULL;
|
||||
const struct layer_record *l3_layer_record = NULL;
|
||||
const struct layer_record *l4_layer_record = NULL;
|
||||
|
||||
l4_layer_record = packet_get_innermost_layer(handler, LAYER_TYPE_L4);
|
||||
if (l4_layer_record == NULL)
|
||||
return -1;
|
||||
|
||||
if (l4_layer_record->type != LAYER_TYPE_TCP)
|
||||
return -1;
|
||||
|
||||
l3_layer_record = packet_get_innermost_layer(handler, LAYER_TYPE_L3);
|
||||
if (l3_layer_record == NULL)
|
||||
return -1;
|
||||
|
||||
*is_ipv4 = l3_layer_record->type == LAYER_TYPE_IPV4 ? 1 : 0;
|
||||
l2_tun_layer_record = packet_get_innermost_layer(handler, LAYER_TYPE_L2_TUN);
|
||||
if (l2_tun_layer_record == NULL)
|
||||
return -1;
|
||||
|
||||
*header_len = l3_layer_record->hdr_offset;
|
||||
*header = (char *)calloc(*header_len, sizeof(char));
|
||||
memcpy(*header, raw_data, *header_len);
|
||||
return 0;
|
||||
}
|
||||
|
||||
// return 0 : success
|
||||
// return -1 : error
|
||||
static int handle_session_opening(struct metadata *meta, struct ctrl_pkt_parser *parser, int thread_seq, void *ctx)
|
||||
@@ -1021,7 +1122,7 @@ static int handle_session_opening(struct metadata *meta, struct ctrl_pkt_parser
|
||||
uint8_t enable_decrypted_traffic_steering = 0;
|
||||
struct ethhdr *ether_hdr = NULL;
|
||||
struct session_ctx *s_ctx = NULL;
|
||||
struct addr_tuple4 inner_tuple4;
|
||||
struct tuple4 inner_tuple4;
|
||||
struct tcp_restore_info restore_info;
|
||||
memset(&inner_tuple4, 0, sizeof(inner_tuple4));
|
||||
memset(&restore_info, 0, sizeof(restore_info));
|
||||
@@ -1031,15 +1132,14 @@ static int handle_session_opening(struct metadata *meta, struct ctrl_pkt_parser
|
||||
struct packet_io_fs *packet_io_fs = thread->ret_fs_state;
|
||||
void * logger = thread->logger;
|
||||
|
||||
struct raw_pkt_parser raw_parser;
|
||||
raw_packet_parser_init(&raw_parser, meta->session_id, LAYER_TYPE_ALL, 8);
|
||||
const void *payload = raw_packet_parser_parse(&raw_parser, (const void *)meta->raw_data, meta->raw_len, logger);
|
||||
struct packet pkt;
|
||||
const void *payload = packet_parse(&pkt, (const char *)meta->raw_data, meta->raw_len);
|
||||
if ((char *)payload - meta->raw_data != meta->l7offset)
|
||||
{
|
||||
uint16_t offset = (char *)payload - meta->raw_data;
|
||||
TFE_LOG_ERROR(logger, "%s: incorrect dataoffset in the control zone of session %lu, offset:%u, l7offset:%u, payload:%p, raw_data:%p", LOG_TAG_PKTIO, meta->session_id, offset, meta->l7offset, payload, meta->raw_data);
|
||||
}
|
||||
raw_packet_parser_get_most_inner_tuple4(&raw_parser, &inner_tuple4, logger);
|
||||
packet_get_innermost_tuple4(&pkt, &inner_tuple4);
|
||||
tfe_cmsg_get_value(parser->cmsg, TFE_CMSG_TCP_RESTORE_PROTOCOL, (unsigned char *)&stream_protocol_in_char, sizeof(stream_protocol_in_char), &size);
|
||||
|
||||
uint64_t rule_id = 0;
|
||||
@@ -1161,7 +1261,7 @@ passthrough:
|
||||
s_ctx->protocol = stream_protocol_in_char;
|
||||
s_ctx->ref_thread_ctx = thread;
|
||||
s_ctx->session_id = meta->session_id;
|
||||
s_ctx->session_addr = addr_tuple4_to_str(&inner_tuple4);
|
||||
tuple4_tostring(&inner_tuple4, s_ctx->session_addr, sizeof(s_ctx->session_addr));
|
||||
s_ctx->cmsg = parser->cmsg;
|
||||
s_ctx->is_passthrough = is_passthrough;
|
||||
metadata_deep_copy(s_ctx->ctrl_meta, meta);
|
||||
@@ -1176,7 +1276,7 @@ passthrough:
|
||||
|
||||
// s2c
|
||||
s_ctx->s2c_info.is_e2i_dir = !meta->is_e2i_dir;
|
||||
addr_tuple4_reverse(&inner_tuple4, &s_ctx->s2c_info.tuple4);
|
||||
tuple4_reverse(&inner_tuple4, &s_ctx->s2c_info.tuple4);
|
||||
|
||||
s_ctx->policy_ids = parser->tfe_policy_ids[0];
|
||||
|
||||
@@ -1184,9 +1284,9 @@ passthrough:
|
||||
route_ctx_copy(&s_ctx->ctrl_meta->route_ctx, &meta->route_ctx);
|
||||
|
||||
if (parser->seq_len > 0)
|
||||
raw_traffic_decapsulate(&raw_parser, parser->seq_header, parser->seq_len, &s_ctx->c2s_info.header_data, &s_ctx->c2s_info.header_len, &s_ctx->c2s_info.is_ipv4);
|
||||
raw_traffic_decapsulate(&pkt, parser->seq_header, parser->seq_len, &s_ctx->c2s_info.header_data, &s_ctx->c2s_info.header_len, &s_ctx->c2s_info.is_ipv4);
|
||||
if (parser->ack_len > 0)
|
||||
raw_traffic_decapsulate(&raw_parser, parser->ack_header, parser->ack_len, &s_ctx->s2c_info.header_data, &s_ctx->s2c_info.header_len, &s_ctx->s2c_info.is_ipv4);
|
||||
raw_traffic_decapsulate(&pkt, parser->ack_header, parser->ack_len, &s_ctx->s2c_info.header_data, &s_ctx->s2c_info.header_len, &s_ctx->s2c_info.is_ipv4);
|
||||
|
||||
if (s_ctx->c2s_info.is_e2i_dir) {
|
||||
sids_copy(&s_ctx->raw_meta_e2i->sids, &parser->seq_sids);
|
||||
@@ -1350,6 +1450,20 @@ static int handle_raw_packet_from_nf(struct packet_io *handle, marsio_buff_t *rx
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (thread->ref_acceptor_ctx->dup_packet_filter_enable == 1)
|
||||
{
|
||||
struct packet packet;
|
||||
packet_parse(&packet, (const char *)raw_data, raw_len);
|
||||
if (search_packet_from_dablooms(&packet, thread->dup_packet_filter) == 1)
|
||||
{
|
||||
throughput_metrics_inc(&packet_io_fs->raw_pkt_rx, 1, raw_len);
|
||||
throughput_metrics_inc(&packet_io_fs->raw_bypass, 1, raw_len);
|
||||
throughput_metrics_inc(&packet_io_fs->dup_bypass, 1, raw_len);
|
||||
marsio_send_burst(handle->dev_nf_interface.mr_path, thread_seq, &rx_buff, 1);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
struct session_node *node = session_table_search_by_id(thread->session_table, meta.session_id);
|
||||
if (node == NULL) {
|
||||
throughput_metrics_inc(&packet_io_fs->raw_pkt_rx, 1, raw_len);
|
||||
@@ -1357,17 +1471,20 @@ static int handle_raw_packet_from_nf(struct packet_io *handle, marsio_buff_t *rx
|
||||
|
||||
if (thread->ref_acceptor_ctx->debug)
|
||||
{
|
||||
struct addr_tuple4 inner_addr;
|
||||
struct raw_pkt_parser raw_parser;
|
||||
memset(&inner_addr, 0, sizeof(struct addr_tuple4));
|
||||
raw_packet_parser_init(&raw_parser, 0, LAYER_TYPE_ALL, 8);
|
||||
raw_packet_parser_parse(&raw_parser, (const void *)raw_data, raw_len, logger);
|
||||
raw_packet_parser_get_most_inner_tuple4(&raw_parser, &inner_addr, logger);
|
||||
uint16_t ipid = raw_packet_parser_get_most_inner_ipid(&raw_parser);
|
||||
|
||||
char *str = addr_tuple4_to_str(&inner_addr);
|
||||
TFE_LOG_ERROR(logger, "packet from nf %lu: %s (ipid: %u) miss session table", meta.session_id, str, ipid);
|
||||
free(str);
|
||||
struct tuple4 inner_addr;
|
||||
struct packet pkt;
|
||||
memset(&inner_addr, 0, sizeof(struct tuple4));
|
||||
packet_parse(&pkt, (const char *)raw_data, raw_len);
|
||||
packet_get_innermost_tuple4(&pkt, &inner_addr);
|
||||
uint16_t ipid = 0;
|
||||
const struct layer_record *ipv4_layer = packet_get_innermost_layer(&pkt, LAYER_TYPE_IPV4);
|
||||
if (ipv4_layer)
|
||||
{
|
||||
ipid = ipv4_hdr_get_ipid((const struct ip *)ipv4_layer->hdr_ptr);
|
||||
}
|
||||
char buffer[128] = {0};
|
||||
tuple4_tostring(&inner_addr, buffer, sizeof(buffer));
|
||||
TFE_LOG_ERROR(logger, "packet from nf %lu: %s (ipid: %u) miss session table", meta.session_id, buffer, ipid);
|
||||
}
|
||||
|
||||
marsio_send_burst(handle->dev_nf_interface.mr_path, thread_seq, &rx_buff, 1);
|
||||
@@ -1759,17 +1876,20 @@ int packet_io_polling_nf_interface(struct packet_io *handle, int thread_seq, voi
|
||||
|
||||
if (thread->ref_acceptor_ctx->debug)
|
||||
{
|
||||
struct addr_tuple4 inner_addr;
|
||||
struct raw_pkt_parser raw_parser;
|
||||
memset(&inner_addr, 0, sizeof(struct addr_tuple4));
|
||||
raw_packet_parser_init(&raw_parser, 0, LAYER_TYPE_ALL, 8);
|
||||
raw_packet_parser_parse(&raw_parser, (const void *)raw_data, raw_len, thread->logger);
|
||||
raw_packet_parser_get_most_inner_tuple4(&raw_parser, &inner_addr, thread->logger);
|
||||
uint16_t ipid = raw_packet_parser_get_most_inner_ipid(&raw_parser);
|
||||
|
||||
char *str = addr_tuple4_to_str(&inner_addr);
|
||||
TFE_LOG_DEBUG(thread->logger, "recv packet %s (ipid: %u)", str, ipid);
|
||||
free(str);
|
||||
struct tuple4 inner_addr;
|
||||
struct packet pkt;
|
||||
memset(&inner_addr, 0, sizeof(struct tuple4));
|
||||
packet_parse(&pkt, (const char *)raw_data, raw_len);
|
||||
packet_get_innermost_tuple4(&pkt, &inner_addr);
|
||||
uint16_t ipid = 0;
|
||||
const struct layer_record *ipv4_layer = packet_get_innermost_layer(&pkt, LAYER_TYPE_IPV4);
|
||||
if (ipv4_layer)
|
||||
{
|
||||
ipid = ipv4_hdr_get_ipid((const struct ip *)ipv4_layer->hdr_ptr);
|
||||
}
|
||||
char buffer[128] = {0};
|
||||
tuple4_tostring(&inner_addr, buffer, sizeof(buffer));
|
||||
TFE_LOG_DEBUG(thread->logger, "recv packet %s (ipid: %u)", buffer, ipid);
|
||||
}
|
||||
|
||||
if (marsio_buff_is_ctrlbuf(rx_buff))
|
||||
@@ -1798,14 +1918,13 @@ void handle_decryption_packet_from_tap(const char *data, int len, void *args)
|
||||
struct acceptor_kni_v4 *acceptor_ctx = thread->ref_acceptor_ctx;
|
||||
struct packet_io *packet_io = thread->ref_io;
|
||||
struct packet_io_fs *packet_io_fs = thread->ret_fs_state;
|
||||
struct addr_tuple4 inner_addr;
|
||||
struct raw_pkt_parser raw_parser;
|
||||
struct tuple4 inner_addr;
|
||||
struct packet pkt;
|
||||
void * logger = thread->logger;
|
||||
|
||||
memset(&inner_addr, 0, sizeof(struct addr_tuple4));
|
||||
raw_packet_parser_init(&raw_parser, 0, LAYER_TYPE_ALL, 8);
|
||||
raw_packet_parser_parse(&raw_parser, (const void *)data, len, logger);
|
||||
raw_packet_parser_get_most_inner_tuple4(&raw_parser, &inner_addr, logger);
|
||||
memset(&inner_addr, 0, sizeof(struct tuple4));
|
||||
packet_parse(&pkt, (const char *)data, len);
|
||||
packet_get_innermost_tuple4(&pkt, &inner_addr);
|
||||
|
||||
throughput_metrics_inc(&packet_io_fs->decrypt_rx, 1, len);
|
||||
|
||||
@@ -1813,10 +1932,15 @@ void handle_decryption_packet_from_tap(const char *data, int len, void *args)
|
||||
if (node == NULL) {
|
||||
if (thread->ref_acceptor_ctx->debug)
|
||||
{
|
||||
char *str = addr_tuple4_to_str(&inner_addr);
|
||||
uint16_t ipid = raw_packet_parser_get_most_inner_ipid(&raw_parser);
|
||||
TFE_LOG_ERROR(logger, "decypted packet from tap %s (ipid: %u) miss session table", str, ipid);
|
||||
free(str);
|
||||
char buffer[128] = {0};
|
||||
tuple4_tostring(&inner_addr, buffer, sizeof(buffer));
|
||||
int16_t ipid = 0;
|
||||
const struct layer_record *ipv4_layer = packet_get_innermost_layer(&pkt, LAYER_TYPE_IPV4);
|
||||
if (ipv4_layer)
|
||||
{
|
||||
ipid = ipv4_hdr_get_ipid((const struct ip *)ipv4_layer->hdr_ptr);
|
||||
}
|
||||
TFE_LOG_ERROR(logger, "decypted packet from tap %s (ipid: %u) miss session table", buffer, ipid);
|
||||
}
|
||||
|
||||
throughput_metrics_inc(&packet_io_fs->decrypt_rxdrop, 1, len);
|
||||
@@ -1847,7 +1971,7 @@ void handle_decryption_packet_from_tap(const char *data, int len, void *args)
|
||||
meta.sids.elems[0] = acceptor_ctx->sce_sids;
|
||||
meta.sids.elems[1] = acceptor_ctx->proxy_sids;
|
||||
|
||||
if (memcmp(&inner_addr, &s_ctx->c2s_info.tuple4, sizeof(struct addr_tuple4)) == 0) {
|
||||
if (memcmp(&inner_addr, &s_ctx->c2s_info.tuple4, sizeof(struct tuple4)) == 0) {
|
||||
meta.is_e2i_dir = s_ctx->c2s_info.is_e2i_dir;
|
||||
throughput_metrics_inc(&packet_io_fs->tap_c_pkt_rx, 1, len);
|
||||
}
|
||||
@@ -1876,8 +2000,8 @@ void handle_raw_packet_from_tap(const char *data, int len, void *args)
|
||||
struct packet_io_thread_ctx *thread = (struct packet_io_thread_ctx *)args;
|
||||
struct packet_io *packet_io = thread->ref_io;
|
||||
struct packet_io_fs *packet_io_fs = thread->ret_fs_state;
|
||||
struct addr_tuple4 inner_addr;
|
||||
struct raw_pkt_parser raw_parser;
|
||||
struct tuple4 inner_addr;
|
||||
struct packet pkt;
|
||||
struct metadata meta = {0};
|
||||
void * logger = thread->logger;
|
||||
char *dst = NULL;
|
||||
@@ -1885,10 +2009,9 @@ void handle_raw_packet_from_tap(const char *data, int len, void *args)
|
||||
int header_len = 0;
|
||||
int packet_len = 0;
|
||||
|
||||
memset(&inner_addr, 0, sizeof(struct addr_tuple4));
|
||||
raw_packet_parser_init(&raw_parser, 0, LAYER_TYPE_ALL, 8);
|
||||
raw_packet_parser_parse(&raw_parser, (const void *)data, len, logger);
|
||||
raw_packet_parser_get_most_inner_tuple4(&raw_parser, &inner_addr, logger);
|
||||
memset(&inner_addr, 0, sizeof(struct tuple4));
|
||||
packet_parse(&pkt, (const char *)data, len);
|
||||
packet_get_innermost_tuple4(&pkt, &inner_addr);
|
||||
|
||||
throughput_metrics_inc(&packet_io_fs->tap_pkt_rx, 1, len);
|
||||
|
||||
@@ -1898,10 +2021,15 @@ void handle_raw_packet_from_tap(const char *data, int len, void *args)
|
||||
|
||||
if (thread->ref_acceptor_ctx->debug)
|
||||
{
|
||||
char *str = addr_tuple4_to_str(&inner_addr);
|
||||
uint16_t ipid = raw_packet_parser_get_most_inner_ipid(&raw_parser);
|
||||
TFE_LOG_ERROR(logger, "raw packet from tap %s (ipid: %u) miss session table", str, ipid);
|
||||
free(str);
|
||||
char buffer[128] = {0};
|
||||
tuple4_tostring(&inner_addr, buffer, sizeof(buffer));
|
||||
uint16_t ipid = 0;
|
||||
const struct layer_record *ipv4_layer = packet_get_innermost_layer(&pkt, LAYER_TYPE_IPV4);
|
||||
if (ipv4_layer)
|
||||
{
|
||||
ipid = ipv4_hdr_get_ipid((const struct ip *)ipv4_layer->hdr_ptr);
|
||||
}
|
||||
TFE_LOG_ERROR(logger, "raw packet from tap %s (ipid: %u) miss session table", buffer, ipid);
|
||||
}
|
||||
|
||||
return;
|
||||
@@ -1917,7 +2045,7 @@ void handle_raw_packet_from_tap(const char *data, int len, void *args)
|
||||
return;
|
||||
}
|
||||
|
||||
if (memcmp(&inner_addr, &s_ctx->c2s_info.tuple4, sizeof(struct addr_tuple4)) == 0)
|
||||
if (memcmp(&inner_addr, &s_ctx->c2s_info.tuple4, sizeof(struct tuple4)) == 0)
|
||||
{
|
||||
meta.is_e2i_dir = s_ctx->c2s_info.is_e2i_dir;
|
||||
src_mac = s_ctx->client_mac;
|
||||
@@ -1967,5 +2095,11 @@ void handle_raw_packet_from_tap(const char *data, int len, void *args)
|
||||
packet_io_set_metadata(tx_buffs[0], &meta, logger);
|
||||
add_ether_header(dst, src_mac, dst_mac);
|
||||
throughput_metrics_inc(&packet_io_fs->raw_pkt_tx, 1, packet_len);
|
||||
|
||||
if (thread->ref_acceptor_ctx->dup_packet_filter_enable == 1)
|
||||
{
|
||||
add_packet_to_dablooms(&pkt, thread->dup_packet_filter);
|
||||
}
|
||||
|
||||
marsio_send_burst_with_options(packet_io->dev_nf_interface.mr_path, thread->thread_index, tx_buffs, 1, MARSIO_SEND_OPT_REHASH);
|
||||
}
|
||||
@@ -19,6 +19,9 @@ enum PACKET_IO_STAT_FIELD
|
||||
STAT_DECRYPTED_RX_PKT,
|
||||
STAT_DECRYPTED_RX_B,
|
||||
|
||||
STAT_DUP_PKT_BYPASS_PKT,
|
||||
STAT_DUP_PKT_BYPASS_B,
|
||||
|
||||
STAT_RAW_PKT_BYPASS_PKT,
|
||||
STAT_RAW_PKT_BYPASS_B,
|
||||
|
||||
@@ -81,6 +84,9 @@ static const char *stat_map[] =
|
||||
[STAT_DECRYPTED_RX_PKT] = "decrypt_rx_pkt",
|
||||
[STAT_DECRYPTED_RX_B] = "decrypt_rx_B",
|
||||
|
||||
[STAT_DUP_PKT_BYPASS_PKT] = "dup_bypass_pkt",
|
||||
[STAT_DUP_PKT_BYPASS_B] = "dup_bypass_B",
|
||||
|
||||
[STAT_RAW_PKT_BYPASS_PKT] = "raw_bypass_pkt",
|
||||
[STAT_RAW_PKT_BYPASS_B] = "raw_bypass_B",
|
||||
|
||||
@@ -185,6 +191,9 @@ void packet_io_fs_dump(struct packet_io_fs *handle)
|
||||
FS_operate(handle->fs_handle, handle->fs_id[STAT_DECRYPTED_RX_PKT], 0, FS_OP_SET, __atomic_fetch_add(&(handle->decrypt_rx.n_pkts), 0, __ATOMIC_RELAXED));
|
||||
FS_operate(handle->fs_handle, handle->fs_id[STAT_DECRYPTED_RX_B], 0, FS_OP_SET, __atomic_fetch_add(&(handle->decrypt_rx.n_bytes), 0, __ATOMIC_RELAXED));
|
||||
|
||||
FS_operate(handle->fs_handle, handle->fs_id[STAT_DUP_PKT_BYPASS_PKT], 0, FS_OP_SET, __atomic_fetch_add(&(handle->dup_bypass.n_pkts), 0, __ATOMIC_RELAXED));
|
||||
FS_operate(handle->fs_handle, handle->fs_id[STAT_DUP_PKT_BYPASS_B], 0, FS_OP_SET, __atomic_fetch_add(&(handle->dup_bypass.n_pkts), 0, __ATOMIC_RELAXED));
|
||||
|
||||
FS_operate(handle->fs_handle, handle->fs_id[STAT_RAW_PKT_BYPASS_PKT], 0, FS_OP_SET, __atomic_fetch_add(&(handle->raw_bypass.n_pkts), 0, __ATOMIC_RELAXED));
|
||||
FS_operate(handle->fs_handle, handle->fs_id[STAT_RAW_PKT_BYPASS_B], 0, FS_OP_SET, __atomic_fetch_add(&(handle->raw_bypass.n_bytes), 0, __ATOMIC_RELAXED));
|
||||
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -86,7 +86,7 @@ uint64_t session_table_count(struct session_table *table)
|
||||
|
||||
// session_addr : deep copy
|
||||
// val_data : shallow copy (malloc by user, free by val_freecb)
|
||||
int session_table_insert(struct session_table *table, uint64_t session_id, const struct addr_tuple4 *session_addr, void *val_data, const fn_free_cb *val_freecb)
|
||||
int session_table_insert(struct session_table *table, uint64_t session_id, const struct tuple4 *session_addr, void *val_data, const fn_free_cb *val_freecb)
|
||||
{
|
||||
struct session_node *temp = NULL;
|
||||
HASH_FIND(hh1, table->root_by_id, &session_id, sizeof(session_id), temp);
|
||||
@@ -100,7 +100,7 @@ int session_table_insert(struct session_table *table, uint64_t session_id, const
|
||||
assert(temp);
|
||||
|
||||
temp->session_id = session_id;
|
||||
memcpy(&temp->session_addr, session_addr, sizeof(struct addr_tuple4));
|
||||
memcpy(&temp->session_addr, session_addr, sizeof(struct tuple4));
|
||||
temp->val_data = val_data;
|
||||
temp->val_freecb = val_freecb;
|
||||
|
||||
@@ -141,20 +141,21 @@ int session_table_delete_by_id(struct session_table *table, uint64_t session_id)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int session_table_delete_by_addr(struct session_table *table, const struct addr_tuple4 *session_addr)
|
||||
int session_table_delete_by_addr(struct session_table *table, const struct tuple4 *session_addr)
|
||||
{
|
||||
struct session_node *temp = NULL;
|
||||
char *addr_str = addr_tuple4_to_str(session_addr);
|
||||
HASH_FIND(hh2, table->root_by_addr, session_addr, sizeof(struct addr_tuple4), temp);
|
||||
char buffer[128] = {0};
|
||||
tuple4_tostring(session_addr, buffer, sizeof(buffer));
|
||||
HASH_FIND(hh2, table->root_by_addr, session_addr, sizeof(struct tuple4), temp);
|
||||
if (!temp)
|
||||
{
|
||||
struct addr_tuple4 reverse_addr;
|
||||
addr_tuple4_reverse(session_addr, &reverse_addr);
|
||||
HASH_FIND(hh2, table->root_by_addr, &reverse_addr, sizeof(struct addr_tuple4), temp);
|
||||
struct tuple4 reverse_addr;
|
||||
memset(&reverse_addr, 0, sizeof(struct tuple4));
|
||||
tuple4_reverse(session_addr, &reverse_addr);
|
||||
HASH_FIND(hh2, table->root_by_addr, &reverse_addr, sizeof(struct tuple4), temp);
|
||||
if (!temp)
|
||||
{
|
||||
TFE_LOG_DEBUG(g_packet_io_logger, "%s: delete: key %s not exists", LOG_TAG_STABLE, addr_str);
|
||||
free(addr_str);
|
||||
TFE_LOG_DEBUG(g_packet_io_logger, "%s: delete: key %s not exists", LOG_TAG_STABLE, buffer);
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@@ -171,9 +172,7 @@ int session_table_delete_by_addr(struct session_table *table, const struct addr_
|
||||
free(temp);
|
||||
temp = NULL;
|
||||
|
||||
TFE_LOG_DEBUG(g_packet_io_logger, "%s: delete: key %s success", LOG_TAG_STABLE, addr_str);
|
||||
free(addr_str);
|
||||
addr_str = NULL;
|
||||
TFE_LOG_DEBUG(g_packet_io_logger, "%s: delete: key %s success", LOG_TAG_STABLE, buffer);
|
||||
table->session_node_count--;
|
||||
|
||||
return 0;
|
||||
@@ -194,15 +193,16 @@ struct session_node *session_table_search_by_id(struct session_table *table, uin
|
||||
return temp;
|
||||
}
|
||||
|
||||
struct session_node *session_table_search_by_addr(struct session_table *table, const struct addr_tuple4 *session_addr)
|
||||
struct session_node *session_table_search_by_addr(struct session_table *table, const struct tuple4 *session_addr)
|
||||
{
|
||||
struct session_node *temp = NULL;
|
||||
HASH_FIND(hh2, table->root_by_addr, session_addr, sizeof(struct addr_tuple4), temp);
|
||||
HASH_FIND(hh2, table->root_by_addr, session_addr, sizeof(struct tuple4), temp);
|
||||
if (!temp)
|
||||
{
|
||||
struct addr_tuple4 reverse_addr;
|
||||
addr_tuple4_reverse(session_addr, &reverse_addr);
|
||||
HASH_FIND(hh2, table->root_by_addr, &reverse_addr, sizeof(struct addr_tuple4), temp);
|
||||
struct tuple4 reverse_addr;
|
||||
memset(&reverse_addr, 0, sizeof(struct tuple4));
|
||||
tuple4_reverse(session_addr, &reverse_addr);
|
||||
HASH_FIND(hh2, table->root_by_addr, &reverse_addr, sizeof(struct tuple4), temp);
|
||||
if (!temp)
|
||||
{
|
||||
return NULL;
|
||||
|
||||
38
common/src/timestamp.cpp
Normal file
38
common/src/timestamp.cpp
Normal file
@@ -0,0 +1,38 @@
|
||||
#include <time.h>
|
||||
|
||||
#include "timestamp.h"
|
||||
|
||||
// 1 s = 1000 ms
|
||||
// 1 ms = 1000 us
|
||||
// 1 us = 1000 ns
|
||||
|
||||
struct timestamp
|
||||
{
|
||||
struct timespec ts;
|
||||
uint64_t ts_in_msec;
|
||||
uint64_t ts_in_sec;
|
||||
} g_timestamp;
|
||||
|
||||
#define ATOMIC_SET(x, y) __atomic_store_n(x, y, __ATOMIC_RELAXED)
|
||||
#define ATOMIC_READ(x) __atomic_load_n(x, __ATOMIC_RELAXED)
|
||||
|
||||
void timestamp_update()
|
||||
{
|
||||
clock_gettime(CLOCK_MONOTONIC, &g_timestamp.ts);
|
||||
|
||||
uint64_t current_timestamp_ms = g_timestamp.ts.tv_sec * 1000 + g_timestamp.ts.tv_nsec / 1000000;
|
||||
uint64_t current_timestamp_sec = g_timestamp.ts.tv_sec + g_timestamp.ts.tv_nsec / 1000000000;
|
||||
|
||||
ATOMIC_SET(&g_timestamp.ts_in_msec, current_timestamp_ms);
|
||||
ATOMIC_SET(&g_timestamp.ts_in_sec, current_timestamp_sec);
|
||||
}
|
||||
|
||||
uint64_t timestamp_get_sec()
|
||||
{
|
||||
return ATOMIC_READ(&g_timestamp.ts_in_sec);
|
||||
}
|
||||
|
||||
uint64_t timestamp_get_msec()
|
||||
{
|
||||
return ATOMIC_READ(&g_timestamp.ts_in_msec);
|
||||
}
|
||||
452
common/src/tuple.cpp
Normal file
452
common/src/tuple.cpp
Normal file
@@ -0,0 +1,452 @@
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
|
||||
#include "tuple.h"
|
||||
#include "crc32_hash.h"
|
||||
|
||||
uint32_t tuple2_hash(const struct tuple2 *tuple)
|
||||
{
|
||||
uint32_t src_addr_hash = 0;
|
||||
uint32_t dst_addr_hash = 0;
|
||||
uint32_t hash = crc32_hash(&tuple->ip_type, sizeof(tuple->ip_type), 0);
|
||||
|
||||
if (tuple->ip_type == IP_TYPE_V4)
|
||||
{
|
||||
src_addr_hash = crc32_hash(&tuple->src_addr.v4, sizeof(tuple->src_addr.v4), hash);
|
||||
dst_addr_hash = crc32_hash(&tuple->dst_addr.v4, sizeof(tuple->dst_addr.v4), hash);
|
||||
}
|
||||
else
|
||||
{
|
||||
src_addr_hash = crc32_hash(&tuple->src_addr.v6, sizeof(tuple->src_addr.v6), hash);
|
||||
dst_addr_hash = crc32_hash(&tuple->dst_addr.v6, sizeof(tuple->dst_addr.v6), hash);
|
||||
}
|
||||
hash = src_addr_hash + dst_addr_hash;
|
||||
|
||||
return hash;
|
||||
}
|
||||
|
||||
uint32_t tuple4_hash(const struct tuple4 *tuple)
|
||||
{
|
||||
uint32_t src_addr_hash = 0;
|
||||
uint32_t dst_addr_hash = 0;
|
||||
uint32_t src_port_hash = 0;
|
||||
uint32_t dst_port_hash = 0;
|
||||
uint32_t hash = crc32_hash(&tuple->ip_type, sizeof(tuple->ip_type), 0);
|
||||
|
||||
if (tuple->ip_type == IP_TYPE_V4)
|
||||
{
|
||||
src_addr_hash = crc32_hash(&tuple->src_addr.v4, sizeof(tuple->src_addr.v4), hash);
|
||||
dst_addr_hash = crc32_hash(&tuple->dst_addr.v4, sizeof(tuple->dst_addr.v4), hash);
|
||||
}
|
||||
else
|
||||
{
|
||||
src_addr_hash = crc32_hash(&tuple->src_addr.v6, sizeof(tuple->src_addr.v6), hash);
|
||||
dst_addr_hash = crc32_hash(&tuple->dst_addr.v6, sizeof(tuple->dst_addr.v6), hash);
|
||||
}
|
||||
hash = src_addr_hash + dst_addr_hash;
|
||||
|
||||
src_port_hash = crc32_hash(&tuple->src_port, sizeof(tuple->src_port), hash);
|
||||
dst_port_hash = crc32_hash(&tuple->dst_port, sizeof(tuple->dst_port), hash);
|
||||
hash = src_port_hash + dst_port_hash;
|
||||
|
||||
return hash;
|
||||
}
|
||||
|
||||
uint32_t tuple5_hash(const struct tuple5 *tuple)
|
||||
{
|
||||
uint32_t src_addr_hash = 0;
|
||||
uint32_t dst_addr_hash = 0;
|
||||
uint32_t src_port_hash = 0;
|
||||
uint32_t dst_port_hash = 0;
|
||||
uint32_t hash = crc32_hash(&tuple->ip_type, sizeof(tuple->ip_type), 0);
|
||||
hash = crc32_hash(&tuple->ip_proto, sizeof(tuple->ip_proto), hash);
|
||||
|
||||
if (tuple->ip_type == IP_TYPE_V4)
|
||||
{
|
||||
src_addr_hash = crc32_hash(&tuple->src_addr.v4, sizeof(tuple->src_addr.v4), hash);
|
||||
dst_addr_hash = crc32_hash(&tuple->dst_addr.v4, sizeof(tuple->dst_addr.v4), hash);
|
||||
}
|
||||
else
|
||||
{
|
||||
src_addr_hash = crc32_hash(&tuple->src_addr.v6, sizeof(tuple->src_addr.v6), hash);
|
||||
dst_addr_hash = crc32_hash(&tuple->dst_addr.v6, sizeof(tuple->dst_addr.v6), hash);
|
||||
}
|
||||
hash = src_addr_hash + dst_addr_hash;
|
||||
|
||||
src_port_hash = crc32_hash(&tuple->src_port, sizeof(tuple->src_port), hash);
|
||||
dst_port_hash = crc32_hash(&tuple->dst_port, sizeof(tuple->dst_port), hash);
|
||||
hash = src_port_hash + dst_port_hash;
|
||||
|
||||
return hash;
|
||||
}
|
||||
|
||||
uint32_t tuple6_hash(const struct tuple6 *tuple)
|
||||
{
|
||||
uint32_t src_addr_hash = 0;
|
||||
uint32_t dst_addr_hash = 0;
|
||||
uint32_t src_port_hash = 0;
|
||||
uint32_t dst_port_hash = 0;
|
||||
uint32_t hash = crc32_hash(&tuple->ip_type, sizeof(tuple->ip_type), 0);
|
||||
hash = crc32_hash(&tuple->ip_proto, sizeof(tuple->ip_proto), hash);
|
||||
hash = crc32_hash(&tuple->security_zone, sizeof(tuple->security_zone), hash);
|
||||
|
||||
if (tuple->ip_type == IP_TYPE_V4)
|
||||
{
|
||||
src_addr_hash = crc32_hash(&tuple->src_addr.v4, sizeof(tuple->src_addr.v4), hash);
|
||||
dst_addr_hash = crc32_hash(&tuple->dst_addr.v4, sizeof(tuple->dst_addr.v4), hash);
|
||||
}
|
||||
else
|
||||
{
|
||||
src_addr_hash = crc32_hash(&tuple->src_addr.v6, sizeof(tuple->src_addr.v6), hash);
|
||||
dst_addr_hash = crc32_hash(&tuple->dst_addr.v6, sizeof(tuple->dst_addr.v6), hash);
|
||||
}
|
||||
hash = src_addr_hash + dst_addr_hash;
|
||||
|
||||
src_port_hash = crc32_hash(&tuple->src_port, sizeof(tuple->src_port), hash);
|
||||
dst_port_hash = crc32_hash(&tuple->dst_port, sizeof(tuple->dst_port), hash);
|
||||
hash = src_port_hash + dst_port_hash;
|
||||
|
||||
return hash;
|
||||
}
|
||||
|
||||
int tuple2_cmp(const struct tuple2 *tuple_a, const struct tuple2 *tuple_b)
|
||||
{
|
||||
if (tuple_a->ip_type != tuple_b->ip_type)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tuple_a->ip_type == IP_TYPE_V4)
|
||||
{
|
||||
if (tuple_a->src_addr.v4.s_addr != tuple_b->src_addr.v4.s_addr)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tuple_a->dst_addr.v4.s_addr != tuple_b->dst_addr.v4.s_addr)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (memcmp(&tuple_a->src_addr.v6, &tuple_b->src_addr.v6, sizeof(tuple_a->src_addr.v6)) != 0)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (memcmp(&tuple_a->dst_addr.v6, &tuple_b->dst_addr.v6, sizeof(tuple_a->dst_addr.v6)) != 0)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int tuple4_cmp(const struct tuple4 *tuple_a, const struct tuple4 *tuple_b)
|
||||
{
|
||||
if (tuple_a->src_port != tuple_b->src_port)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tuple_a->dst_port != tuple_b->dst_port)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tuple_a->ip_type != tuple_b->ip_type)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tuple_a->ip_type == IP_TYPE_V4)
|
||||
{
|
||||
if (tuple_a->src_addr.v4.s_addr != tuple_b->src_addr.v4.s_addr)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tuple_a->dst_addr.v4.s_addr != tuple_b->dst_addr.v4.s_addr)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (memcmp(&tuple_a->src_addr.v6, &tuple_b->src_addr.v6, sizeof(tuple_a->src_addr.v6)) != 0)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (memcmp(&tuple_a->dst_addr.v6, &tuple_b->dst_addr.v6, sizeof(tuple_a->dst_addr.v6)) != 0)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int tuple5_cmp(const struct tuple5 *tuple_a, const struct tuple5 *tuple_b)
|
||||
{
|
||||
if (tuple_a->ip_proto != tuple_b->ip_proto)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tuple_a->src_port != tuple_b->src_port)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tuple_a->dst_port != tuple_b->dst_port)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tuple_a->ip_type != tuple_b->ip_type)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tuple_a->ip_type == IP_TYPE_V4)
|
||||
{
|
||||
if (tuple_a->src_addr.v4.s_addr != tuple_b->src_addr.v4.s_addr)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tuple_a->dst_addr.v4.s_addr != tuple_b->dst_addr.v4.s_addr)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (memcmp(&tuple_a->src_addr.v6, &tuple_b->src_addr.v6, sizeof(tuple_a->src_addr.v6)) != 0)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (memcmp(&tuple_a->dst_addr.v6, &tuple_b->dst_addr.v6, sizeof(tuple_a->dst_addr.v6)) != 0)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int tuple6_cmp(const struct tuple6 *tuple_a, const struct tuple6 *tuple_b)
|
||||
{
|
||||
if (tuple_a->security_zone != tuple_b->security_zone)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tuple_a->ip_proto != tuple_b->ip_proto)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tuple_a->src_port != tuple_b->src_port)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tuple_a->dst_port != tuple_b->dst_port)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tuple_a->ip_type != tuple_b->ip_type)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tuple_a->ip_type == IP_TYPE_V4)
|
||||
{
|
||||
if (tuple_a->src_addr.v4.s_addr != tuple_b->src_addr.v4.s_addr)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (tuple_a->dst_addr.v4.s_addr != tuple_b->dst_addr.v4.s_addr)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
if (memcmp(&tuple_a->src_addr.v6, &tuple_b->src_addr.v6, sizeof(tuple_a->src_addr.v6)) != 0)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (memcmp(&tuple_a->dst_addr.v6, &tuple_b->dst_addr.v6, sizeof(tuple_a->dst_addr.v6)) != 0)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void tuple2_reverse(const struct tuple2 *in, struct tuple2 *out)
|
||||
{
|
||||
out->ip_type = in->ip_type;
|
||||
|
||||
if (in->ip_type == IP_TYPE_V4)
|
||||
{
|
||||
out->src_addr.v4.s_addr = in->dst_addr.v4.s_addr;
|
||||
out->dst_addr.v4.s_addr = in->src_addr.v4.s_addr;
|
||||
}
|
||||
else
|
||||
{
|
||||
memcpy(&out->src_addr.v6, &in->dst_addr.v6, sizeof(in->dst_addr.v6));
|
||||
memcpy(&out->dst_addr.v6, &in->src_addr.v6, sizeof(in->src_addr.v6));
|
||||
}
|
||||
}
|
||||
|
||||
void tuple4_reverse(const struct tuple4 *in, struct tuple4 *out)
|
||||
{
|
||||
out->ip_type = in->ip_type;
|
||||
out->src_port = in->dst_port;
|
||||
out->dst_port = in->src_port;
|
||||
|
||||
if (in->ip_type == IP_TYPE_V4)
|
||||
{
|
||||
out->src_addr.v4.s_addr = in->dst_addr.v4.s_addr;
|
||||
out->dst_addr.v4.s_addr = in->src_addr.v4.s_addr;
|
||||
}
|
||||
else
|
||||
{
|
||||
memcpy(&out->src_addr.v6, &in->dst_addr.v6, sizeof(in->dst_addr.v6));
|
||||
memcpy(&out->dst_addr.v6, &in->src_addr.v6, sizeof(in->src_addr.v6));
|
||||
}
|
||||
}
|
||||
|
||||
void tuple5_reverse(const struct tuple5 *in, struct tuple5 *out)
|
||||
{
|
||||
out->ip_type = in->ip_type;
|
||||
out->ip_proto = in->ip_proto;
|
||||
out->src_port = in->dst_port;
|
||||
out->dst_port = in->src_port;
|
||||
|
||||
if (in->ip_type == IP_TYPE_V4)
|
||||
{
|
||||
out->src_addr.v4.s_addr = in->dst_addr.v4.s_addr;
|
||||
out->dst_addr.v4.s_addr = in->src_addr.v4.s_addr;
|
||||
}
|
||||
else
|
||||
{
|
||||
memcpy(&out->src_addr.v6, &in->dst_addr.v6, sizeof(in->dst_addr.v6));
|
||||
memcpy(&out->dst_addr.v6, &in->src_addr.v6, sizeof(in->src_addr.v6));
|
||||
}
|
||||
}
|
||||
|
||||
void tuple6_reverse(const struct tuple6 *in, struct tuple6 *out)
|
||||
{
|
||||
out->ip_type = in->ip_type;
|
||||
out->ip_proto = in->ip_proto;
|
||||
out->security_zone = in->security_zone;
|
||||
out->src_port = in->dst_port;
|
||||
out->dst_port = in->src_port;
|
||||
|
||||
if (in->ip_type == IP_TYPE_V4)
|
||||
{
|
||||
out->src_addr.v4.s_addr = in->dst_addr.v4.s_addr;
|
||||
out->dst_addr.v4.s_addr = in->src_addr.v4.s_addr;
|
||||
}
|
||||
else
|
||||
{
|
||||
memcpy(&out->src_addr.v6, &in->dst_addr.v6, sizeof(in->dst_addr.v6));
|
||||
memcpy(&out->dst_addr.v6, &in->src_addr.v6, sizeof(in->src_addr.v6));
|
||||
}
|
||||
}
|
||||
|
||||
void tuple2_tostring(const struct tuple2 *tuple, char *buf, uint32_t size)
|
||||
{
|
||||
char src_addr[INET6_ADDRSTRLEN] = {0};
|
||||
char dst_addr[INET6_ADDRSTRLEN] = {0};
|
||||
|
||||
if (tuple->ip_type == IP_TYPE_V4)
|
||||
{
|
||||
inet_ntop(AF_INET, &tuple->src_addr.v4, src_addr, INET6_ADDRSTRLEN);
|
||||
inet_ntop(AF_INET, &tuple->dst_addr.v4, dst_addr, INET6_ADDRSTRLEN);
|
||||
}
|
||||
else
|
||||
{
|
||||
inet_ntop(AF_INET6, &tuple->src_addr.v6, src_addr, INET6_ADDRSTRLEN);
|
||||
inet_ntop(AF_INET6, &tuple->dst_addr.v6, dst_addr, INET6_ADDRSTRLEN);
|
||||
}
|
||||
|
||||
snprintf(buf, size, "%s -> %s", src_addr, dst_addr);
|
||||
}
|
||||
|
||||
void tuple4_tostring(const struct tuple4 *tuple, char *buf, uint32_t size)
|
||||
{
|
||||
char src_addr[INET6_ADDRSTRLEN] = {0};
|
||||
char dst_addr[INET6_ADDRSTRLEN] = {0};
|
||||
|
||||
if (tuple->ip_type == IP_TYPE_V4)
|
||||
{
|
||||
inet_ntop(AF_INET, &tuple->src_addr.v4, src_addr, INET6_ADDRSTRLEN);
|
||||
inet_ntop(AF_INET, &tuple->dst_addr.v4, dst_addr, INET6_ADDRSTRLEN);
|
||||
}
|
||||
else
|
||||
{
|
||||
inet_ntop(AF_INET6, &tuple->src_addr.v6, src_addr, INET6_ADDRSTRLEN);
|
||||
inet_ntop(AF_INET6, &tuple->dst_addr.v6, dst_addr, INET6_ADDRSTRLEN);
|
||||
}
|
||||
|
||||
snprintf(buf, size, "%s:%u -> %s:%u",
|
||||
src_addr, ntohs(tuple->src_port),
|
||||
dst_addr, ntohs(tuple->dst_port));
|
||||
}
|
||||
|
||||
void tuple5_tostring(const struct tuple5 *tuple, char *buf, uint32_t size)
|
||||
{
|
||||
char src_addr[INET6_ADDRSTRLEN] = {0};
|
||||
char dst_addr[INET6_ADDRSTRLEN] = {0};
|
||||
|
||||
if (tuple->ip_type == IP_TYPE_V4)
|
||||
{
|
||||
inet_ntop(AF_INET, &tuple->src_addr.v4, src_addr, INET6_ADDRSTRLEN);
|
||||
inet_ntop(AF_INET, &tuple->dst_addr.v4, dst_addr, INET6_ADDRSTRLEN);
|
||||
}
|
||||
else
|
||||
{
|
||||
inet_ntop(AF_INET6, &tuple->src_addr.v6, src_addr, INET6_ADDRSTRLEN);
|
||||
inet_ntop(AF_INET6, &tuple->dst_addr.v6, dst_addr, INET6_ADDRSTRLEN);
|
||||
}
|
||||
|
||||
snprintf(buf, size, "%s:%u -> %s:%u, proto: %u",
|
||||
src_addr, ntohs(tuple->src_port),
|
||||
dst_addr, ntohs(tuple->dst_port),
|
||||
tuple->ip_proto);
|
||||
}
|
||||
|
||||
void tuple6_tostring(const struct tuple6 *tuple, char *buf, uint32_t size)
|
||||
{
|
||||
char src_addr[INET6_ADDRSTRLEN] = {0};
|
||||
char dst_addr[INET6_ADDRSTRLEN] = {0};
|
||||
|
||||
if (tuple->ip_type == IP_TYPE_V4)
|
||||
{
|
||||
inet_ntop(AF_INET, &tuple->src_addr.v4, src_addr, INET6_ADDRSTRLEN);
|
||||
inet_ntop(AF_INET, &tuple->dst_addr.v4, dst_addr, INET6_ADDRSTRLEN);
|
||||
}
|
||||
else
|
||||
{
|
||||
inet_ntop(AF_INET6, &tuple->src_addr.v6, src_addr, INET6_ADDRSTRLEN);
|
||||
inet_ntop(AF_INET6, &tuple->dst_addr.v6, dst_addr, INET6_ADDRSTRLEN);
|
||||
}
|
||||
|
||||
snprintf(buf, size, "%s:%u -> %s:%u, proto: %u, zone: %lu",
|
||||
src_addr, ntohs(tuple->src_port),
|
||||
dst_addr, ntohs(tuple->dst_port),
|
||||
tuple->ip_proto, tuple->security_zone);
|
||||
}
|
||||
Reference in New Issue
Block a user