This repository has been archived on 2025-09-14. You can view files and clone it, but cannot push or open issues or pull requests.
Files
stellar-stellar/infra/session_manager/session_manager.c

1429 lines
54 KiB
C
Raw Normal View History

#include <time.h>
2023-12-13 19:20:34 +08:00
#include <stdlib.h>
#include <assert.h>
#include <errno.h>
2023-12-13 19:20:34 +08:00
2024-08-16 10:43:00 +08:00
#include "utils.h"
#include "packet_helper.h"
#include "packet_filter.h"
2024-08-20 18:43:51 +08:00
#include "session_private.h"
2023-12-13 19:20:34 +08:00
#include "session_pool.h"
#include "session_table.h"
#include "session_timer.h"
#include "session_filter.h"
#include "session_manager.h"
#include "session_transition.h"
2023-12-13 19:20:34 +08:00
#define SESSION_LOG_ERROR(format, ...) STELLAR_LOG_ERROR(__thread_local_logger, "session", format, ##__VA_ARGS__)
#define SESSION_LOG_DEBUG(format, ...) STELLAR_LOG_DEBUG(__thread_local_logger, "session", format, ##__VA_ARGS__)
#define SESSION_LOG_INFO(format, ...) STELLAR_LOG_INFO(__thread_local_logger, "session", format, ##__VA_ARGS__)
/* Per-thread state for the snowflake-style session-id generator. */
struct snowflake
{
    uint64_t seed;     // identity bits (masked to 20 bits in snowflake_new)
    uint64_t sequence; // ever-increasing per-thread sequence counter
};
2023-12-13 19:20:34 +08:00
/*
 * Per-thread session manager: owns the session pool, expiry timer,
 * per-protocol session tables, bloom filters and aggregate statistics.
 */
struct session_manager
{
    struct session_list evicte_list; // sessions evicted from their table, awaiting reaping

    struct session_pool *sess_pool;
    struct session_timer *sess_timer;
    struct session_table *tcp_sess_table;
    struct session_table *udp_sess_table;
    struct packet_filter *dup_pkt_filter;      // duplicated-packet bloom filter
    struct session_filter *evicte_sess_filter; // evicted-session bloom filter

    struct session_manager_stat stat;
    struct session_manager_config cfg;
    /*
     * Only used by session_set_discard() or session_manager
     * record_duplicated_packet(), because those functions are called by a
     * plugin and have no time input.
     */
    uint64_t now_ms;
    uint64_t last_clean_expired_sess_ts;
    struct snowflake *sf; // session id generator
};
// headroom kept below the table limit when LRU-evicting on new-session paths
#define EVICTE_SESSION_BURST (RX_BURST_MAX)
2024-04-09 10:36:39 +08:00
/******************************************************************************
* session manager stat macro
2024-04-09 10:36:39 +08:00
******************************************************************************/
2024-08-16 18:41:02 +08:00
/*
 * Bump the per-protocol session-state counter matching `state`; `proto`
 * (tcp/udp) is token-pasted into the stat field name. The macros are
 * wrapped in do { } while (0) so they expand as a single statement and
 * remain safe in unbraced if/else bodies (the original bare `{...}` form
 * plus the caller's `;` breaks `if (x) MACRO(); else ...`).
 */
#define SESS_MGR_STAT_INC(stat, state, proto)   \
    do                                          \
    {                                           \
        switch ((state))                        \
        {                                       \
        case SESSION_STATE_OPENING:             \
            (stat)->proto##_sess_opening++;     \
            break;                              \
        case SESSION_STATE_ACTIVE:              \
            (stat)->proto##_sess_active++;      \
            break;                              \
        case SESSION_STATE_CLOSING:             \
            (stat)->proto##_sess_closing++;     \
            break;                              \
        case SESSION_STATE_DISCARD:             \
            (stat)->proto##_sess_discard++;     \
            break;                              \
        case SESSION_STATE_CLOSED:              \
            (stat)->proto##_sess_closed++;      \
            break;                              \
        default:                                \
            break;                              \
        }                                       \
    } while (0)

/* Decrement counterpart of SESS_MGR_STAT_INC. */
#define SESS_MGR_STAT_DEC(stat, state, proto)   \
    do                                          \
    {                                           \
        switch ((state))                        \
        {                                       \
        case SESSION_STATE_OPENING:             \
            (stat)->proto##_sess_opening--;     \
            break;                              \
        case SESSION_STATE_ACTIVE:              \
            (stat)->proto##_sess_active--;      \
            break;                              \
        case SESSION_STATE_CLOSING:             \
            (stat)->proto##_sess_closing--;     \
            break;                              \
        case SESSION_STATE_DISCARD:             \
            (stat)->proto##_sess_discard--;     \
            break;                              \
        case SESSION_STATE_CLOSED:              \
            (stat)->proto##_sess_closed--;      \
            break;                              \
        default:                                \
            break;                              \
        }                                       \
    } while (0)

/*
 * Move one session from state `curr` to state `next` in the counters;
 * a self-transition is a no-op. Arguments are parenthesized in the
 * comparison to survive expression arguments.
 */
#define SESS_MGR_STAT_UPDATE(stat, curr, next, proto) \
    do                                                \
    {                                                 \
        if ((curr) != (next))                         \
        {                                             \
            SESS_MGR_STAT_DEC(stat, curr, proto);     \
            SESS_MGR_STAT_INC(stat, next, proto);     \
        }                                             \
    } while (0)
2024-04-09 10:36:39 +08:00
/******************************************************************************
* snowflake id generator
******************************************************************************/
/*
 * Allocate a snowflake id generator. `seed` carries the identity bits and
 * is masked to its low 20 bits (the 12-bit device id + 8-bit thread id of
 * the id layout documented below). Returns NULL on allocation failure;
 * the caller owns the result and releases it with snowflake_free().
 */
static struct snowflake *snowflake_new(uint64_t seed)
{
    struct snowflake *sf = calloc(1, sizeof *sf); // no cast needed in C; sizeof *sf tracks the type
    if (sf == NULL)
    {
        return NULL;
    }
    sf->seed = seed & 0xFFFFF; // keep 20 bits: 12-bit device id + 8-bit thread id
    sf->sequence = 0;          // explicit, although calloc already zeroed it
    return sf;
}
/*
 * Release a snowflake generator. free(NULL) is a no-op, so no guard is
 * needed; assigning NULL to the by-value parameter (as the old code did)
 * had no effect on the caller's pointer and is dropped.
 */
static void snowflake_free(struct snowflake *sf)
{
    free(sf);
}
/*
* high -> low
*
* +------+------------------+----------------+------------------------+---------------------------+
* | 1bit | 12bit device_id | 8bit thread_id | 28bit timestamp in sec | 15bit sequence per thread |
* +------+------------------+----------------+------------------------+---------------------------+
*/
#define MAX_ID_PER_THREAD (32768)     // 2^15: sequence field width
#define MAX_ID_BASE_TIME (268435456L) // 2^28: timestamp field width (seconds)
/*
 * Compose the next 64-bit id per the layout above: seed bits in the top,
 * then 28 bits of wall-clock seconds, then a 15-bit per-thread sequence.
 * The seed is assumed already masked to 20 bits (see snowflake_new).
 */
static uint64_t snowflake_generate(struct snowflake *sf, uint64_t now_sec)
{
    uint64_t seq_bits = sf->sequence++ % MAX_ID_PER_THREAD;
    uint64_t time_bits = now_sec % MAX_ID_BASE_TIME;
    return (sf->seed << 43) | (time_bits << 15) | seq_bits;
}
2024-04-09 10:36:39 +08:00
/******************************************************************************
* TCP utils
2024-04-09 10:36:39 +08:00
******************************************************************************/
static void tcp_clean(struct session_manager *mgr, struct session *sess)
{
2024-09-02 17:49:33 +08:00
struct tcp_reassembly *c2s_ssembler = sess->tcp_halfs[FLOW_TYPE_C2S].assembler;
struct tcp_reassembly *s2c_ssembler = sess->tcp_halfs[FLOW_TYPE_S2C].assembler;
struct tcp_segment *seg;
if (c2s_ssembler)
{
while ((seg = tcp_reassembly_expire(c2s_ssembler, UINT64_MAX)))
{
2024-09-02 17:49:33 +08:00
session_inc_stat(sess, FLOW_TYPE_C2S, STAT_TCP_SEGMENTS_RELEASED, 1);
session_inc_stat(sess, FLOW_TYPE_C2S, STAT_TCP_PAYLOADS_RELEASED, seg->len);
2024-08-16 18:24:54 +08:00
mgr->stat.tcp_segs_freed++;
tcp_segment_free(seg);
}
tcp_reassembly_free(c2s_ssembler);
}
if (s2c_ssembler)
{
while ((seg = tcp_reassembly_expire(s2c_ssembler, UINT64_MAX)))
{
2024-09-02 17:49:33 +08:00
session_inc_stat(sess, FLOW_TYPE_S2C, STAT_TCP_SEGMENTS_RELEASED, 1);
session_inc_stat(sess, FLOW_TYPE_S2C, STAT_TCP_PAYLOADS_RELEASED, seg->len);
2024-08-16 18:24:54 +08:00
mgr->stat.tcp_segs_freed++;
tcp_segment_free(seg);
}
tcp_reassembly_free(s2c_ssembler);
}
}
/*
 * Allocate the per-direction TCP reassemblers for a new session (no-op when
 * reassembly is disabled). Returns 0 on success, -1 on allocation failure.
 * On failure both assembler pointers are cleaned AND reset to NULL: the old
 * code left them dangling after tcp_clean() freed them, which risked a
 * later double-free/use-after-free if the session object was reused.
 */
static int tcp_init(struct session_manager *mgr, struct session *sess)
{
    if (!mgr->cfg.tcp_reassembly.enable)
    {
        return 0;
    }
    sess->tcp_halfs[FLOW_TYPE_C2S].assembler = tcp_reassembly_new(mgr->cfg.tcp_reassembly.timeout_ms, mgr->cfg.tcp_reassembly.buffered_segments_max);
    sess->tcp_halfs[FLOW_TYPE_S2C].assembler = tcp_reassembly_new(mgr->cfg.tcp_reassembly.timeout_ms, mgr->cfg.tcp_reassembly.buffered_segments_max);
    if (sess->tcp_halfs[FLOW_TYPE_C2S].assembler == NULL || sess->tcp_halfs[FLOW_TYPE_S2C].assembler == NULL)
    {
        tcp_clean(mgr, sess);
        // tcp_clean freed the assemblers; clear the pointers so they cannot dangle
        sess->tcp_halfs[FLOW_TYPE_C2S].assembler = NULL;
        sess->tcp_halfs[FLOW_TYPE_S2C].assembler = NULL;
        return -1;
    }
    SESSION_LOG_DEBUG("session %lu %s new c2s tcp assembler %p, s2c tcp assembler %p",
                      session_get_id(sess), session_get0_readable_addr(sess),
                      sess->tcp_halfs[FLOW_TYPE_C2S].assembler,
                      sess->tcp_halfs[FLOW_TYPE_S2C].assembler);
    return 0;
}
2024-09-02 17:49:33 +08:00
/*
 * Record this segment's TCP header on the session's per-direction half-state
 * and hand any payload to the reassembler. With reassembly disabled, every
 * payload-bearing segment is published directly as in-order data; otherwise
 * the segment is classified (in-order / retransmission / buffered /
 * overlapping / dropped-for-space) with matching per-session and per-manager
 * counters.
 */
static void tcp_update(struct session_manager *mgr, struct session *sess, enum flow_type type, const struct layer_private *tcp_layer)
{
    struct tcp_segment *seg;
    struct tcphdr *hdr = (struct tcphdr *)tcp_layer->hdr_ptr;
    struct tcp_half *half = &sess->tcp_halfs[type];
    uint8_t flags = tcp_hdr_get_flags(hdr);
    uint16_t len = tcp_layer->pld_len;
    // the first SYN seen in this direction fixes the ISN
    if ((flags & TH_SYN) && half->isn == 0)
    {
        half->isn = tcp_hdr_get_seq(hdr);
    }
    half->flags = flags;
    half->history |= flags; // union of all flags ever seen in this direction
    half->seq = tcp_hdr_get_seq(hdr);
    half->ack = tcp_hdr_get_ack(hdr);
    half->len = tcp_layer->pld_len;
    if (!mgr->cfg.tcp_reassembly.enable)
    {
        // reassembly disabled: any payload is exposed as in-order data as-is
        if (len)
        {
            session_inc_stat(sess, type, STAT_TCP_SEGMENTS_RECEIVED, 1);
            session_inc_stat(sess, type, STAT_TCP_PAYLOADS_RECEIVED, len);
            mgr->stat.tcp_segs_input++;
            session_inc_stat(sess, type, STAT_TCP_SEGMENTS_INORDER, 1);
            session_inc_stat(sess, type, STAT_TCP_PAYLOADS_INORDER, len);
            mgr->stat.tcp_segs_inorder++;
            half->in_order.data = tcp_layer->pld_ptr;
            half->in_order.len = len;
            half->in_order_ref = 0;
        }
        return;
    }
    if (unlikely(flags & TH_SYN))
    {
        // len > 0 is SYN with data (TCP Fast Open)
        tcp_reassembly_set_recv_next(half->assembler, len ? half->seq : half->seq + 1);
    }
    // reap at most one buffered segment whose wait has timed out
    seg = tcp_reassembly_expire(half->assembler, mgr->now_ms);
    if (seg)
    {
        session_inc_stat(sess, type, STAT_TCP_SEGMENTS_EXPIRED, 1);
        session_inc_stat(sess, type, STAT_TCP_PAYLOADS_EXPIRED, seg->len);
        mgr->stat.tcp_segs_timeout++;
        session_inc_stat(sess, type, STAT_TCP_SEGMENTS_RELEASED, 1);
        session_inc_stat(sess, type, STAT_TCP_PAYLOADS_RELEASED, seg->len);
        mgr->stat.tcp_segs_freed++;
        tcp_segment_free(seg);
    }
    if (len)
    {
        session_inc_stat(sess, type, STAT_TCP_SEGMENTS_RECEIVED, 1);
        session_inc_stat(sess, type, STAT_TCP_PAYLOADS_RECEIVED, len);
        mgr->stat.tcp_segs_input++;
        uint32_t rcv_nxt = tcp_reassembly_get_recv_next(half->assembler);
        // in order
        if (half->seq == rcv_nxt)
        {
            session_inc_stat(sess, type, STAT_TCP_SEGMENTS_INORDER, 1);
            session_inc_stat(sess, type, STAT_TCP_PAYLOADS_INORDER, len);
            mgr->stat.tcp_segs_inorder++;
            half->in_order.data = tcp_layer->pld_ptr;
            half->in_order.len = len;
            half->in_order_ref = 0;
            tcp_reassembly_inc_recv_next(half->assembler, len);
        }
        // retransmission
        else if (uint32_before(uint32_add(half->seq, len), rcv_nxt))
        {
            session_inc_stat(sess, type, STAT_TCP_SEGMENTS_RETRANSMIT, 1);
            session_inc_stat(sess, type, STAT_TCP_PAYLOADS_RETRANSMIT, len);
            mgr->stat.tcp_segs_retransmited++;
        }
        // ahead of rcv_nxt: buffer the segment for later reordering
        else if ((seg = tcp_segment_new(half->seq, tcp_layer->pld_ptr, len)))
        {
            switch (tcp_reassembly_push(half->assembler, seg, mgr->now_ms))
            {
            case -2: // counted as a retransmission; segment not kept
                session_inc_stat(sess, type, STAT_TCP_SEGMENTS_RETRANSMIT, 1);
                session_inc_stat(sess, type, STAT_TCP_PAYLOADS_RETRANSMIT, len);
                mgr->stat.tcp_segs_retransmited++;
                tcp_segment_free(seg);
                break;
            case -1: // no buffer space; segment dropped
                session_inc_stat(sess, type, STAT_TCP_SEGMENTS_NOSPACE, 1);
                session_inc_stat(sess, type, STAT_TCP_PAYLOADS_NOSPACE, len);
                mgr->stat.tcp_segs_omitted_too_many++;
                tcp_segment_free(seg);
                break;
            case 0: // buffered cleanly
                session_inc_stat(sess, type, STAT_TCP_SEGMENTS_BUFFERED, 1);
                session_inc_stat(sess, type, STAT_TCP_PAYLOADS_BUFFERED, len);
                mgr->stat.tcp_segs_buffered++;
                break;
            case 1: // buffered, but overlaps existing data: counted both ways
                session_inc_stat(sess, type, STAT_TCP_SEGMENTS_OVERLAP, 1);
                session_inc_stat(sess, type, STAT_TCP_PAYLOADS_OVERLAP, len);
                mgr->stat.tcp_segs_overlapped++;
                session_inc_stat(sess, type, STAT_TCP_SEGMENTS_BUFFERED, 1);
                session_inc_stat(sess, type, STAT_TCP_PAYLOADS_BUFFERED, len);
                mgr->stat.tcp_segs_buffered++;
                break;
            default:
                assert(0);
                break;
            }
        }
        else
        {
            // tcp_segment_new failed: account the payload as dropped for space
            session_inc_stat(sess, type, STAT_TCP_SEGMENTS_NOSPACE, 1);
            session_inc_stat(sess, type, STAT_TCP_PAYLOADS_NOSPACE, len);
            mgr->stat.tcp_segs_omitted_too_many++;
        }
    }
}
/******************************************************************************
* session direction identify
******************************************************************************/
2023-12-13 19:20:34 +08:00
2024-09-02 17:49:33 +08:00
static enum flow_type identify_flow_type_by_port(uint16_t src_port, uint16_t dst_port)
{
// big port is client
if (src_port > dst_port)
{
2024-09-02 17:49:33 +08:00
return FLOW_TYPE_C2S;
}
else if (src_port < dst_port)
2023-12-19 10:47:26 +08:00
{
2024-09-02 17:49:33 +08:00
return FLOW_TYPE_S2C;
}
else
{
// if port is equal, first packet is C2S
2024-09-02 17:49:33 +08:00
return FLOW_TYPE_C2S;
2023-12-19 10:47:26 +08:00
}
2023-12-13 19:20:34 +08:00
}
2024-09-02 17:49:33 +08:00
static enum flow_type identify_flow_type_by_history(const struct session *sess, const struct tuple6 *key)
2023-12-13 19:20:34 +08:00
{
if (tuple6_cmp(session_get_tuple6(sess), key) == 0)
2023-12-19 10:47:26 +08:00
{
2024-09-02 17:49:33 +08:00
return FLOW_TYPE_C2S;
}
else
{
2024-09-02 17:49:33 +08:00
return FLOW_TYPE_S2C;
2023-12-19 10:47:26 +08:00
}
2023-12-13 19:20:34 +08:00
}
/******************************************************************************
* session filter bypass utils
******************************************************************************/
2024-04-09 15:07:53 +08:00
// on new session
static int tcp_overload_bypass(struct session_manager *mgr, const struct tuple6 *key)
2023-12-13 19:20:34 +08:00
{
2024-08-30 18:33:41 +08:00
if (key->ip_proto == IPPROTO_TCP && mgr->stat.tcp_sess_used >= mgr->cfg.tcp_session_max)
2024-04-09 15:07:53 +08:00
{
2024-08-16 18:32:35 +08:00
mgr->stat.tcp_pkts_bypass_table_full++;
2024-04-09 15:07:53 +08:00
return 1;
}
return 0;
}
static int udp_overload_bypass(struct session_manager *mgr, const struct tuple6 *key)
{
2024-08-30 18:33:41 +08:00
if (key->ip_proto == IPPROTO_UDP && mgr->stat.udp_sess_used >= mgr->cfg.udp_session_max)
2023-12-13 19:20:34 +08:00
{
2024-08-16 18:32:35 +08:00
mgr->stat.udp_pkts_bypass_table_full++;
2024-04-09 15:07:53 +08:00
return 1;
2023-12-13 19:20:34 +08:00
}
return 0;
}
static int evicted_session_bypass(struct session_manager *mgr, const struct tuple6 *key)
{
2024-08-30 18:33:41 +08:00
if (mgr->cfg.evicted_session_bloom_filter.enable && session_filter_lookup(mgr->evicte_sess_filter, key, mgr->now_ms))
{
2024-08-16 18:32:35 +08:00
mgr->stat.udp_pkts_bypass_session_evicted++;
return 1;
}
return 0;
2023-12-13 19:20:34 +08:00
}
2024-04-09 15:07:53 +08:00
// on update session
/*
 * Update-path bypass check for duplicated packets. The bloom filter is only
 * consulted while the session is young (< 3 raw packets in this direction)
 * or once duplicate traffic has already been flagged on the session.
 * Returns 1 when the packet should be bypassed, 0 otherwise.
 */
static int duplicated_packet_bypass(struct session_manager *mgr, struct session *sess, const struct packet *pkt, const struct tuple6 *key)
{
    if (mgr->cfg.duplicated_packet_bloom_filter.enable == 0)
    {
        return 0;
    }
    enum flow_type type = identify_flow_type_by_history(sess, key);
    if (session_get_stat(sess, type, STAT_RAW_PACKETS_RECEIVED) < 3 || session_has_duplicate_traffic(sess))
    {
        if (packet_filter_lookup(mgr->dup_pkt_filter, pkt, mgr->now_ms))
        {
            // filter hit: account the duplicate and mark the session
            session_inc_stat(sess, type, STAT_DUPLICATE_PACKETS_BYPASS, 1);
            session_inc_stat(sess, type, STAT_DUPLICATE_BYTES_BYPASS, packet_get_raw_len(pkt));
            switch (session_get_type(sess))
            {
            case SESSION_TYPE_TCP:
                mgr->stat.tcp_pkts_bypass_duplicated++;
                break;
            case SESSION_TYPE_UDP:
                mgr->stat.udp_pkts_bypass_duplicated++;
                break;
            default:
                assert(0);
                break;
            }
            session_set_duplicate_traffic(sess);
            session_set_current_packet(sess, pkt);
            session_set_flow_type(sess, type);
            return 1;
        }
        else
        {
            // first sighting: remember the packet for future duplicate checks
            packet_filter_add(mgr->dup_pkt_filter, pkt, mgr->now_ms);
            return 0;
        }
    }
    return 0;
}
/******************************************************************************
* session manager utils
******************************************************************************/
2024-09-02 17:49:33 +08:00
/*
 * Apply one packet to a session. On the very first packet (state INIT) the
 * session identity is established: snowflake id, canonical client->server
 * tuple, inbound/outbound direction, session type and start timestamp.
 * Every packet then updates the per-direction counters, cached packet
 * pointers, last-seen timestamp and the state machine.
 */
static void session_update(struct session_manager *mgr, struct session *sess, enum session_state next_state, const struct packet *pkt, const struct tuple6 *key, enum flow_type type)
{
    if (session_get_current_state(sess) == SESSION_STATE_INIT)
    {
        // first packet: allocate a unique id (snowflake timestamp is in seconds)
        uint64_t sess_id = snowflake_generate(mgr->sf, mgr->now_ms / 1000);
        session_set_id(sess, sess_id);
        enum packet_direction pkt_dir = packet_get_direction(pkt);
        if (type == FLOW_TYPE_C2S)
        {
            // key already runs client -> server; store it as-is
            session_set_tuple6(sess, key);
            if (pkt_dir == PACKET_DIRECTION_OUTGOING) // Internal -> External
            {
                session_set_direction(sess, SESSION_DIRECTION_OUTBOUND);
            }
            else
            {
                session_set_direction(sess, SESSION_DIRECTION_INBOUND);
            }
            tuple6_to_str(key, sess->tuple_str, sizeof(sess->tuple_str));
        }
        else
        {
            // first packet came from the server: store the reversed tuple so
            // the session tuple always runs client -> server
            struct tuple6 out;
            tuple6_reverse(key, &out);
            session_set_tuple6(sess, &out);
            if (pkt_dir == PACKET_DIRECTION_OUTGOING) // Internal -> External
            {
                session_set_direction(sess, SESSION_DIRECTION_INBOUND);
            }
            else
            {
                session_set_direction(sess, SESSION_DIRECTION_OUTBOUND);
            }
            tuple6_to_str(&out, sess->tuple_str, sizeof(sess->tuple_str));
        }
        session_set_timestamp(sess, SESSION_TIMESTAMP_START, mgr->now_ms);
        switch (key->ip_proto)
        {
        case IPPROTO_TCP:
            session_set_type(sess, SESSION_TYPE_TCP);
            break;
        case IPPROTO_UDP:
            session_set_type(sess, SESSION_TYPE_UDP);
            break;
        default:
            assert(0); // sessions are only created for TCP/UDP
            break;
        }
    }
    session_inc_stat(sess, type, STAT_RAW_PACKETS_RECEIVED, 1);
    session_inc_stat(sess, type, STAT_RAW_BYTES_RECEIVED, packet_get_raw_len(pkt));
    // keep a copy of the first packet (plus route ctx and sids) per direction
    if (!session_get_first_packet(sess, type))
    {
        session_set_first_packet(sess, type, packet_dup(pkt));
        session_set_route_ctx(sess, type, packet_get_route_ctx(pkt));
        session_set_sids(sess, type, packet_get_sids(pkt));
    }
    session_set_current_packet(sess, pkt);
    session_set_flow_type(sess, type);
    session_set_timestamp(sess, SESSION_TIMESTAMP_LAST, mgr->now_ms);
    session_set_current_state(sess, next_state);
}
2023-12-19 10:47:26 +08:00
/*
 * Force a session out of its table ahead of its timer (reason is
 * PORT_REUSE_EVICT or LRU_EVICT). The session is moved onto evicte_list for
 * later reaping; for UDP the tuple is also remembered in the
 * evicted-session bloom filter so follow-up datagrams can be bypassed.
 * NULL sessions are tolerated (LRU lookup may come up empty).
 */
static void session_manager_evicte_session(struct session_manager *mgr, struct session *sess, int reason)
{
    if (sess == NULL)
    {
        return;
    }
    // when session add to evicted queue, session lifetime is over
    enum session_state curr_state = session_get_current_state(sess);
    enum session_state next_state = session_transition_run(curr_state, reason);
    session_transition_log(sess, curr_state, next_state, reason);
    session_set_current_state(sess, next_state);
    // record why the session closed, but never overwrite an earlier reason
    if (!session_get_closing_reason(sess))
    {
        if (reason == PORT_REUSE_EVICT)
        {
            session_set_closing_reason(sess, CLOSING_BY_PORT_REUSE_EVICTED);
        }
        if (reason == LRU_EVICT)
        {
            session_set_closing_reason(sess, CLOSING_BY_LRU_EVICTED);
        }
    }
    session_timer_del(mgr->sess_timer, sess);
    TAILQ_INSERT_TAIL(&mgr->evicte_list, sess, evicte_tqe);
    switch (session_get_type(sess))
    {
    case SESSION_TYPE_TCP:
        SESSION_LOG_DEBUG("evicte tcp old session: %lu", session_get_id(sess));
        session_table_del(mgr->tcp_sess_table, sess);
        SESS_MGR_STAT_UPDATE(&mgr->stat, curr_state, next_state, tcp);
        mgr->stat.tcp_sess_evicted++;
        break;
    case SESSION_TYPE_UDP:
        SESSION_LOG_DEBUG("evicte udp old session: %lu", session_get_id(sess));
        session_table_del(mgr->udp_sess_table, sess);
        // remember the tuple so trailing datagrams of this flow are bypassed
        if (mgr->cfg.evicted_session_bloom_filter.enable)
        {
            session_filter_add(mgr->evicte_sess_filter, session_get_tuple6(sess), mgr->now_ms);
        }
        SESS_MGR_STAT_UPDATE(&mgr->stat, curr_state, next_state, udp);
        mgr->stat.udp_sess_evicted++;
        break;
    default:
        assert(0);
        break;
    }
}
/*
 * Find the TCP session for `key`. A SYN that contradicts the recorded
 * direction history (different ISN, or arriving after FIN/RST) indicates
 * port reuse: the stale session is evicted and NULL returned so the caller
 * creates a fresh one. A SYN that matches is treated as a retransmission.
 */
static struct session *session_manager_lookup_tcp_session(struct session_manager *mgr, const struct packet *pkt, const struct tuple6 *key)
{
    struct session *sess = session_table_find_tuple6(mgr->tcp_sess_table, key, 0);
    if (sess == NULL)
    {
        return NULL;
    }
    const struct layer_private *tcp_layer = packet_get_innermost_layer(pkt, LAYER_PROTO_TCP);
    const struct tcphdr *hdr = (const struct tcphdr *)tcp_layer->hdr_ptr;
    uint8_t flags = tcp_hdr_get_flags(hdr);
    // non-SYN packets always map to the found session
    if ((flags & TH_SYN) == 0)
    {
        return sess;
    }
    enum flow_type type = identify_flow_type_by_history(sess, key);
    struct tcp_half *half = &sess->tcp_halfs[type];
    if ((half->isn && half->isn != tcp_hdr_get_seq(hdr)) || // recv SYN with different ISN
        ((half->history & TH_FIN) || (half->history & TH_RST))) // recv SYN after FIN or RST
    {
        // TCP port reuse, evict old session
        session_manager_evicte_session(mgr, sess, PORT_REUSE_EVICT);
        return NULL;
    }
    else
    {
        // TCP SYN retransmission
        return sess;
    }
}
/*
 * Create a TCP session for a SYN (or SYN+ACK) packet; non-SYN packets with
 * no existing session are bypassed. May first LRU-evict an old session when
 * the table approaches its configured limit. Returns the new session or
 * NULL when the packet cannot open one.
 */
static struct session *session_manager_new_tcp_session(struct session_manager *mgr, const struct packet *pkt, const struct tuple6 *key)
{
    const struct layer_private *tcp_layer = packet_get_innermost_layer(pkt, LAYER_PROTO_TCP);
    const struct tcphdr *hdr = (const struct tcphdr *)tcp_layer->hdr_ptr;
    uint8_t flags = tcp_hdr_get_flags(hdr);
    // only a SYN may open a session; mid-stream strays are bypassed
    if (!(flags & TH_SYN))
    {
        mgr->stat.tcp_pkts_bypass_session_not_found++;
        return NULL;
    }
    // tcp table full evict old session
    if (mgr->cfg.evict_old_on_tcp_table_limit && mgr->stat.tcp_sess_used >= mgr->cfg.tcp_session_max - EVICTE_SESSION_BURST)
    {
        struct session *evic_sess = session_table_find_lru(mgr->tcp_sess_table);
        session_manager_evicte_session(mgr, evic_sess, LRU_EVICT);
    }
    // a SYN+ACK means the server spoke first, i.e. the client's SYN was missed
    enum flow_type type = (flags & TH_ACK) ? FLOW_TYPE_S2C : FLOW_TYPE_C2S;
    struct session *sess = session_pool_pop(mgr->sess_pool);
    if (sess == NULL)
    {
        assert(0); // pool exhaustion is unexpected here
        return NULL;
    }
    session_init(sess);
    sess->mgr = mgr;
    sess->mgr_stat = &mgr->stat;
    enum session_state next_state = session_transition_run(SESSION_STATE_INIT, TCP_SYN);
    session_update(mgr, sess, next_state, pkt, key, type);
    session_transition_log(sess, SESSION_STATE_INIT, next_state, TCP_SYN);
    if (tcp_init(mgr, sess) == -1)
    {
        assert(0);
        session_pool_push(mgr->sess_pool, sess);
        return NULL;
    }
    tcp_update(mgr, sess, type, tcp_layer);
    // SYN+ACK implies mid-handshake, so the shorter handshake timeout applies
    uint64_t timeout = (flags & TH_ACK) ? mgr->cfg.tcp_timeout_ms.handshake : mgr->cfg.tcp_timeout_ms.init;
    session_timer_update(mgr->sess_timer, sess, mgr->now_ms + timeout);
    session_table_add(mgr->tcp_sess_table, sess);
    // seed the duplicate filter so a copy of this packet is recognized later
    if (mgr->cfg.duplicated_packet_bloom_filter.enable)
    {
        packet_filter_add(mgr->dup_pkt_filter, pkt, mgr->now_ms);
    }
    SESS_MGR_STAT_INC(&mgr->stat, next_state, tcp);
    mgr->stat.tcp_sess_used++;
    mgr->stat.history_tcp_sessions++;
    return sess;
}
2023-12-19 10:47:26 +08:00
/*
 * Create a UDP session for the first datagram of a flow. May first
 * LRU-evict an old session when the table approaches its configured limit.
 * The flow direction is guessed from the port pair (higher port = client).
 * Returns the new session or NULL on pool exhaustion.
 */
static struct session *session_manager_new_udp_session(struct session_manager *mgr, const struct packet *pkt, const struct tuple6 *key)
{
    // udp table full evict old session
    if (mgr->cfg.evict_old_on_udp_table_limit && mgr->stat.udp_sess_used >= mgr->cfg.udp_session_max - EVICTE_SESSION_BURST)
    {
        struct session *evic_sess = session_table_find_lru(mgr->udp_sess_table);
        session_manager_evicte_session(mgr, evic_sess, LRU_EVICT);
    }
    struct session *sess = session_pool_pop(mgr->sess_pool);
    if (sess == NULL)
    {
        assert(sess); // pool exhaustion is unexpected here
        return NULL;
    }
    session_init(sess);
    sess->mgr = mgr;
    sess->mgr_stat = &mgr->stat;
    enum flow_type type = identify_flow_type_by_port(ntohs(key->src_port), ntohs(key->dst_port));
    enum session_state next_state = session_transition_run(SESSION_STATE_INIT, UDP_DATA);
    session_update(mgr, sess, next_state, pkt, key, type);
    session_transition_log(sess, SESSION_STATE_INIT, next_state, UDP_DATA);
    session_timer_update(mgr->sess_timer, sess, mgr->now_ms + mgr->cfg.udp_timeout_ms.data);
    session_table_add(mgr->udp_sess_table, sess);
    SESS_MGR_STAT_INC(&mgr->stat, next_state, udp);
    mgr->stat.udp_sess_used++;
    mgr->stat.history_udp_sessions++;
    return sess;
}
/*
 * Drive an existing TCP session with one packet: derive the state-machine
 * inputs from the TCP flags / payload presence, run the transition, feed
 * the segment to tcp_update(), record the closing reason on FIN/RST, and
 * rearm the idle timer according to the new state. Always returns 0.
 */
static int session_manager_update_tcp_session(struct session_manager *mgr, struct session *sess, const struct packet *pkt, const struct tuple6 *key)
{
    const struct layer_private *tcp_layer = packet_get_innermost_layer(pkt, LAYER_PROTO_TCP);
    const struct tcphdr *hdr = (const struct tcphdr *)tcp_layer->hdr_ptr;
    enum flow_type type = identify_flow_type_by_history(sess, key);
    uint8_t flags = tcp_hdr_get_flags(hdr);
    // map TCP flags and payload presence onto a transition-input bitmask
    int inputs = 0;
    inputs |= (flags & TH_SYN) ? TCP_SYN : NONE;
    inputs |= (flags & TH_FIN) ? TCP_FIN : NONE;
    inputs |= (flags & TH_RST) ? TCP_RST : NONE;
    inputs |= tcp_layer->pld_len ? TCP_DATA : NONE;
    // update state
    enum session_state curr_state = session_get_current_state(sess);
    enum session_state next_state = session_transition_run(curr_state, inputs);
    // update session
    session_update(mgr, sess, next_state, pkt, key, type);
    session_transition_log(sess, curr_state, next_state, inputs);
    // update tcp
    tcp_update(mgr, sess, type, tcp_layer);
    // set closing reason (the first transition into CLOSING wins)
    if (next_state == SESSION_STATE_CLOSING && !session_get_closing_reason(sess))
    {
        if (flags & TH_FIN)
        {
            session_set_closing_reason(sess, (type == FLOW_TYPE_C2S ? CLOSING_BY_CLIENT_FIN : CLOSING_BY_SERVER_FIN));
        }
        if (flags & TH_RST)
        {
            session_set_closing_reason(sess, (type == FLOW_TYPE_C2S ? CLOSING_BY_CLIENT_RST : CLOSING_BY_SERVER_RST));
        }
    }
    // update timeout
    struct tcp_half *curr = &sess->tcp_halfs[type];
    struct tcp_half *peer = &sess->tcp_halfs[(type == FLOW_TYPE_C2S ? FLOW_TYPE_S2C : FLOW_TYPE_C2S)];
    uint64_t timeout = 0;
    switch (next_state)
    {
    case SESSION_STATE_OPENING:
        if (flags & TH_SYN)
        {
            timeout = (flags & TH_ACK) ? mgr->cfg.tcp_timeout_ms.handshake : mgr->cfg.tcp_timeout_ms.init;
        }
        else
        {
            timeout = mgr->cfg.tcp_timeout_ms.data;
        }
        break;
    case SESSION_STATE_ACTIVE:
        timeout = mgr->cfg.tcp_timeout_ms.data;
        break;
    case SESSION_STATE_CLOSING:
        if (flags & TH_FIN)
        {
            // second FIN -> time-wait timeout; first FIN -> half-closed timeout
            timeout = (peer->history & TH_FIN) ? mgr->cfg.tcp_timeout_ms.time_wait : mgr->cfg.tcp_timeout_ms.half_closed;
        }
        else if (flags & TH_RST)
        {
            // if fin is received, the expected sequence number should be increased by 1
            uint32_t expected = (peer->history & TH_FIN) ? peer->ack + 1 : peer->ack;
            // NOTE(review): an RST whose seq mismatches the peer's ack gets the
            // longer "unverified" timeout — appears to guard against spoofed
            // RSTs; confirm against the transition design.
            timeout = (expected == curr->seq) ? mgr->cfg.tcp_timeout_ms.time_wait : mgr->cfg.tcp_timeout_ms.unverified_rst;
        }
        else
        {
            timeout = mgr->cfg.tcp_timeout_ms.data;
        }
        break;
    case SESSION_STATE_DISCARD:
        timeout = mgr->cfg.tcp_timeout_ms.discard_default;
        break;
    default:
        assert(0);
        break;
    }
    session_timer_update(mgr->sess_timer, sess, mgr->now_ms + timeout);
    SESS_MGR_STAT_UPDATE(&mgr->stat, curr_state, next_state, tcp);
    return 0;
}
static int session_manager_update_udp_session(struct session_manager *mgr, struct session *sess, const struct packet *pkt, const struct tuple6 *key)
2023-12-19 10:47:26 +08:00
{
2024-09-02 17:49:33 +08:00
enum flow_type type = identify_flow_type_by_history(sess, key);
2024-05-20 11:12:24 +08:00
enum session_state curr_state = session_get_current_state(sess);
enum session_state next_state = session_transition_run(curr_state, UDP_DATA);
2024-09-02 17:49:33 +08:00
session_update(mgr, sess, next_state, pkt, key, type);
2024-03-26 15:09:03 +08:00
session_transition_log(sess, curr_state, next_state, UDP_DATA);
2024-05-20 11:12:24 +08:00
if (session_get_current_state(sess) == SESSION_STATE_DISCARD)
{
2024-08-30 18:33:41 +08:00
session_timer_update(mgr->sess_timer, sess, mgr->now_ms + mgr->cfg.udp_timeout_ms.discard_default);
}
else
{
2024-08-30 18:33:41 +08:00
session_timer_update(mgr->sess_timer, sess, mgr->now_ms + mgr->cfg.udp_timeout_ms.data);
}
2024-04-09 15:07:53 +08:00
SESS_MGR_STAT_UPDATE(&mgr->stat, curr_state, next_state, udp);
2024-04-09 10:36:39 +08:00
return 0;
2023-12-13 19:20:34 +08:00
}
/*
 * Inclusive IPv4 range test. Addresses are compared bytewise; since
 * in_addr holds network (big-endian) byte order, memcmp order matches
 * numeric address order. Returns 1 when start <= addr <= end, else 0.
 */
static inline uint8_t ipv4_in_range(const struct in_addr *addr, const struct in_addr *start, const struct in_addr *end)
{
    if (memcmp(addr, start, sizeof(struct in_addr)) < 0)
    {
        return 0;
    }
    return memcmp(addr, end, sizeof(struct in_addr)) <= 0;
}
/*
 * Inclusive IPv6 range test. in6_addr is a 16-byte big-endian value, so a
 * bytewise memcmp matches numeric address order.
 * Returns 1 when start <= addr <= end, else 0.
 */
static inline uint8_t ipv6_in_range(const struct in6_addr *addr, const struct in6_addr *start, const struct in6_addr *end)
{
    int not_below = memcmp(addr, start, sizeof(struct in6_addr)) >= 0;
    int not_above = memcmp(addr, end, sizeof(struct in6_addr)) <= 0;
    return (uint8_t)(not_below && not_above);
}
2023-12-13 19:20:34 +08:00
/******************************************************************************
* session manager public API
2023-12-13 19:20:34 +08:00
******************************************************************************/
2024-08-30 18:33:41 +08:00
/*
 * Load and validate a session_manager configuration from a TOML file.
 *
 * Each key is range-checked by load_and_validate_toml_*_config() against the
 * [min, max] bounds given on its call; non-zero results accumulate in `ret`,
 * so any single bad or missing key fails the whole load.
 *
 * Returns a heap-allocated config on success (release with
 * session_manager_config_free()), or NULL on NULL input, allocation failure,
 * or any validation error.
 *
 * NOTE(review): session_manager_new() reads cfg->session_id_seed, but no key
 * for it is loaded here — confirm callers set it before use.
 */
struct session_manager_config *session_manager_config_new(const char *toml_file)
{
    if (toml_file == NULL)
    {
        return NULL;
    }
    struct session_manager_config *cfg = (struct session_manager_config *)calloc(1, sizeof(struct session_manager_config));
    if (cfg == NULL)
    {
        return NULL;
    }
    int ret = 0;
    /* table sizing: lower bound keeps room for at least two eviction bursts */
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_session_max", (uint64_t *)&cfg->tcp_session_max, EVICTE_SESSION_BURST * 2, UINT64_MAX);
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.udp_session_max", (uint64_t *)&cfg->udp_session_max, EVICTE_SESSION_BURST * 2, UINT64_MAX);
    /* overload policy: boolean flags (0/1) */
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.evict_old_on_tcp_table_limit", (uint64_t *)&cfg->evict_old_on_tcp_table_limit, 0, 1);
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.evict_old_on_udp_table_limit", (uint64_t *)&cfg->evict_old_on_udp_table_limit, 0, 1);
    /* expiry sweep cadence and batch size */
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.expire_period_ms", (uint64_t *)&cfg->expire_period_ms, 0, 60000);
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.expire_batch_max", (uint64_t *)&cfg->expire_batch_max, 1, 1024);
    /* per-state TCP timeouts (milliseconds) */
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.init", (uint64_t *)&cfg->tcp_timeout_ms.init, 1, 60000);
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.handshake", (uint64_t *)&cfg->tcp_timeout_ms.handshake, 1, 60000);
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.data", (uint64_t *)&cfg->tcp_timeout_ms.data, 1, 15999999000);
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.half_closed", (uint64_t *)&cfg->tcp_timeout_ms.half_closed, 1, 604800000);
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.time_wait", (uint64_t *)&cfg->tcp_timeout_ms.time_wait, 1, 60000);
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.discard_default", (uint64_t *)&cfg->tcp_timeout_ms.discard_default, 1, 15999999000);
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_timeout_ms.unverified_rst", (uint64_t *)&cfg->tcp_timeout_ms.unverified_rst, 1, 60000);
    /* UDP timeouts (milliseconds) */
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.udp_timeout_ms.data", (uint64_t *)&cfg->udp_timeout_ms.data, 1, 15999999000);
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.udp_timeout_ms.discard_default", (uint64_t *)&cfg->udp_timeout_ms.discard_default, 1, 15999999000);
    /* duplicate-packet bloom filter */
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.duplicated_packet_bloom_filter.enable", (uint64_t *)&cfg->duplicated_packet_bloom_filter.enable, 0, 1);
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.duplicated_packet_bloom_filter.capacity", (uint64_t *)&cfg->duplicated_packet_bloom_filter.capacity, 1, 4294967295);
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.duplicated_packet_bloom_filter.time_window_ms", (uint64_t *)&cfg->duplicated_packet_bloom_filter.time_window_ms, 1, 60000);
    ret += load_and_validate_toml_double_config(toml_file, "session_manager.duplicated_packet_bloom_filter.error_rate", (double *)&cfg->duplicated_packet_bloom_filter.error_rate, 0.0, 1.0);
    /* evicted-session bloom filter */
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.evicted_session_bloom_filter.enable", (uint64_t *)&cfg->evicted_session_bloom_filter.enable, 0, 1);
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.evicted_session_bloom_filter.capacity", (uint64_t *)&cfg->evicted_session_bloom_filter.capacity, 1, 4294967295);
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.evicted_session_bloom_filter.time_window_ms", (uint64_t *)&cfg->evicted_session_bloom_filter.time_window_ms, 1, 60000);
    ret += load_and_validate_toml_double_config(toml_file, "session_manager.evicted_session_bloom_filter.error_rate", (double *)&cfg->evicted_session_bloom_filter.error_rate, 0.0, 1.0);
    /* TCP reassembly */
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_reassembly.enable", (uint64_t *)&cfg->tcp_reassembly.enable, 0, 1);
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_reassembly.timeout_ms", (uint64_t *)&cfg->tcp_reassembly.timeout_ms, 1, 60000);
    ret += load_and_validate_toml_integer_config(toml_file, "session_manager.tcp_reassembly.buffered_segments_max", (uint64_t *)&cfg->tcp_reassembly.buffered_segments_max, 1, 512);
    if (ret != 0)
    {
        session_manager_config_free(cfg);
        return NULL;
    }
    return cfg;
}
/*
 * Release a configuration returned by session_manager_config_new().
 * Safe to call with NULL.
 */
void session_manager_config_free(struct session_manager_config *cfg)
{
    /* free(NULL) is a no-op, so no guard is needed; the previous
     * `cfg = NULL` only cleared the by-value parameter and had no
     * effect for the caller. */
    free(cfg);
}
void session_manager_config_print(struct session_manager_config *cfg)
{
if (cfg)
{
// max session number
2024-08-30 18:33:41 +08:00
SESSION_LOG_INFO("session_manager.tcp_session_max : %lu", cfg->tcp_session_max);
SESSION_LOG_INFO("session_manager.udp_session_max : %lu", cfg->udp_session_max);
// session overload
2024-08-30 18:33:41 +08:00
SESSION_LOG_INFO("session_manager.evict_old_on_tcp_table_limit : %d", cfg->evict_old_on_tcp_table_limit);
SESSION_LOG_INFO("session_manager.evict_old_on_udp_table_limit : %d", cfg->evict_old_on_udp_table_limit);
// TCP timeout
2024-08-30 18:33:41 +08:00
SESSION_LOG_INFO("session_manager.tcp_timeout_ms.init : %lu", cfg->tcp_timeout_ms.init);
SESSION_LOG_INFO("session_manager.tcp_timeout_ms.handshake : %lu", cfg->tcp_timeout_ms.handshake);
SESSION_LOG_INFO("session_manager.tcp_timeout_ms.data : %lu", cfg->tcp_timeout_ms.data);
SESSION_LOG_INFO("session_manager.tcp_timeout_ms.half_closed : %lu", cfg->tcp_timeout_ms.half_closed);
SESSION_LOG_INFO("session_manager.tcp_timeout_ms.time_wait : %lu", cfg->tcp_timeout_ms.time_wait);
SESSION_LOG_INFO("session_manager.tcp_timeout_ms.discard_default : %lu", cfg->tcp_timeout_ms.discard_default);
SESSION_LOG_INFO("session_manager.tcp_timeout_ms.unverified_rst : %lu", cfg->tcp_timeout_ms.unverified_rst);
// UDP timeout
2024-08-30 18:33:41 +08:00
SESSION_LOG_INFO("session_manager.udp_timeout_ms.data : %lu", cfg->udp_timeout_ms.data);
SESSION_LOG_INFO("session_manager.udp_timeout_ms.discard_default : %lu", cfg->udp_timeout_ms.discard_default);
// limit
2024-08-30 18:33:41 +08:00
SESSION_LOG_INFO("session_manager.expire_period_ms : %lu", cfg->expire_period_ms);
SESSION_LOG_INFO("session_manager.expire_batch_max : %lu", cfg->expire_batch_max);
// duplicated packet filter
2024-08-30 18:33:41 +08:00
SESSION_LOG_INFO("session_manager.duplicated_packet_bloom_filter.enable : %d", cfg->duplicated_packet_bloom_filter.enable);
SESSION_LOG_INFO("session_manager.duplicated_packet_bloom_filter.capacity : %lu", cfg->duplicated_packet_bloom_filter.capacity);
SESSION_LOG_INFO("session_manager.duplicated_packet_bloom_filter.time_window_ms : %lu", cfg->duplicated_packet_bloom_filter.time_window_ms);
SESSION_LOG_INFO("session_manager.duplicated_packet_bloom_filter.error_rate : %f", cfg->duplicated_packet_bloom_filter.error_rate);
// eviction session filter
2024-08-30 18:33:41 +08:00
SESSION_LOG_INFO("session_manager.evicted_session_bloom_filter.enable : %d", cfg->evicted_session_bloom_filter.enable);
SESSION_LOG_INFO("session_manager.evicted_session_bloom_filter.capacity : %lu", cfg->evicted_session_bloom_filter.capacity);
SESSION_LOG_INFO("session_manager.evicted_session_bloom_filter.time_window_ms : %lu", cfg->evicted_session_bloom_filter.time_window_ms);
SESSION_LOG_INFO("session_manager.evicted_session_bloom_filter.error_rate : %f", cfg->evicted_session_bloom_filter.error_rate);
// TCP reassembly
2024-08-30 18:33:41 +08:00
SESSION_LOG_INFO("session_manager.tcp_reassembly.enable : %d", cfg->tcp_reassembly.enable);
SESSION_LOG_INFO("session_manager.tcp_reassembly.timeout_ms : %lu", cfg->tcp_reassembly.timeout_ms);
SESSION_LOG_INFO("session_manager.tcp_reassembly.buffered_segments_max : %lu", cfg->tcp_reassembly.buffered_segments_max);
}
}
struct session_manager *session_manager_new(const struct session_manager_config *cfg, uint64_t now_ms)
{
2023-12-13 19:20:34 +08:00
struct session_manager *mgr = (struct session_manager *)calloc(1, sizeof(struct session_manager));
if (mgr == NULL)
{
return NULL;
}
memcpy(&mgr->cfg, cfg, sizeof(struct session_manager_config));
2024-04-03 18:59:46 +08:00
2024-08-30 18:33:41 +08:00
mgr->sess_pool = session_pool_new(mgr->cfg.tcp_session_max + mgr->cfg.udp_session_max);
mgr->tcp_sess_table = session_table_new();
mgr->udp_sess_table = session_table_new();
mgr->sess_timer = session_timer_new(now_ms);
if (mgr->sess_pool == NULL || mgr->tcp_sess_table == NULL || mgr->udp_sess_table == NULL || mgr->sess_timer == NULL)
{
goto error;
}
2024-08-30 18:33:41 +08:00
if (mgr->cfg.evicted_session_bloom_filter.enable)
{
2024-08-30 18:33:41 +08:00
mgr->evicte_sess_filter = session_filter_new(mgr->cfg.evicted_session_bloom_filter.capacity,
mgr->cfg.evicted_session_bloom_filter.time_window_ms,
mgr->cfg.evicted_session_bloom_filter.error_rate, now_ms);
if (mgr->evicte_sess_filter == NULL)
{
goto error;
}
}
2024-08-30 18:33:41 +08:00
if (mgr->cfg.duplicated_packet_bloom_filter.enable)
{
2024-08-30 18:33:41 +08:00
mgr->dup_pkt_filter = packet_filter_new(mgr->cfg.duplicated_packet_bloom_filter.capacity,
mgr->cfg.duplicated_packet_bloom_filter.time_window_ms,
mgr->cfg.duplicated_packet_bloom_filter.error_rate, now_ms);
if (mgr->dup_pkt_filter == NULL)
{
goto error;
}
}
mgr->sf = snowflake_new(mgr->cfg.session_id_seed);
if (mgr->sf == NULL)
{
goto error;
}
2023-12-19 10:47:26 +08:00
TAILQ_INIT(&mgr->evicte_list);
2024-03-29 16:32:16 +08:00
session_transition_init();
mgr->now_ms = now_ms;
mgr->last_clean_expired_sess_ts = now_ms;
2024-03-29 16:32:16 +08:00
2023-12-13 19:20:34 +08:00
return mgr;
error:
session_manager_free(mgr);
2023-12-13 19:20:34 +08:00
return NULL;
}
/*
 * Destroy a session manager and everything it owns.
 *
 * Teardown order matters: all live sessions (evicted list first, then the
 * UDP and TCP tables, drained via their LRU entries) are released through
 * session_manager_free_session() before the pool and tables themselves are
 * freed. Safe to call with NULL or with a partially constructed manager
 * (the error path of session_manager_new()): calloc zeroed every pointer,
 * so the per-component free calls see NULL for anything not yet created.
 */
void session_manager_free(struct session_manager *mgr)
{
    struct session *sess;
    if (mgr)
    {
        // free all evicted session
        while ((sess = TAILQ_FIRST(&mgr->evicte_list)))
        {
            TAILQ_REMOVE(&mgr->evicte_list, sess, evicte_tqe);
            session_manager_free_session(mgr, sess);
        }
        // free all udp session
        while (mgr->udp_sess_table && (sess = session_table_find_lru(mgr->udp_sess_table)))
        {
            session_manager_free_session(mgr, sess);
        }
        // free all tcp session
        while (mgr->tcp_sess_table && (sess = session_table_find_lru(mgr->tcp_sess_table)))
        {
            session_manager_free_session(mgr, sess);
        }
        /* filters are only allocated when their enable flag is set */
        if (mgr->cfg.evicted_session_bloom_filter.enable)
        {
            session_filter_free(mgr->evicte_sess_filter);
        }
        if (mgr->cfg.duplicated_packet_bloom_filter.enable)
        {
            packet_filter_free(mgr->dup_pkt_filter);
        }
        snowflake_free(mgr->sf);
        session_timer_free(mgr->sess_timer);
        session_table_free(mgr->udp_sess_table);
        session_table_free(mgr->tcp_sess_table);
        session_pool_free(mgr->sess_pool);
        free(mgr);
        mgr = NULL;
    }
}
void session_manager_record_duplicated_packet(struct session_manager *mgr, const struct packet *pkt)
{
2024-08-30 18:33:41 +08:00
if (mgr->cfg.duplicated_packet_bloom_filter.enable)
{
packet_filter_add(mgr->dup_pkt_filter, pkt, mgr->now_ms);
}
}
/* Create a session for `pkt`'s innermost 6-tuple, honoring per-protocol
 * overload bypass (and, for UDP, the evicted-session filter).
 * Returns NULL for unparsable packets, bypassed traffic, or non-TCP/UDP. */
struct session *session_manager_new_session(struct session_manager *mgr, const struct packet *pkt, uint64_t now_ms)
{
    mgr->now_ms = now_ms;
    struct tuple6 key;
    if (packet_get_innermost_tuple6(pkt, &key))
    {
        return NULL;
    }
    if (key.ip_proto == IPPROTO_TCP)
    {
        return tcp_overload_bypass(mgr, &key) ? NULL : session_manager_new_tcp_session(mgr, pkt, &key);
    }
    if (key.ip_proto == IPPROTO_UDP)
    {
        if (udp_overload_bypass(mgr, &key) || evicted_session_bypass(mgr, &key))
        {
            return NULL;
        }
        return session_manager_new_udp_session(mgr, pkt, &key);
    }
    return NULL;
}
2024-03-08 18:10:38 +08:00
/*
 * Release one session back to the pool.
 *
 * Cancels its timer, detaches it from its protocol table (only when the
 * table still maps the session id to this exact object), decrements the
 * per-state and per-protocol counters, frees the cached first packets of
 * both flow directions, resets all per-session state to INIT/NONE, and
 * finally pushes the object back into the session pool for reuse.
 * Safe to call with sess == NULL.
 */
void session_manager_free_session(struct session_manager *mgr, struct session *sess)
{
    if (sess)
    {
        SESSION_LOG_DEBUG("session %lu closed (%s)", session_get_id(sess), closing_reason_to_str(session_get_closing_reason(sess)));
        session_timer_del(mgr->sess_timer, sess);
        switch (session_get_type(sess))
        {
        case SESSION_TYPE_TCP:
            tcp_clean(mgr, sess);
            /* only unlink if the table entry is still this object */
            if (session_table_find_sessid(mgr->tcp_sess_table, session_get_id(sess), 0) == sess)
            {
                session_table_del(mgr->tcp_sess_table, sess);
            }
            SESS_MGR_STAT_DEC(&mgr->stat, session_get_current_state(sess), tcp);
            mgr->stat.tcp_sess_used--;
            break;
        case SESSION_TYPE_UDP:
            if (session_table_find_sessid(mgr->udp_sess_table, session_get_id(sess), 0) == sess)
            {
                session_table_del(mgr->udp_sess_table, sess);
            }
            SESS_MGR_STAT_DEC(&mgr->stat, session_get_current_state(sess), udp);
            mgr->stat.udp_sess_used--;
            break;
        default:
            assert(0);
            break;
        }
        /* the session owns its cached first packet in each direction */
        packet_free((struct packet *)session_get_first_packet(sess, FLOW_TYPE_C2S));
        packet_free((struct packet *)session_get_first_packet(sess, FLOW_TYPE_S2C));
        session_set_first_packet(sess, FLOW_TYPE_C2S, NULL);
        session_set_first_packet(sess, FLOW_TYPE_S2C, NULL);
        session_clear_route_ctx(sess, FLOW_TYPE_C2S);
        session_clear_route_ctx(sess, FLOW_TYPE_S2C);
        session_clear_sids(sess, FLOW_TYPE_C2S);
        session_clear_sids(sess, FLOW_TYPE_S2C);
        /* INIT marks the pooled object as unused (session_manager_scan()
         * relies on this to skip free slots) */
        session_set_current_state(sess, SESSION_STATE_INIT);
        session_set_current_packet(sess, NULL);
        session_set_flow_type(sess, FLOW_TYPE_NONE);
        session_pool_push(mgr->sess_pool, sess);
        sess = NULL;
    }
}
struct session *session_manager_lookup_session_by_packet(struct session_manager *mgr, const struct packet *pkt)
{
struct tuple6 key;
if (packet_get_innermost_tuple6(pkt, &key))
2024-03-08 18:10:38 +08:00
{
return NULL;
2024-03-08 18:10:38 +08:00
}
switch (key.ip_proto)
2024-03-08 18:10:38 +08:00
{
case IPPROTO_UDP:
return session_table_find_tuple6(mgr->udp_sess_table, &key, 0);
case IPPROTO_TCP:
return session_manager_lookup_tcp_session(mgr, pkt, &key);
default:
return NULL;
2024-03-08 18:10:38 +08:00
}
}
/* Look a session up by id, probing the TCP table first and falling back to
 * the UDP table. Returns NULL when neither table knows the id. */
struct session *session_manager_lookup_session_by_id(struct session_manager *mgr, uint64_t sess_id)
{
    struct session *sess = session_table_find_sessid(mgr->tcp_sess_table, sess_id, 1);
    if (sess == NULL)
    {
        sess = session_table_find_sessid(mgr->udp_sess_table, sess_id, 1);
    }
    return sess;
}
int session_manager_update_session(struct session_manager *mgr, struct session *sess, const struct packet *pkt, uint64_t now_ms)
{
mgr->now_ms = now_ms;
struct tuple6 key;
if (packet_get_innermost_tuple6(pkt, &key))
2024-03-08 18:10:38 +08:00
{
return -1;
}
if (duplicated_packet_bypass(mgr, sess, pkt, &key))
{
return -1;
}
switch (session_get_type(sess))
{
case SESSION_TYPE_TCP:
return session_manager_update_tcp_session(mgr, sess, pkt, &key);
case SESSION_TYPE_UDP:
return session_manager_update_udp_session(mgr, sess, pkt, &key);
default:
return -1;
}
2024-03-08 18:10:38 +08:00
}
/*
 * Pop one expired session from the timer and run its TIMEOUT transition.
 *
 * Returns the session when the transition lands in SESSION_STATE_CLOSED
 * (caller is expected to free it; the closing reason is set to
 * CLOSING_BY_TIMEOUT unless one was already recorded). When the transition
 * lands anywhere else, the session's timer is simply re-armed with the
 * per-protocol data timeout and NULL is returned. Also returns NULL when
 * nothing has expired at `now_ms`.
 */
struct session *session_manager_get_expired_session(struct session_manager *mgr, uint64_t now_ms)
{
    mgr->now_ms = now_ms;
    struct session *sess = session_timer_expire(mgr->sess_timer, now_ms);
    if (sess)
    {
        enum session_state curr_state = session_get_current_state(sess);
        enum session_state next_state = session_transition_run(curr_state, TIMEOUT);
        session_transition_log(sess, curr_state, next_state, TIMEOUT);
        session_set_current_state(sess, next_state);
        /* per-protocol stat buckets: the macro takes the protocol as a token */
        switch (session_get_type(sess))
        {
        case SESSION_TYPE_TCP:
            SESS_MGR_STAT_UPDATE(&mgr->stat, curr_state, next_state, tcp);
            break;
        case SESSION_TYPE_UDP:
            SESS_MGR_STAT_UPDATE(&mgr->stat, curr_state, next_state, udp);
            break;
        default:
            assert(0);
            break;
        }
        // next state is closed, need to free session
        if (next_state == SESSION_STATE_CLOSED)
        {
            if (!session_get_closing_reason(sess))
            {
                session_set_closing_reason(sess, CLOSING_BY_TIMEOUT);
            }
            return sess;
        }
        // next state is closing, only update timeout
        else
        {
            switch (session_get_type(sess))
            {
            case SESSION_TYPE_TCP:
                session_timer_update(mgr->sess_timer, sess, now_ms + mgr->cfg.tcp_timeout_ms.data);
                break;
            case SESSION_TYPE_UDP:
                session_timer_update(mgr->sess_timer, sess, now_ms + mgr->cfg.udp_timeout_ms.data);
                break;
            default:
                assert(0);
                break;
            }
            return NULL;
        }
    }
    return NULL;
}
struct session *session_manager_get_evicted_session(struct session_manager *mgr)
{
struct session *sess = TAILQ_FIRST(&mgr->evicte_list);
if (sess)
2024-03-29 17:45:41 +08:00
{
TAILQ_REMOVE(&mgr->evicte_list, sess, evicte_tqe);
2024-03-29 17:45:41 +08:00
}
return sess;
}
2023-12-19 10:47:26 +08:00
// array_size at least EVICTE_SESSION_BURST, suggest 2 * EVICTE_SESSION_BURST
/*
 * Collect sessions that are ready to be freed into `cleaned_sess`.
 *
 * Evicted sessions are drained first (no rate limit); expired sessions are
 * only harvested when at least expire_period_ms has passed since the last
 * sweep, and at most expire_batch_max of them per call.
 * Returns the number of entries written to cleaned_sess (<= array_size);
 * the caller owns freeing them.
 */
uint64_t session_manager_clean_session(struct session_manager *mgr, uint64_t now_ms, struct session *cleaned_sess[], uint64_t array_size)
{
    mgr->now_ms = now_ms;
    struct session *sess = NULL;
    uint64_t cleaned_sess_num = 0;
    uint64_t expired_sess_num = 0;
    uint8_t expired_sess_canbe_clean = 0;
    if (now_ms - mgr->last_clean_expired_sess_ts >= mgr->cfg.expire_period_ms)
    {
        expired_sess_canbe_clean = 1;
    }
    for (uint64_t i = 0; i < array_size; i++)
    {
        // first clean evicted session
        sess = session_manager_get_evicted_session(mgr);
        if (sess)
        {
            cleaned_sess[cleaned_sess_num++] = sess;
        }
        // then clean expired session
        else
        {
            if (expired_sess_canbe_clean && expired_sess_num < mgr->cfg.expire_batch_max)
            {
                mgr->last_clean_expired_sess_ts = now_ms;
                sess = session_manager_get_expired_session(mgr, now_ms);
                if (sess)
                {
                    cleaned_sess[cleaned_sess_num++] = sess;
                    expired_sess_num++;
                }
                else
                {
                    break;
                }
            }
            else
            {
                break;
            }
        }
    }
    return cleaned_sess_num;
}
/* Time until the next session timer expiry, as reported by the timer
 * (same _ms time base used throughout the manager). */
uint64_t session_manager_get_expire_interval(struct session_manager *mgr)
{
    uint64_t next_expire = session_timer_next_expire_interval(mgr->sess_timer);
    return next_expire;
}
2023-12-19 10:47:26 +08:00
struct session_manager_stat *session_manager_stat(struct session_manager *mgr)
2024-03-08 14:25:01 +08:00
{
2024-03-11 15:04:18 +08:00
return &mgr->stat;
2024-03-08 14:25:01 +08:00
}
2024-05-20 17:02:16 +08:00
/*
 * Plugin-facing API: push `sess` toward discard via the USER_CLOSE
 * transition. Uses mgr->now_ms (the manager's cached clock) because plugin
 * callers have no timestamp to pass in; the session's timer is re-armed
 * with the per-protocol discard_default timeout and the per-protocol state
 * counters are updated.
 */
void session_set_discard(struct session *sess)
{
    struct session_manager *mgr = sess->mgr;
    enum session_type type = session_get_type(sess);
    enum session_state curr_state = session_get_current_state(sess);
    enum session_state next_state = session_transition_run(curr_state, USER_CLOSE);
    session_transition_log(sess, curr_state, next_state, USER_CLOSE);
    session_set_current_state(sess, next_state);
    switch (type)
    {
    case SESSION_TYPE_TCP:
        session_timer_update(mgr->sess_timer, sess, mgr->now_ms + mgr->cfg.tcp_timeout_ms.discard_default);
        SESS_MGR_STAT_UPDATE(&mgr->stat, curr_state, next_state, tcp);
        break;
    case SESSION_TYPE_UDP:
        session_timer_update(mgr->sess_timer, sess, mgr->now_ms + mgr->cfg.udp_timeout_ms.discard_default);
        SESS_MGR_STAT_UPDATE(&mgr->stat, curr_state, next_state, udp);
        break;
    default:
        assert(0);
        break;
    }
}
2024-08-20 18:43:51 +08:00
/*
 * Scan a window of the session pool and collect ids of sessions matching
 * `opts`.
 *
 * Iterates pool slots [opts->cursor, min(capacity, cursor + count)); slots
 * in SESSION_STATE_INIT are skipped (free_session resets recycled sessions
 * to INIT, so INIT marks an unused slot). Each filter is applied only when
 * its bit is set in opts->flags; address filters are inclusive ranges and
 * require the address family to match. Writes at most array_size ids into
 * mached_sess_ids and returns the number written. Returns 0 on NULL/zero
 * arguments or a cursor past the pool capacity.
 */
uint64_t session_manager_scan(const struct session_manager *mgr, const struct session_scan_opts *opts, uint64_t mached_sess_ids[], uint64_t array_size)
{
    uint64_t capacity = 0;
    uint64_t max_loop = 0;
    uint64_t mached_sess_num = 0;
    const struct session *sess = NULL;
    const struct tuple6 *tuple = NULL;
    if (mgr == NULL || opts == NULL || mached_sess_ids == NULL || array_size == 0)
    {
        return mached_sess_num;
    }
    if (opts->count == 0)
    {
        return mached_sess_num;
    }
    capacity = session_pool_capacity_size(mgr->sess_pool);
    if (opts->cursor >= capacity)
    {
        return mached_sess_num;
    }
    max_loop = MIN(capacity, opts->cursor + opts->count);
    for (uint64_t i = opts->cursor; i < max_loop; i++)
    {
        sess = session_pool_get0(mgr->sess_pool, i);
        tuple = session_get_tuple6(sess);
        /* INIT == pooled/unused slot; skip */
        if (session_get_current_state(sess) == SESSION_STATE_INIT)
        {
            continue;
        }
        if ((opts->flags & SESSION_SCAN_TYPE) && opts->type != session_get_type(sess))
        {
            continue;
        }
        if ((opts->flags & SESSION_SCAN_STATE) && opts->state != session_get_current_state(sess))
        {
            continue;
        }
        /* time filters are inclusive [lo, hi] windows */
        if ((opts->flags & SESSION_SCAN_CREATE_TIME) &&
            (session_get_timestamp(sess, SESSION_TIMESTAMP_START) < opts->create_time_ms[0] ||
             session_get_timestamp(sess, SESSION_TIMESTAMP_START) > opts->create_time_ms[1]))
        {
            continue;
        }
        if ((opts->flags & SESSION_SCAN_LAST_PKT_TIME) &&
            (session_get_timestamp(sess, SESSION_TIMESTAMP_LAST) < opts->last_pkt_time_ms[0] ||
             session_get_timestamp(sess, SESSION_TIMESTAMP_LAST) > opts->last_pkt_time_ms[1]))
        {
            continue;
        }
        if ((opts->flags & SESSION_SCAN_SPORT) && opts->src_port != tuple->src_port)
        {
            continue;
        }
        if ((opts->flags & SESSION_SCAN_DPORT) && opts->dst_port != tuple->dst_port)
        {
            continue;
        }
        if (opts->flags & SESSION_SCAN_SIP)
        {
            if (opts->addr_family != tuple->addr_family)
            {
                continue;
            }
            if ((opts->addr_family == AF_INET) && !ipv4_in_range(&tuple->src_addr.v4, &opts->src_addr[0].v4, &opts->src_addr[1].v4))
            {
                continue;
            }
            if ((opts->addr_family == AF_INET6) && !ipv6_in_range(&tuple->src_addr.v6, &opts->src_addr[0].v6, &opts->src_addr[1].v6))
            {
                continue;
            }
        }
        if (opts->flags & SESSION_SCAN_DIP)
        {
            if (opts->addr_family != tuple->addr_family)
            {
                continue;
            }
            if ((opts->addr_family == AF_INET) && !ipv4_in_range(&tuple->dst_addr.v4, &opts->dst_addr[0].v4, &opts->dst_addr[1].v4))
            {
                continue;
            }
            if ((opts->addr_family == AF_INET6) && !ipv6_in_range(&tuple->dst_addr.v6, &opts->dst_addr[0].v6, &opts->dst_addr[1].v6))
            {
                continue;
            }
        }
        mached_sess_ids[mached_sess_num++] = session_get_id(sess);
        if (mached_sess_num >= array_size)
        {
            break;
        }
    }
    SESSION_LOG_DEBUG("session scan: cursor=%lu, count=%lu, mached_sess_num=%lu", opts->cursor, opts->count, mached_sess_num);
    return mached_sess_num;
}