2023-02-10 14:22:40 +08:00
# include <assert.h>
2023-10-12 11:59:42 +08:00
# include <arpa/inet.h>
# include <netinet/in.h>
2023-02-10 14:22:40 +08:00
# include <netinet/ip.h>
2023-11-20 15:36:28 +08:00
# define __FAVOR_BSD 1
2023-02-10 14:22:40 +08:00
# include <netinet/udp.h>
# include <netinet/ether.h>
2023-04-28 15:00:46 +08:00
# include "mpack.h"
2023-02-10 14:22:40 +08:00
# include <MESA/MESA_prof_load.h>
# include "log.h"
# include "sce.h"
2023-02-17 17:53:21 +08:00
# include "utils.h"
2023-11-20 10:31:21 +08:00
# include "vxlan.h"
2023-04-07 14:09:20 +08:00
# include "packet_io.h"
2024-02-29 18:18:30 +08:00
# include "packet_trace.h"
2023-02-28 19:03:35 +08:00
# include "sf_metrics.h"
2023-10-18 10:08:10 +08:00
# include "control_packet.h"
2023-02-21 09:58:31 +08:00
# include "global_metrics.h"
2023-02-10 14:22:40 +08:00
2023-04-21 19:06:12 +08:00
# define RX_BURST_MAX 128
# define MR_MASK_DECRYPTED 0x01
2023-02-10 14:22:40 +08:00
/******************************************************************************
2023-03-14 16:10:44 +08:00
* struct
2023-02-10 14:22:40 +08:00
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Static configuration loaded at startup (names mirror the profile keys). */
struct config
{
    int bypass_traffic;      /* non-zero: pass traffic through untouched -- TODO confirm against loader */
    int rx_burst_max;        /* max packets fetched per RX burst (presumably capped by RX_BURST_MAX) */
    int min_timeout_ms;      /* minimum poll timeout in milliseconds -- NOTE(review): exact semantics not visible here */
    char app_symbol[256];    /* application symbol used when registering -- verify against caller */

    /* dev_nf: device facing the network function */
    char dev_nf_name[256];
    /* dev_endpoint_l3: L3 endpoint device used for VXLAN-encapsulated forwarding */
    char dev_endpoint_l3_name[256];
    char dev_endpoint_l3_ip_str[16];      /* textual IPv4 form of dev_endpoint_l3_ip */
    char dev_endpoint_l3_mac_str[32];     /* textual form of dev_endpoint_l3_mac */
    in_addr_t dev_endpoint_l3_ip;         /* parsed IPv4 source address for VXLAN frames */
    u_char dev_endpoint_l3_mac[ETH_ALEN]; /* parsed source MAC for VXLAN frames */
    /* dev_endpoint_l2: L2 endpoint device used for VLAN-switched forwarding */
    char dev_endpoint_l2_name[256];
    int vlan_encapsulate_replace_orig_vlan_header; /* non-zero: rewrite an existing VLAN tag instead of pushing a new one */
};
struct device
{
struct mr_vdev * mr_dev ;
struct mr_sendpath * mr_path ;
} ;
/* Top-level packet IO context: the marsio instance, the three devices this
 * module transmits on, and the loaded configuration. */
struct packet_io
{
    int thread_num;               /* number of worker threads */
    struct mr_instance *instance; /* owning marsio instance */

    struct device dev_nf;          /* device facing the network function */
    struct device dev_endpoint_l3; /* L3 endpoint (VXLAN) device */
    struct device dev_endpoint_l2; /* L2 endpoint (VLAN switch) device */

    struct config config;          /* configuration loaded at startup */
};
/******************************************************************************
2023-03-14 16:10:44 +08:00
* metadata
2023-02-10 14:22:40 +08:00
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2023-11-24 15:17:18 +08:00
static inline void sids_copy ( struct sids * dst , struct sids * src )
{
if ( dst & & src )
{
dst - > num = src - > num ;
memcpy ( dst - > elems , src - > elems , sizeof ( dst - > elems [ 0 ] ) * dst - > num ) ;
}
}
static inline void route_ctx_copy ( struct route_ctx * dst , struct route_ctx * src )
{
memcpy ( dst - > data , src - > data , src - > len ) ;
dst - > len = src - > len ;
}
static inline void route_ctx_copy_once ( struct route_ctx * dst , struct route_ctx * src )
{
if ( dst - > len = = 0 )
{
memcpy ( dst - > data , src - > data , src - > len ) ;
dst - > len = src - > len ;
}
}
2023-11-13 16:56:31 +08:00
void sce_packet_get_innermost_tuple ( const struct packet * handler , struct four_tuple * tuple )
{
memset ( tuple , 0 , sizeof ( struct four_tuple ) ) ;
if ( packet_get_innermost_four_tuple ( handler , tuple ) = = - 1 )
{
packet_get_innermost_two_tuple ( handler , & tuple - > two_tuple ) ;
}
}
2023-02-10 14:22:40 +08:00
// return 0 : success
// return -1 : error
//
// Pull per-packet metadata off a received marsio buffer into `meta`.
// NOTE(review): meta->is_ctrl_pkt is read below but never fetched here --
// it appears to be set by the caller before this is invoked; confirm.
int mbuff_get_metadata(marsio_buff_t *rx_buff, struct metadata *meta)
{
    /* Mandatory fields: session id, rehash index, link id, direction. */
    if (marsio_buff_get_metadata(rx_buff, MR_BUFF_SESSION_ID, &(meta->session_id), sizeof(meta->session_id)) <= 0)
    {
        LOG_ERROR("%s: unable to get session_id from metadata", LOG_TAG_PKTIO);
        return -1;
    }
    if (marsio_buff_get_metadata(rx_buff, MR_BUFF_REHASH_INDEX, &(meta->rehash_index), sizeof(meta->rehash_index)) <= 0)
    {
        LOG_ERROR("%s: unable to get rehash_index from metadata", LOG_TAG_PKTIO);
        return -1;
    }
    if (marsio_buff_get_metadata(rx_buff, MR_BUFF_LINK_ID, &(meta->link_id), sizeof(meta->link_id)) <= 0)
    {
        LOG_ERROR("%s: unable to get link_id from metadata", LOG_TAG_PKTIO);
        return -1;
    }
    // 1: E2I
    // 0: I2E
    if (marsio_buff_get_metadata(rx_buff, MR_BUFF_DIR, &(meta->direction), sizeof(meta->direction)) <= 0)
    {
        LOG_ERROR("%s: unable to get buff_dir from metadata", LOG_TAG_PKTIO);
        return -1;
    }
    if (meta->is_ctrl_pkt)
    {
        /* Control packets carry the L7 payload offset. */
        if (marsio_buff_get_metadata(rx_buff, MR_BUFF_PAYLOAD_OFFSET, &(meta->l7offset), sizeof(meta->l7offset)) <= 0)
        {
            LOG_ERROR("%s: unable to get l7offset from metadata", LOG_TAG_PKTIO);
            return -1;
        }
    }
    else
    {
        /* Data packets: the decrypted flag is packed into user word 0. */
        uint16_t user_data = 0;
        if (marsio_buff_get_metadata(rx_buff, MR_BUFF_USER_0, &user_data, sizeof(user_data)) <= 0)
        {
            LOG_ERROR("%s: unable to get is_decrypted from metadata", LOG_TAG_PKTIO);
            return -1;
        }
        if (user_data & MR_MASK_DECRYPTED)
        {
            meta->is_decrypted = 1;
        }
        else
        {
            meta->is_decrypted = 0;
        }
    }
    /* Session-id list attached to the buffer; a negative count is an error. */
    meta->sids.num = marsio_buff_get_sid_list(rx_buff, meta->sids.elems, sizeof(meta->sids.elems) / sizeof(meta->sids.elems[0]));
    if (meta->sids.num < 0)
    {
        LOG_ERROR("%s: unable to get sid_list from metadata", LOG_TAG_PKTIO);
        return -1;
    }
    /* Opaque routing context blob; a packet without one is treated as an error. */
    meta->route_ctx.len = marsio_buff_get_metadata(rx_buff, MR_BUFF_ROUTE_CTX, meta->route_ctx.data, sizeof(meta->route_ctx.data));
    if (meta->route_ctx.len <= 0)
    {
        LOG_ERROR("%s: unable to get route_ctx from metadata", LOG_TAG_PKTIO);
        return -1;
    }
    return 0;
}
2023-03-14 16:10:44 +08:00
// return 0 : success
// return -1 : error
//
// Attach the fields of `meta` onto a marsio TX buffer.
// Zero-valued session_id / link_id are treated as "unset" and skipped.
int mbuff_set_metadata(marsio_buff_t *tx_buff, struct metadata *meta)
{
    if (meta->session_id)
    {
        if (marsio_buff_set_metadata(tx_buff, MR_BUFF_SESSION_ID, &(meta->session_id), sizeof(meta->session_id)) != 0)
        {
            LOG_ERROR("%s: unable to set session_id for metadata", LOG_TAG_PKTIO);
            return -1;
        }
    }
    if (meta->link_id)
    {
        if (marsio_buff_set_metadata(tx_buff, MR_BUFF_LINK_ID, &(meta->link_id), sizeof(meta->link_id)) != 0)
        {
            LOG_ERROR("%s: unable to set link_id from metadata", LOG_TAG_PKTIO);
            return -1;
        }
    }
    /*
     * for stateless inject packets, setting the direction is necessary;
     * if route_ctx is set later, dir will be overwritten by route_ctx.
     *
     * direction: 1 (E2I)
     * direction: 0 (I2E)
     */
    if (marsio_buff_set_metadata(tx_buff, MR_BUFF_DIR, &(meta->direction), sizeof(meta->direction)) != 0)
    {
        LOG_ERROR("%s: unable to set buff_dir from metadata", LOG_TAG_PKTIO);
        return -1;
    }
    if (meta->is_ctrl_pkt)
    {
        /* Mark as a control buffer and record where the L7 payload starts. */
        marsio_buff_set_ctrlbuf(tx_buff);
        if (marsio_buff_set_metadata(tx_buff, MR_BUFF_PAYLOAD_OFFSET, &(meta->l7offset), sizeof(meta->l7offset)) != 0)
        {
            LOG_ERROR("%s: unable to set l7offset for metadata", LOG_TAG_PKTIO);
            return -1;
        }
    }
    else
    {
        /* Data packet: encode the decrypted flag into user word 0. */
        uint16_t user_data = 0;
        if (meta->is_decrypted)
        {
            user_data = MR_MASK_DECRYPTED;
        }
        if (marsio_buff_set_metadata(tx_buff, MR_BUFF_USER_0, &user_data, sizeof(user_data)) != 0)
        {
            LOG_ERROR("%s: unable to set is_decrypted for metadata", LOG_TAG_PKTIO);
            return -1;
        }
    }
    if (meta->sids.num > 0)
    {
        if (marsio_buff_set_sid_list(tx_buff, meta->sids.elems, meta->sids.num) != 0)
        {
            LOG_ERROR("%s: unable to set sid_list for metadata", LOG_TAG_PKTIO);
            return -1;
        }
    }
    /* Set last on purpose: route_ctx overwrites the direction set above. */
    if (meta->route_ctx.len > 0)
    {
        if (marsio_buff_set_metadata(tx_buff, MR_BUFF_ROUTE_CTX, meta->route_ctx.data, meta->route_ctx.len) != 0)
        {
            LOG_ERROR("%s: unable to set route_ctx for metadata", LOG_TAG_PKTIO);
            return -1;
        }
    }
    return 0;
}
2023-05-06 10:41:30 +08:00
static void update_session_by_metadata ( struct session_ctx * ctx , struct metadata * meta )
{
2023-11-24 15:17:18 +08:00
struct sids * e2i_sids = NULL ;
struct sids * i2e_sids = NULL ;
struct route_ctx * e2i_route_ctx = NULL ;
struct route_ctx * i2e_route_ctx = NULL ;
2023-05-06 10:41:30 +08:00
if ( meta - > is_decrypted )
{
2023-11-24 15:17:18 +08:00
e2i_sids = & ctx - > decrypted_e2i_sids ;
i2e_sids = & ctx - > decrypted_i2e_sids ;
e2i_route_ctx = & ctx - > decrypted_e2i_route_ctx ;
i2e_route_ctx = & ctx - > decrypted_i2e_route_ctx ;
2023-05-06 10:41:30 +08:00
}
else
{
2023-11-24 15:17:18 +08:00
e2i_sids = & ctx - > raw_e2i_sids ;
i2e_sids = & ctx - > raw_i2e_sids ;
e2i_route_ctx = & ctx - > raw_e2i_route_ctx ;
i2e_route_ctx = & ctx - > raw_i2e_route_ctx ;
2023-05-06 10:41:30 +08:00
}
2023-11-23 16:52:06 +08:00
// 1: E2I
// 0: I2E
if ( meta - > direction )
2023-05-06 10:41:30 +08:00
{
2023-11-24 15:17:18 +08:00
route_ctx_copy_once ( e2i_route_ctx , & meta - > route_ctx ) ;
sids_copy ( e2i_sids , & meta - > sids ) ;
2023-05-06 10:41:30 +08:00
}
else
{
2023-11-24 15:17:18 +08:00
route_ctx_copy_once ( i2e_route_ctx , & meta - > route_ctx ) ;
sids_copy ( i2e_sids , & meta - > sids ) ;
2023-05-06 10:41:30 +08:00
}
}
static void update_metadata_by_session ( struct session_ctx * ctx , struct metadata * meta )
{
struct sids * sids = NULL ;
struct route_ctx * route_ctx = NULL ;
meta - > session_id = ctx - > session_id ;
2023-11-23 16:52:06 +08:00
// 1: E2I
// 0: I2E
if ( meta - > direction )
2023-05-06 10:41:30 +08:00
{
if ( meta - > is_decrypted )
{
2023-11-24 15:17:18 +08:00
sids = & ( ctx - > decrypted_e2i_sids ) ;
route_ctx = & ( ctx - > decrypted_e2i_route_ctx ) ;
2023-05-06 10:41:30 +08:00
}
else
{
2023-11-24 15:17:18 +08:00
sids = & ( ctx - > raw_e2i_sids ) ;
route_ctx = & ( ctx - > raw_e2i_route_ctx ) ;
2023-05-06 10:41:30 +08:00
}
}
else
{
if ( meta - > is_decrypted )
{
2023-11-24 15:17:18 +08:00
sids = & ( ctx - > decrypted_i2e_sids ) ;
route_ctx = & ( ctx - > decrypted_i2e_route_ctx ) ;
2023-05-06 10:41:30 +08:00
}
else
{
2023-11-24 15:17:18 +08:00
sids = & ( ctx - > raw_i2e_sids ) ;
route_ctx = & ( ctx - > raw_i2e_route_ctx ) ;
2023-05-06 10:41:30 +08:00
}
}
sids_copy ( & meta - > sids , sids ) ;
route_ctx_copy ( & meta - > route_ctx , route_ctx ) ;
}
/******************************************************************************
* keepalive
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2023-03-14 16:10:44 +08:00
// return 0 : not keepalive packet
// return 1 : is keepalive packet
2023-11-24 15:17:18 +08:00
static inline int is_downlink_keepalive_packet ( marsio_buff_t * rx_buff , int raw_len )
2023-02-10 14:22:40 +08:00
{
2023-03-14 16:10:44 +08:00
char * raw_data = marsio_buff_mtod ( rx_buff ) ;
if ( raw_data = = NULL | | raw_len < ( int ) ( sizeof ( struct ethhdr ) ) )
2023-02-10 14:22:40 +08:00
{
return 0 ;
}
2023-03-14 16:10:44 +08:00
struct ethhdr * eth_hdr = ( struct ethhdr * ) raw_data ;
if ( eth_hdr - > h_proto = = 0xAAAA )
2023-02-10 14:22:40 +08:00
{
2023-03-14 16:10:44 +08:00
return 1 ;
2023-02-10 14:22:40 +08:00
}
2023-03-14 16:10:44 +08:00
else
2023-02-10 14:22:40 +08:00
{
2023-03-14 16:10:44 +08:00
return 0 ;
2023-02-10 14:22:40 +08:00
}
}
2023-03-14 16:10:44 +08:00
// return 0 : not keepalive packet
// return 1 : is keepalive packet
2023-11-24 15:17:18 +08:00
static inline int is_uplink_keepalive_packet ( marsio_buff_t * rx_buff , int raw_len )
2023-02-10 14:22:40 +08:00
{
2023-03-14 16:10:44 +08:00
char * raw_data = marsio_buff_mtod ( rx_buff ) ;
2023-11-20 10:31:21 +08:00
if ( raw_data = = NULL | | raw_len < ( int ) ( sizeof ( struct ethhdr ) + sizeof ( struct ip ) + sizeof ( struct udphdr ) ) )
2023-03-14 16:10:44 +08:00
{
return 0 ;
}
2023-02-10 14:22:40 +08:00
2023-03-14 16:10:44 +08:00
struct ethhdr * eth_hdr = ( struct ethhdr * ) raw_data ;
if ( eth_hdr - > h_proto ! = htons ( ETH_P_IP ) )
2023-02-10 14:22:40 +08:00
{
return 0 ;
}
2023-03-14 16:10:44 +08:00
struct ip * ip_hdr = ( struct ip * ) ( ( char * ) eth_hdr + sizeof ( struct ethhdr ) ) ;
if ( ip_hdr - > ip_p ! = IPPROTO_UDP )
2023-02-10 14:22:40 +08:00
{
2023-03-14 16:10:44 +08:00
return 0 ;
2023-02-10 14:22:40 +08:00
}
2023-11-20 10:31:21 +08:00
struct udphdr * udp_hdr = ( struct udphdr * ) ( ( char * ) ip_hdr + sizeof ( struct ip ) ) ;
2023-03-14 16:10:44 +08:00
if ( udp_hdr - > uh_dport ! = htons ( 3784 ) )
2023-02-10 14:22:40 +08:00
{
2023-03-14 16:10:44 +08:00
return 0 ;
2023-02-10 14:22:40 +08:00
}
2023-03-14 16:10:44 +08:00
return 1 ;
2023-02-10 14:22:40 +08:00
}
2023-03-14 16:10:44 +08:00
/******************************************************************************
* search session ctx
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// return !NULL : session found (and, in debug mode, address-verified)
// return NULL  : no session, or debug address mismatch (caller bypasses)
static struct session_ctx *data_packet_search_session(struct session_table *table, const char *raw_data, int raw_len, uint64_t session_id, struct thread_ctx *thread_ctx)
{
    /* Primary lookup is by the session id carried in buffer metadata. */
    struct session_ctx *session_ctx = (struct session_ctx *)session_table_search_by_id(table, session_id);
    if (session_ctx == NULL)
    {
        return NULL;
    }
    /* Debug-only cross-check: the packet's innermost 4-tuple (or its
     * reverse) must match the session's recorded tuple; mismatches are
     * logged and treated as "no session" so the packet gets bypassed. */
    struct sce_ctx *sce_ctx = thread_ctx->ref_sce_ctx;
    if (sce_ctx->enable_debug)
    {
        struct four_tuple inner_addr;
        struct four_tuple reverse_addr;
        struct packet data_pkt;
        packet_parse(&data_pkt, raw_data, raw_len);
        sce_packet_get_innermost_tuple(&data_pkt, &inner_addr);
        four_tuple_reverse(&inner_addr, &reverse_addr);
        if (memcmp(&session_ctx->inner_tuple4, &inner_addr, sizeof(struct four_tuple)) != 0 && memcmp(&session_ctx->inner_tuple4, &reverse_addr, sizeof(struct four_tuple)) != 0)
        {
            /* four_tuple_tostring allocates; free after logging. */
            char *addr_str = four_tuple_tostring(&inner_addr);
            LOG_ERROR("%s: unexpected raw packet, session %lu expected address tuple4 is %s, but current packet's address tuple4 is %s, bypass !!!", LOG_TAG_PKTIO, session_ctx->session_id, session_ctx->session_addr, addr_str);
            free(addr_str);
            return NULL;
        }
    }
    return session_ctx;
}
// return !NULL
// return NULL
2023-11-29 13:59:06 +08:00
static struct session_ctx * inject_packet_search_session ( struct session_table * table , const char * raw_data , int raw_len , struct thread_ctx * thread_ctx )
2023-03-14 16:10:44 +08:00
{
2023-11-08 10:20:27 +08:00
struct four_tuple inner_addr ;
2023-11-13 16:56:31 +08:00
struct packet data_pkt ;
2023-03-14 16:10:44 +08:00
2023-11-13 16:56:31 +08:00
packet_parse ( & data_pkt , raw_data , raw_len ) ;
sce_packet_get_innermost_tuple ( & data_pkt , & inner_addr ) ;
2023-03-14 16:10:44 +08:00
2023-11-28 16:36:58 +08:00
struct session_ctx * session_ctx = ( struct session_ctx * ) session_table_search_by_addr ( table , & inner_addr ) ;
if ( session_ctx = = NULL )
2023-02-10 14:22:40 +08:00
{
2023-11-13 16:56:31 +08:00
char * addr_str = four_tuple_tostring ( & inner_addr ) ;
2023-03-14 16:10:44 +08:00
LOG_ERROR ( " %s: unexpected inject packet, unable to find session %s from session table, drop !!! " , LOG_TAG_PKTIO , addr_str ) ;
free ( addr_str ) ;
return NULL ;
2023-02-10 14:22:40 +08:00
}
2023-11-28 16:36:58 +08:00
return session_ctx ;
2023-03-14 16:10:44 +08:00
}
/******************************************************************************
* action bypass / block / forward
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2023-10-18 10:08:10 +08:00
/* Trailing part of an 802.1Q tag: the TCI word plus the encapsulated
 * ethertype. NOTE(review): the field named vlan_cfi actually holds the
 * whole TCI (PCP | DEI | VID). */
struct vlan_hdr
{
    uint16_t vlan_cfi; /* TCI: priority(3) | DEI(1) | VLAN id(12), network order */
    uint16_t protocol; /* ethertype of the next layer, network order */
} __attribute__((__packed__));

/* Fill a VLAN tag: the TCI carries only the 12-bit VLAN id (PCP/DEI are
 * zero) and protocol is the encapsulated ethertype, both in network order. */
static void build_vlan_header(struct vlan_hdr *vlan_hdr, uint16_t vlan_id, uint16_t protocol)
{
    /* htons(vlan_id & 0xFFF) already clears the PCP/DEI bits, so the
     * separate zeroing store the old code did first was dead. */
    vlan_hdr->vlan_cfi = htons(vlan_id & 0xFFF);
    vlan_hdr->protocol = htons(protocol);
}
static void overwrite_vlan_id ( struct vlan_hdr * vlan_hdr , uint16_t vlan_id )
{
vlan_hdr - > vlan_cfi = 0 ;
vlan_hdr - > vlan_cfi = htons ( vlan_id & 0xFFF ) ;
2023-03-14 16:10:44 +08:00
}
2023-10-18 10:08:10 +08:00
/* Push or rewrite an 802.1Q tag on the frame held in mbuff.
 *
 * replace_orig_vlan_header == 0: always insert a fresh tag between the MAC
 * header and the original payload.
 * replace_orig_vlan_header != 0: rewrite the outermost existing tag's VLAN
 * id in place; multi-tagged frames are collapsed to a single tag, and
 * untagged frames fall back to inserting a fresh tag.
 *
 * NOTE(review): marsio_buff_prepend's return value is not checked; this
 * assumes sufficient headroom is always reserved -- confirm. */
void vlan_encapsulate(marsio_buff_t *mbuff, int vlan_id, int replace_orig_vlan_header)
{
    struct ethhdr *old_eth_hdr = (struct ethhdr *)marsio_buff_mtod(mbuff);

    if (replace_orig_vlan_header == 0)
    {
append:; /* empty statement: a label may not directly precede a declaration pre-C23 */
        struct ethhdr *new_eth_hdr = (struct ethhdr *)marsio_buff_prepend(mbuff, sizeof(struct vlan_hdr));
        /* The new header overlaps the old one (shifted forward by one tag),
         * so the 12 MAC-address bytes must be moved with memmove -- the old
         * chunked memcpy relied on undefined behavior for overlapping
         * regions. */
        memmove(new_eth_hdr, old_eth_hdr, 2 * ETH_ALEN);
        new_eth_hdr->h_proto = htons(ETH_P_8021Q);
        struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)((char *)new_eth_hdr + sizeof(struct ethhdr));
        /* old_eth_hdr->h_proto is still intact here: the 12-byte move above
         * does not reach it, and it is read before vlan_hdr is written. */
        build_vlan_header(vlan_hdr, vlan_id, ntohs(old_eth_hdr->h_proto));
        return;
    }
    else
    {
        /* Walk past any existing 802.1Q / 802.1ad tags. */
        uint16_t next_proto = old_eth_hdr->h_proto;
        char *start_layer = (char *)old_eth_hdr + sizeof(struct ethhdr);
        char *next_layer = start_layer;
        while (next_proto == htons(ETH_P_8021Q) || next_proto == htons(ETH_P_8021AD))
        {
            struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)next_layer;
            next_proto = vlan_hdr->protocol;
            next_layer += sizeof(struct vlan_hdr);
        }
        uint64_t offset = next_layer - start_layer;
        /* No VLAN tag found: insert one instead. */
        if (offset == 0)
        {
            goto append;
        }
        /* Exactly one tag: rewrite its VLAN id in place. */
        if (offset == sizeof(struct vlan_hdr))
        {
            struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)start_layer;
            overwrite_vlan_id(vlan_hdr, vlan_id);
            return;
        }
        /* Multiple tags: keep only the innermost one, slide the MAC header
         * up against it and trim the now-unused bytes from the front. */
        if (offset > sizeof(struct vlan_hdr))
        {
            struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(next_layer - sizeof(struct vlan_hdr));
            struct ethhdr *new_eth_hdr = (struct ethhdr *)((char *)vlan_hdr - sizeof(struct ethhdr));
            overwrite_vlan_id(vlan_hdr, vlan_id);
            memmove(new_eth_hdr, (char *)old_eth_hdr, sizeof(struct ethhdr));
            new_eth_hdr->h_proto = htons(ETH_P_8021Q);
            marsio_buff_adj(mbuff, offset - sizeof(struct vlan_hdr));
            return;
        }
        assert(0);
    }
}
2023-11-21 11:20:48 +08:00
/* Encapsulate `mbuff` according to the SF's connectivity method and hand it
 * to the matching endpoint TX path. Returns the number of bytes given to
 * the TX path (0 for unsupported / unimplemented methods). Ownership of
 * mbuff passes to marsio_send_burst on the sending paths. */
static inline int send_packet_to_sf(struct session_ctx *session_ctx, marsio_buff_t *mbuff, struct metadata *meta, struct selected_sf *sf, struct thread_ctx *thread_ctx)
{
    int nsend = 0;
    char *buffer = NULL;
    struct packet_io *packet_io = thread_ctx->ref_io;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;
    uint32_t rehash_index = session_ctx->rehash_index;

    marsio_buff_ctrlzone_reset(mbuff);
    switch (sf->sf_connectivity.method)
    {
    case ENCAPSULATE_METHOD_VXLAN_G:
        /* Prepend a full VXLAN frame header and send via the L3 endpoint.
         * NOTE(review): the IP-ID counter uses % 65535, so the value 65535
         * is never produced -- confirm whether % 65536 was intended.
         * NOTE(review): marsio_buff_prepend's return is not checked. */
        thread_ctx->tx_packets_ipid++;
        buffer = marsio_buff_prepend(mbuff, VXLAN_FRAME_HDR_LEN);
        vxlan_frame_encode(buffer,
            packet_io->config.dev_endpoint_l3_mac, sf->sf_dst_mac,
            packet_io->config.dev_endpoint_l3_ip, sf->sf_dst_ip, thread_ctx->tx_packets_ipid % 65535,
            session_ctx->vxlan_src_port, meta->raw_len,
            meta->direction, meta->is_decrypted, sf->sf_index, meta->link_id);
        nsend = marsio_buff_datalen(mbuff);
        marsio_buff_set_metadata(mbuff, MR_BUFF_REHASH_INDEX, &rehash_index, sizeof(rehash_index));
        PACKET_TRACE_ON_NEW(packet_io->instance, mbuff);
        marsio_send_burst(packet_io->dev_endpoint_l3.mr_path, thread_ctx->thread_index, &mbuff, 1);
        THROUGHPUT_METRICS_INC(&(thread_metrics->endpoint_vxlan_tx), 1, nsend);
        break;
    case ENCAPSULATE_METHOD_LAYER2_SWITCH:
        /* Tag with the direction-appropriate VLAN (E2I -> external tag,
         * I2E -> internal tag) and send via the L2 endpoint. */
        vlan_encapsulate(mbuff,
            meta->direction ? sf->sf_connectivity.ext_vlan_tag : sf->sf_connectivity.int_vlan_tag,
            packet_io->config.vlan_encapsulate_replace_orig_vlan_header);
        nsend = marsio_buff_datalen(mbuff);
        marsio_buff_set_metadata(mbuff, MR_BUFF_REHASH_INDEX, &rehash_index, sizeof(rehash_index));
        PACKET_TRACE_ON_NEW(packet_io->instance, mbuff);
        marsio_send_burst(packet_io->dev_endpoint_l2.mr_path, thread_ctx->thread_index, &mbuff, 1);
        THROUGHPUT_METRICS_INC(&(thread_metrics->endpoint_vlan_tx), 1, nsend);
        break;
    case ENCAPSULATE_METHOD_LAYER3_SWITCH:
        // TODO
        break;
    default:
        break;
    }
    return nsend;
}
2023-11-21 11:20:48 +08:00
static inline int action_nf_inject ( marsio_buff_t * rx_buff , struct metadata * meta , struct selected_sf * sf , struct thread_ctx * thread_ctx ) ;
2023-03-14 16:10:44 +08:00
2023-11-21 11:20:48 +08:00
/* Error bypass: re-inject the packet toward the NF path and, if anything
 * was actually sent, account it under the error_bypass counter. */
static inline void action_err_bypass(marsio_buff_t *rx_buff, struct metadata *meta, struct selected_sf *sf, struct thread_ctx *thread_ctx)
{
    int sent = action_nf_inject(rx_buff, meta, sf, thread_ctx);
    if (sent <= 0)
    {
        return; /* inject failed: the buffer was already dropped/accounted */
    }
    THROUGHPUT_METRICS_INC(&(thread_ctx->thread_metrics.error_bypass), 1, sent);
}
2023-02-10 14:22:40 +08:00
2023-11-21 11:20:48 +08:00
/* Error block: account the drop and release the buffer back to marsio. */
static inline void action_err_block(marsio_buff_t *rx_buff, struct metadata *meta, struct selected_sf *sf, struct thread_ctx *thread_ctx)
{
    struct packet_io *io = thread_ctx->ref_io;

    THROUGHPUT_METRICS_INC(&(thread_ctx->thread_metrics.error_block), 1, meta->raw_len);
    PACKET_TRACE_ON_FREE(io->instance, rx_buff);
    marsio_buff_free(io->instance, &rx_buff, 1, 0, thread_ctx->thread_index);
}
// return nsend
2023-11-21 11:20:48 +08:00
static inline int action_nf_inject ( marsio_buff_t * rx_buff , struct metadata * meta , struct selected_sf * sf , struct thread_ctx * thread_ctx )
2023-03-14 16:10:44 +08:00
{
2023-10-12 16:31:53 +08:00
struct thread_metrics * thread_metrics = & thread_ctx - > thread_metrics ;
2023-03-14 16:10:44 +08:00
struct packet_io * packet_io = thread_ctx - > ref_io ;
int thread_index = thread_ctx - > thread_index ;
marsio_buff_ctrlzone_reset ( rx_buff ) ;
if ( mbuff_set_metadata ( rx_buff , meta ) ! = 0 )
2023-02-10 14:22:40 +08:00
{
2023-03-14 16:10:44 +08:00
action_err_block ( rx_buff , meta , sf , thread_ctx ) ;
return 0 ;
2023-02-10 14:22:40 +08:00
}
2024-03-06 11:56:07 +08:00
if ( meta - > is_decrypted )
{
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > dec_tx ) , 1 , meta - > raw_len ) ;
}
else
{
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > raw_tx ) , 1 , meta - > raw_len ) ;
}
2023-10-18 10:08:10 +08:00
marsio_send_burst ( packet_io - > dev_nf . mr_path , thread_index , & rx_buff , 1 ) ;
2023-11-24 15:17:18 +08:00
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > nf_tx ) , 1 , meta - > raw_len ) ;
2023-11-22 16:16:59 +08:00
return meta - > raw_len ;
2023-03-14 16:10:44 +08:00
}
2023-11-21 11:20:48 +08:00
/* Mirror bypass: nothing is sent to the SF; just account the packet. */
static inline void action_mirr_bypass(marsio_buff_t *rx_buff, struct metadata *meta, struct selected_sf *sf, struct thread_ctx *thread_ctx)
{
    THROUGHPUT_METRICS_INC(&(thread_ctx->thread_metrics.mirr_bypass), 1, meta->raw_len);
}
2023-11-21 11:20:48 +08:00
/* Mirror block: account only; the caller re-injects the original buffer. */
static inline void action_mirr_block(marsio_buff_t *rx_buff, struct metadata *meta, struct selected_sf *sf, struct thread_ctx *thread_ctx)
{
    THROUGHPUT_METRICS_INC(&(thread_ctx->thread_metrics.mirr_block), 1, meta->raw_len);
}
2023-11-21 11:20:48 +08:00
/* Mirror forward: duplicate the packet into a fresh buffer and send the
 * copy to the selected SF. The original rx_buff is left untouched (the
 * caller keeps ownership and continues the chain with it). */
static inline void action_mirr_forward(struct session_ctx *session_ctx, marsio_buff_t *rx_buff, struct metadata *meta, struct selected_sf *sf, struct thread_ctx *thread_ctx)
{
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;
    struct packet_io *packet_io = thread_ctx->ref_io;
    int thread_index = thread_ctx->thread_index;
    char *raw_data = marsio_buff_mtod(rx_buff);

    marsio_buff_t *new_buff = NULL;
    if (marsio_buff_malloc_global(packet_io->instance, &new_buff, 1, MARSIO_SOCKET_ID_ANY, MARSIO_LCORE_ID_ANY) < 0)
    {
        LOG_ERROR("%s: unable to malloc buff on marsio instance, thread_index: %d", LOG_TAG_PKTIO, thread_index);
        return;
    }

    char *copy_ptr = marsio_buff_append(new_buff, meta->raw_len);
    if (copy_ptr == NULL)
    {
        /* Not enough room for raw_len bytes: release the new buffer instead
         * of memcpy'ing through a NULL pointer (the old code did not check,
         * unlike the allocation above). */
        LOG_ERROR("%s: unable to append %d bytes to mirrored buff, thread_index: %d", LOG_TAG_PKTIO, meta->raw_len, thread_index);
        marsio_buff_free(packet_io->instance, &new_buff, 1, 0, thread_index);
        return;
    }
    memcpy(copy_ptr, raw_data, meta->raw_len);

    int nsend = send_packet_to_sf(session_ctx, new_buff, meta, sf, thread_ctx);
    THROUGHPUT_METRICS_INC(&(thread_metrics->mirr_tx), 1, meta->raw_len);
    THROUGHPUT_METRICS_INC(&sf->tx, 1, nsend);

    /* Per-SF accounting keyed by rule/profile/vsys. */
    struct sf_metrics_key key = { 0 };
    key.rule_id = sf->rule_id;
    key.sff_profile_id = sf->sff_profile_id;
    key.sf_profile_id = sf->sf_profile_id;
    key.vsys_id = sf->rule_vsys_id;
    sf_metrics_inc(thread_ctx->sf_metrics, &key, 0, 0, 1, nsend);
}
2023-11-21 11:20:48 +08:00
/* Steering bypass: skip this SF; just account the packet. */
static inline void action_stee_bypass(marsio_buff_t *rx_buff, struct metadata *meta, struct selected_sf *sf, struct thread_ctx *thread_ctx)
{
    THROUGHPUT_METRICS_INC(&(thread_ctx->thread_metrics.stee_bypass), 1, meta->raw_len);
}
2023-11-21 11:20:48 +08:00
/* Steering block: account the drop and release the buffer back to marsio. */
static inline void action_stee_block(marsio_buff_t *rx_buff, struct metadata *meta, struct selected_sf *sf, struct thread_ctx *thread_ctx)
{
    struct packet_io *io = thread_ctx->ref_io;

    THROUGHPUT_METRICS_INC(&(thread_ctx->thread_metrics.stee_block), 1, meta->raw_len);
    PACKET_TRACE_ON_FREE(io->instance, rx_buff);
    marsio_buff_free(io->instance, &rx_buff, 1, 0, thread_ctx->thread_index);
}
2023-11-21 11:20:48 +08:00
/* Steering forward: hand the original buffer to the selected SF and account
 * both per-thread and per-SF metrics. Ownership passes to the TX path. */
static inline void action_stee_forward(struct session_ctx *session_ctx, marsio_buff_t *rx_buff, struct metadata *meta, struct selected_sf *sf, struct thread_ctx *thread_ctx)
{
    int sent = send_packet_to_sf(session_ctx, rx_buff, meta, sf, thread_ctx);

    THROUGHPUT_METRICS_INC(&(thread_ctx->thread_metrics.stee_tx), 1, meta->raw_len);
    THROUGHPUT_METRICS_INC(&sf->tx, 1, sent);

    /* Per-SF accounting keyed by rule/profile/vsys. */
    struct sf_metrics_key key = {
        .rule_id = sf->rule_id,
        .sff_profile_id = sf->sff_profile_id,
        .sf_profile_id = sf->sf_profile_id,
        .vsys_id = sf->rule_vsys_id,
    };
    sf_metrics_inc(thread_ctx->sf_metrics, &key, 0, 0, 1, sent);
}
/*
 * Execute the service-function chain for one packet, starting at
 * next_sf_index (0 for a fresh data packet, last_index+1 for a packet
 * re-injected by an SF).
 *
 * Buffer ownership follows the actions: branches ending in "return"
 * consume rx_buff (blocked, steered away, or error-bypassed); branches
 * ending in "continue" leave rx_buff with us for the next chain element
 * (bypass, mirror copies).  A packet that survives the whole chain is
 * handed back to the NF device via action_nf_inject().
 */
static void action_sf_chaining(struct thread_ctx *thread_ctx, struct session_ctx *session_ctx, struct selected_chaining *chaining, marsio_buff_t *rx_buff, struct metadata *meta, int next_sf_index)
{
    int sf_index;
    for (sf_index = next_sf_index; sf_index < chaining->chaining_used; sf_index++)
    {
        struct selected_sf *sf = &(chaining->chaining[sf_index]);
        LOG_DEBUG("%s: session: %lu %s execute chaining [%d/%d]: policy %lu->%d->%d, action %s->%s->%s->%s",
                  LOG_TAG_POLICY, session_ctx->session_id, session_ctx->session_addr,
                  sf_index, chaining->chaining_used,
                  sf->rule_id, sf->sff_profile_id, sf->sf_profile_id,
                  (meta->is_decrypted ? "decrypted" : "raw"), (meta->direction ? "E2I" : "I2E"), forward_type_tostring(sf->sff_forward_type), action_desc_tostring(sf->sf_action_desc));

        PACKET_TRACE_ON_CHAIN(thread_ctx->ref_io->instance, rx_buff, sf, meta);
        PACKET_TELEMETRY_ON_CHAIN(thread_ctx->ref_io->instance, rx_buff, sf, meta);

        switch (sf->sf_action)
        {
        case SESSION_ACTION_BYPASS:
            /* Skip this SF; the packet stays with us either way. */
            if (sf->sff_forward_type == FORWARD_TYPE_STEERING)
            {
                action_stee_bypass(rx_buff, meta, sf, thread_ctx);
                continue;
            }
            else
            {
                action_mirr_bypass(rx_buff, meta, sf, thread_ctx);
                continue;
            }
        case SESSION_ACTION_BLOCK:
            if (sf->sff_forward_type == FORWARD_TYPE_STEERING)
            {
                /* Steering block: the packet is dropped, chain ends here. */
                action_stee_block(rx_buff, meta, sf, thread_ctx);
                return;
            }
            else
            {
                /* Mirror block: account the drop, then return the original
                 * packet to the NF device. */
                action_mirr_block(rx_buff, meta, sf, thread_ctx);
                action_nf_inject(rx_buff, meta, NULL, thread_ctx);
                return;
            }
        case SESSION_ACTION_FORWARD:
            /* Only VXLAN-G and L2-switch encapsulations are supported. */
            if (sf->sf_connectivity.method != ENCAPSULATE_METHOD_VXLAN_G && sf->sf_connectivity.method != ENCAPSULATE_METHOD_LAYER2_SWITCH)
            {
                LOG_ERROR("%s: processing packets, session %lu %s requires encapsulation format not supported, bypass !!!",
                          LOG_TAG_PKTIO, session_ctx->session_id, session_ctx->session_addr);
                action_err_bypass(rx_buff, meta, sf, thread_ctx);
                return;
            }
            if (sf->sff_forward_type == FORWARD_TYPE_STEERING)
            {
                /* Steered to the SF; it comes back later as an inject packet. */
                action_stee_forward(session_ctx, rx_buff, meta, sf, thread_ctx);
                return;
            }
            else
            {
                /* Mirrored copy goes to the SF; original continues down the chain. */
                action_mirr_forward(session_ctx, rx_buff, meta, sf, thread_ctx);
                continue;
            }
        }
    }
    /* Chain fully traversed without the buffer being consumed: give the
     * packet back to the NF device.  (The condition always holds on normal
     * loop exit; kept as a defensive guard.) */
    if (sf_index == chaining->chaining_used)
    {
        action_nf_inject(rx_buff, meta, NULL, thread_ctx);
    }
}
/******************************************************************************
* handle session status
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2023-11-23 16:52:06 +08:00
/*
 * Build a "log_update" control packet and send it out the NF device.
 *
 * The payload is a msgpack map:
 *   { "tsync": "2.0", "session_id": ..., "state": "active",
 *     "method": "log_update",
 *     "params": { "sce": { "sc_rule_list": [...],
 *                          "sc_rsp_raw": [...],        (only if present)
 *                          "sc_rsp_decrypted": [...] } } }
 * sc_rsp_* list the sf_profile_ids with a FORWARD action for raw and
 * decrypted traffic respectively; the arrays are opened lazily on the
 * first match so absent categories are omitted entirely.
 *
 * The msgpack body is prepended with the packet header saved when the
 * session was created, so the log travels the same path as the original
 * control packet.
 *
 * Returns the number of bytes handed to the NIC, or 0 when the msgpack
 * writer failed.
 */
static int send_ctrl_packet(struct session_ctx *session_ctx, struct thread_ctx *thread_ctx)
{
    struct sce_ctx *sce_ctx = thread_ctx->ref_sce_ctx;
    struct packet_io *packet_io = thread_ctx->ref_io;
    struct mutable_array *rule_ids = &session_ctx->rule_ids;
    struct selected_chaining *chaining_raw = session_ctx->chaining_raw;
    struct selected_chaining *chaining_decrypted = session_ctx->chaining_decrypted;
    int thread_index = thread_ctx->thread_index;
    /* Flags for the lazily-opened sc_rsp_* arrays. */
    int sc_rsp_raw_exist = 0;
    int sc_rsp_decrypted_exist = 0;

    char *data;
    size_t size;
    mpack_writer_t writer;
    /* Growable writer: mpack allocates `data`; we free it below. */
    mpack_writer_init_growable(&writer, &data, &size);
    mpack_build_map(&writer); // root begin
    // tsync protocol version
    mpack_write_cstr(&writer, "tsync");
    mpack_write_cstr(&writer, "2.0");
    // session_id
    mpack_write_cstr(&writer, "session_id");
    mpack_write_u64(&writer, session_ctx->session_id);
    // state: log_update is only sent for active sessions
    mpack_write_cstr(&writer, "state");
    mpack_write_cstr(&writer, "active");
    // method
    mpack_write_cstr(&writer, "method");
    mpack_write_cstr(&writer, "log_update");
    // params
    {
        mpack_write_cstr(&writer, "params");
        mpack_build_map(&writer); // params value begin
        // sce
        {
            mpack_write_cstr(&writer, "sce");
            mpack_build_map(&writer); // sce value begin
            {
                // every rule id that has hit this session so far
                mpack_write_cstr(&writer, "sc_rule_list");
                mpack_build_array(&writer); // sc_rule_list begin
                for (int i = 0; i < rule_ids->num; i++)
                {
                    mpack_write_u64(&writer, mutable_array_index_elem(rule_ids, i));
                }
                mpack_complete_array(&writer); // sc_rule_list end
            }
            {
                // sc_rsp_raw: forwarding SFs on the raw-traffic chain
                for (int i = 0; i < chaining_raw->chaining_used; i++)
                {
                    struct selected_sf *sf = &(chaining_raw->chaining[i]);
                    if (sf->sf_action == SESSION_ACTION_FORWARD)
                    {
                        if (sc_rsp_raw_exist == 0)
                        {
                            mpack_write_cstr(&writer, "sc_rsp_raw");
                            mpack_build_array(&writer); // sc_rsp_raw begin
                            sc_rsp_raw_exist = 1;
                        }
                        mpack_write_u64(&writer, sf->sf_profile_id);
                    }
                }
                if (sc_rsp_raw_exist == 1)
                {
                    mpack_complete_array(&writer); // sc_rsp_raw end
                }
            }
            {
                // sc_rsp_decrypted: forwarding SFs on the decrypted-traffic chain
                for (int i = 0; i < chaining_decrypted->chaining_used; i++)
                {
                    struct selected_sf *sf = &(chaining_decrypted->chaining[i]);
                    if (sf->sf_action == SESSION_ACTION_FORWARD)
                    {
                        if (sc_rsp_decrypted_exist == 0)
                        {
                            mpack_write_cstr(&writer, "sc_rsp_decrypted");
                            mpack_build_array(&writer); // sc_rsp_decrypted begin
                            sc_rsp_decrypted_exist = 1;
                        }
                        mpack_write_u64(&writer, sf->sf_profile_id);
                    }
                }
                if (sc_rsp_decrypted_exist == 1)
                {
                    mpack_complete_array(&writer); // sc_rsp_decrypted end
                }
            }
            mpack_complete_map(&writer); // sce value end
        }
        mpack_complete_map(&writer); // params value end
    }
    mpack_complete_map(&writer); // root end
    // finish writing; on failure we still own (and must free) `data`
    if (mpack_writer_destroy(&writer) != mpack_ok)
    {
        assert(0);
        if (data)
        {
            free(data);
            data = NULL;
        }
        return 0;
    }

    LOG_INFO("%s: session %lu %s send event log %ld bytes", LOG_TAG_SFMETRICS, session_ctx->session_id, session_ctx->session_addr, size);

    marsio_buff_t *tx_buffs[1];
    /* Packet header captured at session creation (see new_session()). */
    const char *packet_header_data = session_ctx->ctrl_pkt_hdr_ptr;
    int packet_header_len = session_ctx->ctrl_pkt_hdr_len;
    /* NOTE(review): the results of marsio_buff_malloc_global() and
     * marsio_buff_append() are not checked before the memcpy — confirm the
     * marsio API guarantees success here, or add checks. */
    marsio_buff_malloc_global(packet_io->instance, tx_buffs, 1, 0, thread_index);
    char *dst = marsio_buff_append(tx_buffs[0], packet_header_len + size);
    memcpy(dst, packet_header_data, packet_header_len);
    memcpy(dst + packet_header_len, data, size);

    struct metadata meta = {0};
    meta.session_id = session_ctx->session_id;
    meta.l7offset = packet_header_len; // msgpack payload starts after the header
    meta.is_ctrl_pkt = 1;
    meta.sids.num = 1;
    meta.sids.elems[0] = sce_ctx->firewall_sids;
    route_ctx_copy(&meta.route_ctx, &(session_ctx->ctrl_route_ctx));
    mbuff_set_metadata(tx_buffs[0], &meta);
    int nsend = marsio_buff_datalen(tx_buffs[0]);

    PACKET_TRACE_ON_NEW(packet_io->instance, tx_buffs[0]);
    marsio_send_burst(packet_io->dev_nf.mr_path, thread_index, tx_buffs, 1);

    free(data);
    return nsend;
}
2023-05-08 10:53:44 +08:00
static void send_event_log ( struct session_ctx * session_ctx , struct thread_ctx * thread_ctx )
{
int nsend = 0 ;
struct sce_ctx * sce_ctx = thread_ctx - > ref_sce_ctx ;
2023-10-12 16:31:53 +08:00
struct thread_metrics * thread_metrics = & thread_ctx - > thread_metrics ;
2023-05-08 10:53:44 +08:00
2023-11-14 18:28:27 +08:00
if ( sce_ctx - > enable_send_log )
2023-05-08 10:53:44 +08:00
{
2023-11-23 16:52:06 +08:00
nsend = send_ctrl_packet ( session_ctx , thread_ctx ) ;
2023-05-08 10:53:44 +08:00
if ( nsend > 0 )
{
2023-11-24 15:17:18 +08:00
ATOMIC_INC ( & ( thread_metrics - > session_log ) ) ;
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > ctrl_tx ) , 1 , nsend ) ;
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > nf_tx ) , 1 , nsend ) ;
2023-05-08 10:53:44 +08:00
}
}
}
2023-11-03 10:02:50 +08:00
static void dump_sf_metrics ( struct session_ctx * session_ctx , struct selected_chaining * chaining )
2023-03-14 16:10:44 +08:00
{
if ( chaining = = NULL )
2023-02-10 14:22:40 +08:00
{
2023-03-14 16:10:44 +08:00
return ;
2023-02-10 14:22:40 +08:00
}
2023-03-14 16:10:44 +08:00
for ( int i = 0 ; i < chaining - > chaining_used ; i + + )
2023-02-10 14:22:40 +08:00
{
2023-03-14 16:10:44 +08:00
struct selected_sf * sf = & ( chaining - > chaining [ i ] ) ;
2023-11-03 10:02:50 +08:00
LOG_INFO ( " %s: session %lu %s metrics: policy %lu->%d->%d action %s->%s->%s rx_pkts %lu rx_bytes %lu tx_pkts %lu tx_bytes %lu " ,
2023-11-23 16:52:06 +08:00
LOG_TAG_SFMETRICS , session_ctx - > session_id , session_ctx - > session_addr ,
2023-11-03 10:02:50 +08:00
sf - > rule_id , sf - > sff_profile_id , sf - > sf_profile_id ,
2023-11-23 16:52:06 +08:00
traffic_type_tostring ( sf - > traffic_type ) , forward_type_tostring ( sf - > sff_forward_type ) , action_desc_tostring ( sf - > sf_action_desc ) ,
2023-11-03 10:02:50 +08:00
sf - > rx . n_pkts , sf - > rx . n_bytes , sf - > tx . n_pkts , sf - > tx . n_bytes ) ;
2023-02-10 14:22:40 +08:00
}
2023-03-14 16:10:44 +08:00
}
2023-02-10 14:22:40 +08:00
2023-03-14 16:10:44 +08:00
/* session_table value destructor: release one stored session context. */
static void session_value_free_cb(void *ctx)
{
    session_ctx_free((struct session_ctx *)ctx);
}
2023-11-23 16:52:06 +08:00
/* Apply every rule id carried by the control packet that has not already
 * been applied to this session: select its chainings and remember the id. */
static void handle_policy_mutil_hits(struct session_ctx *session_ctx, struct control_packet *ctrl_pkt, packet *data_pkt, int direction, struct thread_ctx *thread_ctx)
{
    struct policy_enforcer *enforcer = thread_ctx->ref_enforcer;
    struct sce_ctx *sce = thread_ctx->ref_sce_ctx;

    for (int i = 0; i < ctrl_pkt->rule_id_num; i++)
    {
        uint64_t rule_id = ctrl_pkt->rule_ids[i];

        /* Each rule is applied to a session at most once. */
        if (!mutable_array_exist_elem(&session_ctx->rule_ids, rule_id))
        {
            policy_enforce_select_chainings(enforcer, session_ctx, data_pkt, rule_id, direction);

            if (sce->enable_debug)
            {
                selected_chaining_bref(session_ctx->chaining_raw);
                selected_chaining_bref(session_ctx->chaining_decrypted);
            }

            mutable_array_add_elem(&session_ctx->rule_ids, rule_id);
        }
    }
}
2023-10-18 10:08:10 +08:00
static void handle_session_closing ( struct metadata * meta , struct control_packet * ctrl_pkt , struct thread_ctx * thread_ctx )
2023-03-14 16:10:44 +08:00
{
2023-10-12 16:31:53 +08:00
struct thread_metrics * thread_metrics = & thread_ctx - > thread_metrics ;
2023-03-14 16:10:44 +08:00
struct session_table * session_table = thread_ctx - > session_table ;
2023-11-28 16:36:58 +08:00
struct session_ctx * s_ctx = ( struct session_ctx * ) session_table_search_by_id ( session_table , meta - > session_id ) ;
if ( s_ctx )
2023-02-10 14:22:40 +08:00
{
2023-03-14 16:10:44 +08:00
LOG_INFO ( " %s: session %lu %s closing " , LOG_TAG_PKTIO , s_ctx - > session_id , s_ctx - > session_addr ) ;
2023-11-23 16:52:06 +08:00
dump_sf_metrics ( s_ctx , s_ctx - > chaining_raw ) ;
dump_sf_metrics ( s_ctx , s_ctx - > chaining_decrypted ) ;
2023-03-14 16:10:44 +08:00
session_table_delete_by_id ( session_table , meta - > session_id ) ;
2023-11-24 15:17:18 +08:00
ATOMIC_DEC ( & ( thread_metrics - > session_num ) ) ;
2024-03-07 10:38:05 +08:00
ATOMIC_INC ( & ( thread_metrics - > session_free ) ) ;
2023-02-10 14:22:40 +08:00
}
2023-03-14 16:10:44 +08:00
}
2023-02-10 14:22:40 +08:00
2023-11-29 11:21:07 +08:00
static void verify_dataoffset ( struct metadata * meta , struct packet * data_pkt , struct four_tuple * inner_tuple4 )
{
const char * payload = packet_parse ( data_pkt , meta - > raw_data , meta - > raw_len ) ;
uint16_t expect_offset = payload - meta - > raw_data ;
sce_packet_get_innermost_tuple ( data_pkt , inner_tuple4 ) ;
if ( expect_offset ! = meta - > l7offset )
{
char * addr_str = four_tuple_tostring ( inner_tuple4 ) ;
LOG_ERROR ( " %s: incorrect dataoffset %d in mbuff of session %lu %s (expect: %d) " , LOG_TAG_PKTIO , meta - > l7offset , meta - > session_id , addr_str , expect_offset ) ;
free ( addr_str ) ;
}
}
/*
 * Allocate and initialize a session context from a control packet's
 * metadata.  Ownership passes to the caller, which normally inserts it
 * into the session table with session_value_free_cb as destructor.
 */
static struct session_ctx *new_session(struct metadata *meta, struct four_tuple *inner_tuple4, struct thread_ctx *thread_ctx)
{
    struct policy_enforcer *enforcer = thread_ctx->ref_enforcer;
    struct sce_ctx *sce_ctx = thread_ctx->ref_sce_ctx;
    int chaining_size = policy_enforce_chaining_size(enforcer);
    struct session_ctx *session_ctx = session_ctx_new();
    session_ctx->session_id = meta->session_id;
    /* Printable address only kept in debug mode (costs an allocation). */
    session_ctx->session_addr = sce_ctx->enable_debug ? four_tuple_tostring(inner_tuple4) : NULL;
    session_ctx->rehash_index = meta->rehash_index;
    session_ctx->vxlan_src_port = calculate_vxlan_source_port(inner_tuple4);
    /* Keep a copy of the control packet's header (bytes up to l7offset);
     * send_ctrl_packet() prepends it to outgoing event logs. */
    session_ctx->ctrl_pkt_hdr_ptr = memdup(meta->raw_data, meta->l7offset);
    session_ctx->ctrl_pkt_hdr_len = meta->l7offset;
    session_ctx->chaining_raw = selected_chaining_create(chaining_size, session_ctx->session_id, session_ctx->session_addr);
    session_ctx->chaining_decrypted = selected_chaining_create(chaining_size, session_ctx->session_id, session_ctx->session_addr);
    session_ctx->ref_thread_ctx = thread_ctx;
    four_tuple_copy(&session_ctx->inner_tuple4, inner_tuple4);
    /* Remember the control packet's route so replies follow the same path. */
    route_ctx_copy(&session_ctx->ctrl_route_ctx, &meta->route_ctx);
    return session_ctx;
}
2023-10-18 10:08:10 +08:00
/*
 * SESSION_STATE_ACTIVE control packet: create the session on first sight
 * (or just log on repeat), apply any newly-hit rules, and emit an event log.
 */
static void handle_session_active(struct metadata *meta, struct control_packet *ctrl_pkt, struct thread_ctx *thread_ctx)
{
    struct packet data_pkt;
    struct four_tuple inner_tuple4;
    struct session_table *session_table = thread_ctx->session_table;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;

    struct session_ctx *session_ctx = (struct session_ctx *)session_table_search_by_id(session_table, meta->session_id);
    /* Side effect: also parses data_pkt and fills inner_tuple4, which both
     * new_session() and handle_policy_mutil_hits() below rely on. */
    verify_dataoffset(meta, &data_pkt, &inner_tuple4);
    if (session_ctx)
    {
        LOG_INFO("%s: session %lu %s active again", LOG_TAG_PKTIO, session_ctx->session_id, session_ctx->session_addr);
    }
    else
    {
        session_ctx = new_session(meta, &inner_tuple4, thread_ctx);
        LOG_INFO("%s: session %lu %s active first", LOG_TAG_PKTIO, session_ctx->session_id, session_ctx->session_addr);
        /* The table takes ownership; freed via session_value_free_cb. */
        session_table_insert(session_table, session_ctx->session_id, &session_ctx->inner_tuple4, session_ctx, session_value_free_cb);
        ATOMIC_INC(&(thread_metrics->session_num));
        ATOMIC_INC(&(thread_metrics->session_new));
    }
    handle_policy_mutil_hits(session_ctx, ctrl_pkt, &data_pkt, meta->direction, thread_ctx);
    send_event_log(session_ctx, thread_ctx);
}
2023-10-18 10:08:10 +08:00
static void handle_session_resetall ( struct metadata * meta , struct control_packet * ctrl_pkt , struct thread_ctx * thread_ctx )
2023-02-21 09:58:31 +08:00
{
2023-11-20 15:36:28 +08:00
struct global_metrics * global_metrics = thread_ctx - > ref_global_metrics ;
2023-03-14 16:10:44 +08:00
struct sce_ctx * sce_ctx = thread_ctx - > ref_sce_ctx ;
LOG_ERROR ( " %s: session %lu resetall: notification clears all session tables !!! " , LOG_TAG_PKTIO , meta - > session_id ) ;
2023-11-24 15:17:18 +08:00
ATOMIC_ZERO ( & ( global_metrics - > sum . session_num ) ) ;
2023-03-14 16:10:44 +08:00
for ( int i = 0 ; i < sce_ctx - > nr_worker_threads ; i + + )
{
struct thread_ctx * temp_ctx = & sce_ctx - > work_threads [ i ] ;
ATOMIC_INC ( & temp_ctx - > session_table_need_reset ) ;
}
2023-02-21 09:58:31 +08:00
}
2023-03-14 16:10:44 +08:00
/******************************************************************************
* handle control / raw / inject packet
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2023-02-23 18:15:21 +08:00
2023-11-22 16:16:59 +08:00
/*
 * Dispatch a firewall control packet (session lifecycle notification).
 * Per-state counters are kept; malformed packets are counted as ctrl_error.
 * The caller retains ownership of rx_buff.
 */
static void handle_control_packet(marsio_buff_t *rx_buff, struct thread_ctx *thread_ctx, int raw_len)
{
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;
    struct metadata meta;
    struct control_packet ctrl_pkt;
    enum control_packet_state packet_state = CTRL_PKT_SUCCESS;

    memset(&meta, 0, sizeof(struct metadata));
    meta.is_ctrl_pkt = 1;
    meta.raw_len = raw_len;
    meta.raw_data = marsio_buff_mtod(rx_buff);
    if (mbuff_get_metadata(rx_buff, &meta) == -1)
    {
        LOG_ERROR("%s: unexpected control packet, unable to get metadata", LOG_TAG_PKTIO);
        /* NOTE(review): packet_state is still CTRL_PKT_SUCCESS here, so the
         * error trace below reports success — confirm this is intended. */
        goto error_ctrl_pkt;
    }
    /* The msgpack control payload starts at l7offset. */
    packet_state = control_packet_parse(&ctrl_pkt, meta.raw_data + meta.l7offset, meta.raw_len - meta.l7offset);
    if (packet_state != CTRL_PKT_SUCCESS)
    {
        LOG_ERROR("%s: unexpected control packet, unable to parse data", LOG_TAG_PKTIO);
        goto error_ctrl_pkt;
    }
    /* Sanity check: mbuff metadata and packet body must agree on the session. */
    if (ctrl_pkt.session_id != meta.session_id)
    {
        LOG_ERROR("%s: unexpected control packet, metadata's session %lu != control packet's session %lu", LOG_TAG_PKTIO, meta.session_id, ctrl_pkt.session_id);
        packet_state = CTRL_PKT_INVALID_SESSION_ID;
        goto error_ctrl_pkt;
    }

    switch (ctrl_pkt.state)
    {
    case SESSION_STATE_OPENING:
        ATOMIC_INC(&(thread_metrics->ctrl_opening));
        // when session opening, firewall not send policy id
        // return handle_session_opening(&meta, &ctrl_pkt, ctx);
        break;
    case SESSION_STATE_CLOSING:
        ATOMIC_INC(&(thread_metrics->ctrl_closing));
        handle_session_closing(&meta, &ctrl_pkt, thread_ctx);
        break;
    case SESSION_STATE_ACTIVE:
        ATOMIC_INC(&(thread_metrics->ctrl_active));
        handle_session_active(&meta, &ctrl_pkt, thread_ctx);
        break;
    case SESSION_STATE_RESETALL:
        ATOMIC_INC(&(thread_metrics->ctrl_resetall));
        handle_session_resetall(&meta, &ctrl_pkt, thread_ctx);
        break;
    default:
        /* Unknown session state: counted as ctrl_error below.
         * NOTE(review): packet_state stays CTRL_PKT_SUCCESS on this path too. */
        goto error_ctrl_pkt;
    }
    PACKET_TRACE_ON_CTRL(thread_ctx->ref_io->instance, rx_buff, packet_state);
    return;

error_ctrl_pkt:
    PACKET_TRACE_ON_CTRL(thread_ctx->ref_io->instance, rx_buff, packet_state);
    ATOMIC_INC(&(thread_metrics->ctrl_error));
    return;
}
2023-11-22 16:16:59 +08:00
static void handle_data_packet ( marsio_buff_t * rx_buff , struct thread_ctx * thread_ctx , int raw_len )
2023-02-10 14:22:40 +08:00
{
2023-03-14 16:10:44 +08:00
struct session_table * session_table = thread_ctx - > session_table ;
2023-10-12 16:31:53 +08:00
struct thread_metrics * thread_metrics = & thread_ctx - > thread_metrics ;
2023-02-10 14:22:40 +08:00
struct metadata meta ;
2023-03-14 16:10:44 +08:00
struct session_ctx * session_ctx = NULL ;
struct selected_chaining * chaining = NULL ;
2023-02-10 14:22:40 +08:00
2023-11-22 16:16:59 +08:00
memset ( & meta , 0 , sizeof ( struct metadata ) ) ;
2023-11-22 16:33:52 +08:00
meta . is_ctrl_pkt = 0 ;
2023-11-22 16:16:59 +08:00
meta . raw_len = raw_len ;
meta . raw_data = marsio_buff_mtod ( rx_buff ) ;
2023-03-14 16:10:44 +08:00
if ( mbuff_get_metadata ( rx_buff , & meta ) = = - 1 )
2023-02-10 14:22:40 +08:00
{
2023-03-14 16:10:44 +08:00
LOG_ERROR ( " %s: unexpected raw packet, unable to get metadata, bypass !!! " , LOG_TAG_PKTIO ) ;
goto error_bypass ;
2023-02-10 14:22:40 +08:00
}
2023-03-09 21:00:59 +08:00
2023-05-04 17:56:12 +08:00
// bypass_traffic:0 disable
// bypass_traffic:1 bypass all traffic
// bypass_traffic:2 bypass raw traffic
// bypass_traffic:3 bypass decrypted traffic
if ( unlikely ( thread_ctx - > ref_io - > config . bypass_traffic = = 2 & & meta . is_decrypted = = 0 ) )
{
2023-05-06 10:41:30 +08:00
LOG_DEBUG ( " %s: session %lu bypass, enable raw traffic bypass !!! " , LOG_TAG_PKTIO , meta . session_id ) ;
2023-05-04 17:56:12 +08:00
goto error_bypass ;
}
if ( unlikely ( thread_ctx - > ref_io - > config . bypass_traffic = = 3 & & meta . is_decrypted = = 1 ) )
{
2023-05-06 10:41:30 +08:00
LOG_DEBUG ( " %s: session %lu bypass, enable decrypted traffic bypass !!! " , LOG_TAG_PKTIO , meta . session_id ) ;
2023-05-04 17:56:12 +08:00
goto error_bypass ;
}
2023-11-29 13:59:06 +08:00
session_ctx = data_packet_search_session ( session_table , meta . raw_data , meta . raw_len , meta . session_id , thread_ctx ) ;
2023-03-14 16:10:44 +08:00
if ( session_ctx = = NULL )
2023-03-09 21:00:59 +08:00
{
2023-11-24 15:17:18 +08:00
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > miss_sess ) , 1 , meta . raw_len ) ;
2023-03-14 16:10:44 +08:00
goto error_bypass ;
2023-03-09 21:00:59 +08:00
}
2023-02-10 14:22:40 +08:00
2023-05-06 10:41:30 +08:00
update_session_by_metadata ( session_ctx , & meta ) ;
2023-02-10 14:22:40 +08:00
2023-03-14 16:10:44 +08:00
if ( meta . is_decrypted = = 1 )
2023-02-10 14:22:40 +08:00
{
2023-11-23 16:52:06 +08:00
chaining = session_ctx - > chaining_decrypted ;
2023-02-10 14:22:40 +08:00
}
2023-03-14 16:10:44 +08:00
else
2023-03-10 15:12:04 +08:00
{
2023-11-23 16:52:06 +08:00
chaining = session_ctx - > chaining_raw ;
2023-03-10 15:12:04 +08:00
}
2023-03-14 16:10:44 +08:00
if ( chaining = = NULL )
2023-03-10 15:12:04 +08:00
{
2023-03-14 16:10:44 +08:00
LOG_ERROR ( " %s: unexpected raw packet, session %lu %s misses policy, bypass !!! " , LOG_TAG_PKTIO , session_ctx - > session_id , session_ctx - > session_addr ) ;
goto error_bypass ;
2023-03-10 15:12:04 +08:00
}
2023-03-14 16:10:44 +08:00
2024-03-06 11:56:07 +08:00
if ( meta . is_decrypted = = 1 )
{
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > dec_rx ) , 1 , meta . raw_len ) ;
}
else
{
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > raw_rx ) , 1 , meta . raw_len ) ;
}
2024-02-29 18:18:30 +08:00
PACKET_TRACE_ON_POLICY ( thread_ctx - > ref_io - > instance , rx_buff , & session_ctx - > rule_ids , chaining ) ;
2024-04-26 17:37:54 +08:00
PACKET_TELEMETRY_ON_POLICY ( thread_ctx - > ref_io - > instance , rx_buff , & session_ctx - > rule_ids , chaining ) ;
2023-03-14 16:10:44 +08:00
action_sf_chaining ( thread_ctx , session_ctx , chaining , rx_buff , & meta , 0 ) ;
return ;
error_bypass :
2024-03-06 11:56:07 +08:00
if ( meta . is_decrypted = = 1 )
{
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > dec_rx ) , 1 , meta . raw_len ) ;
}
else
{
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > raw_rx ) , 1 , meta . raw_len ) ;
}
2023-03-14 16:10:44 +08:00
action_err_bypass ( rx_buff , & meta , NULL , thread_ctx ) ;
2023-02-10 14:22:40 +08:00
}
2023-11-22 16:16:59 +08:00
/*
 * Handle a packet re-injected by a service function over the VXLAN
 * endpoint: decode the VXLAN framing, restore metadata from the VXLAN
 * header fields, find the owning session, and resume the chain at the
 * element after the SF that returned the packet.  Stateless traffic is
 * injected straight back to the NF device without a session lookup.
 */
static void handle_inject_vxlan_packet(marsio_buff_t *rx_buff, struct thread_ctx *thread_ctx, int raw_len)
{
    struct session_table *session_table = thread_ctx->session_table;
    struct thread_metrics *thread_metrics = &thread_ctx->thread_metrics;
    struct packet_io *packet_io = thread_ctx->ref_io;
    int thread_index = thread_ctx->thread_index;
    struct sce_ctx *sce_ctx = thread_ctx->ref_sce_ctx;

    struct metadata meta;
    struct vxlan_hdr *vxlan_hdr = NULL;
    struct session_ctx *session_ctx = NULL;
    struct selected_chaining *chaining = NULL;
    memset(&meta, 0, sizeof(struct metadata));
    int sf_index = 0;
    char *raw_data = marsio_buff_mtod(rx_buff);
    if (vxlan_frame_decode(&vxlan_hdr, raw_data, raw_len) == -1)
    {
        THROUGHPUT_METRICS_INC(&(thread_metrics->endpoint_vxlan_drop), 1, raw_len);
        // health check packet not increase err_block metrics
        PACKET_TRACE_ON_FREE(packet_io->instance, rx_buff);
        marsio_buff_free(packet_io->instance, &rx_buff, 1, 0, thread_index);
        return;
    }
    /* Inner packet starts right after the VXLAN header. */
    meta.raw_data = (char *)vxlan_hdr + sizeof(struct vxlan_hdr);
    meta.raw_len = raw_len - VXLAN_FRAME_HDR_LEN;
    meta.l7offset = 0;
    meta.is_ctrl_pkt = 0;
    /* Chain position and traffic attributes travel in the VXLAN header. */
    sf_index = vxlan_get_sf_index(vxlan_hdr);
    meta.direction = vxlan_get_dir(vxlan_hdr);
    meta.is_decrypted = vxlan_get_traffic(vxlan_hdr);
    meta.link_id = vxlan_get_link_id(vxlan_hdr);
    if (vxlan_get_stateless(vxlan_hdr))
    {
        /* Stateless traffic: no session, strip framing and inject back. */
        meta.sids.num = 1;
        meta.sids.elems[0] = sce_ctx->stateless_sids;
        THROUGHPUT_METRICS_INC(&(thread_metrics->stateless_inject), 1, meta.raw_len);
        marsio_buff_adj(rx_buff, raw_len - meta.raw_len);
        action_nf_inject(rx_buff, &meta, NULL, thread_ctx);
        return;
    }

    session_ctx = inject_packet_search_session(session_table, meta.raw_data, meta.raw_len, thread_ctx);
    if (session_ctx == NULL)
    {
        goto error_block;
    }

    update_metadata_by_session(session_ctx, &meta);

    /* Decrypted and raw traffic run separate chains. */
    if (meta.is_decrypted == 1)
    {
        chaining = session_ctx->chaining_decrypted;
    }
    else
    {
        chaining = session_ctx->chaining_raw;
    }
    if (chaining == NULL || sf_index < 0 || sf_index >= chaining->chaining_used)
    {
        LOG_ERROR("%s: unexpected inject packet, session %lu %s misses chaining index, drop !!!",
                  LOG_TAG_PKTIO, session_ctx->session_id, session_ctx->session_addr);
        goto error_block;
    }
    if (chaining->chaining[sf_index].sff_forward_type == FORWARD_TYPE_MIRRORING)
    {
        /* Mirrored copies must not be reflowed by the SF; drop them. */
        LOG_DEBUG("%s: unexpected inject packet, session %lu %s with sf_profile_id %d executes mirror and does not require reflow, drop !!!",
                  LOG_TAG_PKTIO, session_ctx->session_id, session_ctx->session_addr, chaining->chaining[sf_index].sf_profile_id);
        THROUGHPUT_METRICS_INC(&(thread_metrics->mirr_rx_drop), 1, meta.raw_len);
        goto error_block;
    }
    else
    {
        struct selected_sf *sf = &(chaining->chaining[sf_index]);
        /* NOTE(review): sf->rx counts raw_len (including VXLAN framing)
         * while stee_rx counts meta.raw_len (inner packet only) — confirm
         * this asymmetry is intended. */
        THROUGHPUT_METRICS_INC(&sf->rx, 1, raw_len);
        THROUGHPUT_METRICS_INC(&(thread_metrics->stee_rx), 1, meta.raw_len);
        struct sf_metrics_key key = {0};
        key.rule_id = sf->rule_id;
        key.sff_profile_id = sf->sff_profile_id;
        key.sf_profile_id = sf->sf_profile_id;
        key.vsys_id = sf->rule_vsys_id;
        sf_metrics_inc(thread_ctx->sf_metrics, &key, 1, raw_len, 0, 0);
    }
    /* Strip the VXLAN framing, then resume the chain after this SF. */
    marsio_buff_adj(rx_buff, raw_len - meta.raw_len);
    action_sf_chaining(thread_ctx, session_ctx, chaining, rx_buff, &meta, sf_index + 1);
    return;

error_block:
    THROUGHPUT_METRICS_INC(&(thread_metrics->endpoint_vxlan_drop), 1, raw_len);
    marsio_buff_adj(rx_buff, raw_len - meta.raw_len);
    action_err_block(rx_buff, &meta, NULL, thread_ctx);
}
2023-02-10 14:22:40 +08:00
2023-03-14 16:10:44 +08:00
/******************************************************************************
* packet io
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2023-02-10 14:22:40 +08:00
2023-03-14 16:10:44 +08:00
// return 0 : success
// return -1 : error
static int packet_io_config ( const char * profile , struct config * config )
{
2023-05-04 17:56:12 +08:00
// bypass_traffic:0 disable
// bypass_traffic:1 bypass all traffic
// bypass_traffic:2 bypass raw traffic
// bypass_traffic:3 bypass decrypted traffic
MESA_load_profile_int_def ( profile , " PACKET_IO " , " bypass_traffic " , ( int * ) & ( config - > bypass_traffic ) , 0 ) ;
2023-03-14 16:10:44 +08:00
MESA_load_profile_int_def ( profile , " PACKET_IO " , " rx_burst_max " , ( int * ) & ( config - > rx_burst_max ) , 1 ) ;
2023-10-16 17:04:21 +08:00
MESA_load_profile_int_def ( profile , " PACKET_IO " , " min_timeout_ms " , ( int * ) & ( config - > min_timeout_ms ) , 900 ) ;
2023-03-14 16:10:44 +08:00
MESA_load_profile_string_nodef ( profile , " PACKET_IO " , " app_symbol " , config - > app_symbol , sizeof ( config - > app_symbol ) ) ;
2023-02-10 14:22:40 +08:00
2023-10-18 10:08:10 +08:00
MESA_load_profile_string_nodef ( profile , " PACKET_IO " , " dev_nf_name " , config - > dev_nf_name , sizeof ( config - > dev_nf_name ) ) ;
MESA_load_profile_string_nodef ( profile , " PACKET_IO " , " dev_endpoint_l3_name " , config - > dev_endpoint_l3_name , sizeof ( config - > dev_endpoint_l3_name ) ) ;
MESA_load_profile_string_nodef ( profile , " PACKET_IO " , " dev_endpoint_l3_ip " , config - > dev_endpoint_l3_ip_str , sizeof ( config - > dev_endpoint_l3_ip_str ) ) ;
MESA_load_profile_string_nodef ( profile , " PACKET_IO " , " dev_endpoint_l3_mac " , config - > dev_endpoint_l3_mac_str , sizeof ( config - > dev_endpoint_l3_mac_str ) ) ;
MESA_load_profile_string_nodef ( profile , " PACKET_IO " , " dev_endpoint_l2_name " , config - > dev_endpoint_l2_name , sizeof ( config - > dev_endpoint_l2_name ) ) ;
MESA_load_profile_int_def ( profile , " PACKET_IO " , " vlan_encapsulate_replace_orig_vlan_header " , ( int * ) & ( config - > vlan_encapsulate_replace_orig_vlan_header ) , 0 ) ;
2023-02-27 14:37:31 +08:00
2023-03-14 16:10:44 +08:00
if ( config - > rx_burst_max > RX_BURST_MAX )
{
LOG_ERROR ( " %s: invalid rx_burst_max, exceeds limit %d " , LOG_TAG_PKTIO , RX_BURST_MAX ) ;
return - 1 ;
2023-02-10 14:22:40 +08:00
}
2023-03-14 16:10:44 +08:00
if ( strlen ( config - > app_symbol ) = = 0 )
2023-02-10 14:22:40 +08:00
{
2023-03-14 16:10:44 +08:00
LOG_ERROR ( " %s: invalid app_symbol in %s " , LOG_TAG_PKTIO , profile ) ;
return - 1 ;
2023-02-10 14:22:40 +08:00
}
2023-03-14 16:10:44 +08:00
2023-10-18 10:08:10 +08:00
if ( strlen ( config - > dev_nf_name ) = = 0 )
{
LOG_ERROR ( " %s: invalid dev_nf_name in %s " , LOG_TAG_PKTIO , profile ) ;
return - 1 ;
}
if ( strlen ( config - > dev_endpoint_l3_name ) = = 0 )
2023-02-10 14:22:40 +08:00
{
2023-10-18 10:08:10 +08:00
LOG_ERROR ( " %s: invalid dev_endpoint_l3_name in %s " , LOG_TAG_PKTIO , profile ) ;
2023-03-14 16:10:44 +08:00
return - 1 ;
}
2023-10-18 10:08:10 +08:00
if ( strlen ( config - > dev_endpoint_l2_name ) = = 0 )
2023-03-14 16:10:44 +08:00
{
2023-10-18 10:08:10 +08:00
LOG_ERROR ( " %s: invalid dev_endpoint_l2_name in %s " , LOG_TAG_PKTIO , profile ) ;
2023-03-14 16:10:44 +08:00
return - 1 ;
}
2023-10-18 10:08:10 +08:00
LOG_DEBUG ( " %s: PACKET_IO->bypass_traffic : %d " , LOG_TAG_PKTIO , config - > bypass_traffic ) ;
LOG_DEBUG ( " %s: PACKET_IO->rx_burst_max : %d " , LOG_TAG_PKTIO , config - > rx_burst_max ) ;
LOG_DEBUG ( " %s: PACKET_IO->min_timeout_ms : %d " , LOG_TAG_PKTIO , config - > min_timeout_ms ) ;
LOG_DEBUG ( " %s: PACKET_IO->app_symbol : %s " , LOG_TAG_PKTIO , config - > app_symbol ) ;
LOG_DEBUG ( " %s: PACKET_IO->dev_nf_name : %s " , LOG_TAG_PKTIO , config - > dev_nf_name ) ;
2023-11-03 10:02:50 +08:00
2023-10-18 10:08:10 +08:00
LOG_DEBUG ( " %s: PACKET_IO->dev_endpoint_l3_name : %s " , LOG_TAG_PKTIO , config - > dev_endpoint_l3_name ) ;
LOG_DEBUG ( " %s: PACKET_IO->dev_endpoint_l3_ip : %s " , LOG_TAG_PKTIO , config - > dev_endpoint_l3_ip_str ) ;
2023-11-03 10:02:50 +08:00
2023-10-18 10:08:10 +08:00
LOG_DEBUG ( " %s: PACKET_IO->dev_endpoint_l2_name : %s " , LOG_TAG_PKTIO , config - > dev_endpoint_l2_name ) ;
LOG_DEBUG ( " %s: PACKET_IO->vlan_encapsulate_replace_orig_vlan_header : %d " , LOG_TAG_PKTIO , config - > vlan_encapsulate_replace_orig_vlan_header ) ;
2023-03-14 16:10:44 +08:00
return 0 ;
2023-02-10 14:22:40 +08:00
}
2023-03-14 16:10:44 +08:00
struct packet_io * packet_io_create ( const char * profile , int thread_num , cpu_set_t * coremask )
2023-02-10 14:22:40 +08:00
{
2023-03-14 16:10:44 +08:00
int opt = 1 ;
struct packet_io * handle = ( struct packet_io * ) calloc ( 1 , sizeof ( struct packet_io ) ) ;
assert ( handle ! = NULL ) ;
handle - > thread_num = thread_num ;
2023-02-10 14:22:40 +08:00
2023-03-14 16:10:44 +08:00
if ( packet_io_config ( profile , & ( handle - > config ) ) ! = 0 )
{
goto error_out ;
}
2023-02-21 09:58:31 +08:00
2023-03-14 16:10:44 +08:00
handle - > instance = marsio_create ( ) ;
if ( handle - > instance = = NULL )
{
LOG_ERROR ( " %s: unable to create marsio instance " , LOG_TAG_PKTIO ) ;
goto error_out ;
}
2023-02-10 14:22:40 +08:00
2023-03-14 16:10:44 +08:00
if ( marsio_option_set ( handle - > instance , MARSIO_OPT_THREAD_MASK_IN_CPUSET , coremask , sizeof ( cpu_set_t ) ) ! = 0 )
{
LOG_ERROR ( " %s: unable to set MARSIO_OPT_EXIT_WHEN_ERR option for marsio instance " , LOG_TAG_PKTIO ) ;
goto error_out ;
}
2023-02-10 14:22:40 +08:00
2023-03-14 16:10:44 +08:00
if ( marsio_option_set ( handle - > instance , MARSIO_OPT_EXIT_WHEN_ERR , & opt , sizeof ( opt ) ) ! = 0 )
2023-02-10 14:22:40 +08:00
{
2023-03-14 16:10:44 +08:00
LOG_ERROR ( " %s: unable to set MARSIO_OPT_EXIT_WHEN_ERR option for marsio instance " , LOG_TAG_PKTIO ) ;
goto error_out ;
2023-02-10 14:22:40 +08:00
}
2023-03-14 16:10:44 +08:00
if ( marsio_init ( handle - > instance , handle - > config . app_symbol ) ! = 0 )
2023-02-27 14:37:31 +08:00
{
2023-03-14 16:10:44 +08:00
LOG_ERROR ( " %s: unable to initialize marsio instance " , LOG_TAG_PKTIO ) ;
goto error_out ;
2023-02-27 14:37:31 +08:00
}
2023-10-18 10:08:10 +08:00
handle - > dev_nf . mr_dev = marsio_open_device ( handle - > instance , handle - > config . dev_nf_name , handle - > thread_num , handle - > thread_num ) ;
if ( handle - > dev_nf . mr_dev = = NULL )
{
LOG_ERROR ( " %s: unable to open device %s " , LOG_TAG_PKTIO , handle - > config . dev_nf_name ) ;
goto error_out ;
}
handle - > dev_nf . mr_path = marsio_sendpath_create_by_vdev ( handle - > dev_nf . mr_dev ) ;
if ( handle - > dev_nf . mr_path = = NULL )
2023-02-27 14:37:31 +08:00
{
2023-10-18 10:08:10 +08:00
LOG_ERROR ( " %s: unable to create sendpath for device %s " , LOG_TAG_PKTIO , handle - > config . dev_nf_name ) ;
2023-03-14 16:10:44 +08:00
goto error_out ;
2023-02-27 14:37:31 +08:00
}
2023-10-18 10:08:10 +08:00
handle - > dev_endpoint_l3 . mr_dev = marsio_open_device ( handle - > instance , handle - > config . dev_endpoint_l3_name , handle - > thread_num , handle - > thread_num ) ;
if ( handle - > dev_endpoint_l3 . mr_dev = = NULL )
2023-02-10 14:22:40 +08:00
{
2023-10-18 10:08:10 +08:00
LOG_ERROR ( " %s: unable to open device %s " , LOG_TAG_PKTIO , handle - > config . dev_endpoint_l3_name ) ;
2023-03-14 16:10:44 +08:00
goto error_out ;
2023-02-10 14:22:40 +08:00
}
2023-10-18 10:08:10 +08:00
handle - > dev_endpoint_l3 . mr_path = marsio_sendpath_create_by_vdev ( handle - > dev_endpoint_l3 . mr_dev ) ;
if ( handle - > dev_endpoint_l3 . mr_path = = NULL )
2023-02-10 14:22:40 +08:00
{
2023-10-18 10:08:10 +08:00
LOG_ERROR ( " %s: unable to create sendpath for device %s " , LOG_TAG_PKTIO , handle - > config . dev_endpoint_l3_name ) ;
2023-03-14 16:10:44 +08:00
goto error_out ;
2023-02-10 14:22:40 +08:00
}
2023-11-03 10:02:50 +08:00
if ( strlen ( handle - > config . dev_endpoint_l3_mac_str ) = = 0 )
{
marsio_get_device_ether_addr ( handle - > dev_endpoint_l3 . mr_dev , handle - > config . dev_endpoint_l3_mac_str , sizeof ( handle - > config . dev_endpoint_l3_mac_str ) ) ;
LOG_DEBUG ( " %s: PACKET_IO->dev_endpoint_l3_mac : %s (get from marsio api) " , LOG_TAG_PKTIO , handle - > config . dev_endpoint_l3_mac_str ) ;
}
str_to_mac ( handle - > config . dev_endpoint_l3_mac_str , handle - > config . dev_endpoint_l3_mac ) ;
handle - > config . dev_endpoint_l3_ip = inet_addr ( handle - > config . dev_endpoint_l3_ip_str ) ;
2023-10-18 10:08:10 +08:00
handle - > dev_endpoint_l2 . mr_dev = marsio_open_device ( handle - > instance , handle - > config . dev_endpoint_l2_name , handle - > thread_num , handle - > thread_num ) ;
if ( handle - > dev_endpoint_l2 . mr_dev = = NULL )
2023-02-10 14:22:40 +08:00
{
2023-10-18 10:08:10 +08:00
LOG_ERROR ( " %s: unable to open device %s " , LOG_TAG_PKTIO , handle - > config . dev_endpoint_l2_name ) ;
2023-03-14 16:10:44 +08:00
goto error_out ;
2023-02-10 14:22:40 +08:00
}
2023-10-18 10:08:10 +08:00
handle - > dev_endpoint_l2 . mr_path = marsio_sendpath_create_by_vdev ( handle - > dev_endpoint_l2 . mr_dev ) ;
if ( handle - > dev_endpoint_l2 . mr_path = = NULL )
2023-02-10 14:22:40 +08:00
{
2023-10-18 10:08:10 +08:00
LOG_ERROR ( " %s: unable to create sendpath for device %s " , LOG_TAG_PKTIO , handle - > config . dev_endpoint_l2_name ) ;
goto error_out ;
}
2023-03-14 16:10:44 +08:00
return handle ;
2023-02-10 14:22:40 +08:00
2023-03-14 16:10:44 +08:00
error_out :
packet_io_destory ( handle ) ;
return NULL ;
}
2023-02-21 09:58:31 +08:00
2023-03-14 16:10:44 +08:00
void packet_io_destory ( struct packet_io * handle )
{
if ( handle )
2023-02-10 14:22:40 +08:00
{
2023-10-18 10:08:10 +08:00
if ( handle - > dev_nf . mr_path )
{
marsio_sendpath_destory ( handle - > dev_nf . mr_path ) ;
handle - > dev_nf . mr_path = NULL ;
}
if ( handle - > dev_nf . mr_dev )
{
marsio_close_device ( handle - > dev_nf . mr_dev ) ;
handle - > dev_nf . mr_dev = NULL ;
}
if ( handle - > dev_endpoint_l3 . mr_path )
2023-02-10 14:22:40 +08:00
{
2023-10-18 10:08:10 +08:00
marsio_sendpath_destory ( handle - > dev_endpoint_l3 . mr_path ) ;
handle - > dev_endpoint_l3 . mr_path = NULL ;
2023-02-10 14:22:40 +08:00
}
2023-03-14 16:10:44 +08:00
2023-10-18 10:08:10 +08:00
if ( handle - > dev_endpoint_l3 . mr_dev )
2023-02-10 14:22:40 +08:00
{
2023-10-18 10:08:10 +08:00
marsio_close_device ( handle - > dev_endpoint_l3 . mr_dev ) ;
handle - > dev_endpoint_l3 . mr_dev = NULL ;
2023-02-10 14:22:40 +08:00
}
2023-10-18 10:08:10 +08:00
if ( handle - > dev_endpoint_l2 . mr_path )
2023-03-14 16:10:44 +08:00
{
2023-10-18 10:08:10 +08:00
marsio_sendpath_destory ( handle - > dev_endpoint_l2 . mr_path ) ;
handle - > dev_endpoint_l2 . mr_path = NULL ;
2023-03-14 16:10:44 +08:00
}
2023-03-06 15:09:01 +08:00
2023-10-18 10:08:10 +08:00
if ( handle - > dev_endpoint_l2 . mr_dev )
2023-03-14 16:10:44 +08:00
{
2023-10-18 10:08:10 +08:00
marsio_close_device ( handle - > dev_endpoint_l2 . mr_dev ) ;
handle - > dev_endpoint_l2 . mr_dev = NULL ;
2023-03-14 16:10:44 +08:00
}
2023-03-06 15:09:01 +08:00
2023-03-14 16:10:44 +08:00
if ( handle - > instance )
2023-03-06 15:09:01 +08:00
{
2023-03-14 16:10:44 +08:00
marsio_destory ( handle - > instance ) ;
handle - > instance = NULL ;
2023-03-06 15:09:01 +08:00
}
2023-03-14 16:10:44 +08:00
free ( handle ) ;
handle = NULL ;
2023-03-06 15:09:01 +08:00
}
2023-03-14 16:10:44 +08:00
}
2023-03-06 15:09:01 +08:00
2023-11-29 09:52:20 +08:00
int packet_io_init ( struct packet_io * handle , struct thread_ctx * thread_ctx )
2023-03-14 16:10:44 +08:00
{
if ( marsio_thread_init ( handle - > instance ) ! = 0 )
{
LOG_ERROR ( " %s: unable to init marsio thread %d " , LOG_TAG_PKTIO , thread_ctx - > thread_index ) ;
return - 1 ;
}
2023-03-06 15:09:01 +08:00
2023-03-14 16:10:44 +08:00
return 0 ;
}
2023-03-06 15:09:01 +08:00
2023-11-29 09:52:20 +08:00
void packet_io_wait ( struct packet_io * handle , struct thread_ctx * thread_ctx , int timeout_ms )
2023-03-14 16:10:44 +08:00
{
2023-11-03 10:02:50 +08:00
struct mr_vdev * vdevs [ 3 ] = {
2023-10-18 10:08:10 +08:00
handle - > dev_nf . mr_dev ,
handle - > dev_endpoint_l3 . mr_dev ,
handle - > dev_endpoint_l2 . mr_dev ,
} ;
2023-03-06 15:09:01 +08:00
2023-10-16 17:04:21 +08:00
int min_timeout_ms = MIN ( handle - > config . min_timeout_ms , timeout_ms ) ;
if ( min_timeout_ms > 0 )
{
2023-10-18 10:08:10 +08:00
marsio_poll_wait ( handle - > instance , vdevs , 3 , thread_ctx - > thread_index , min_timeout_ms ) ;
2023-10-16 17:04:21 +08:00
}
else
{
return ;
}
2023-03-06 15:09:01 +08:00
}
2023-11-29 09:52:20 +08:00
int packet_io_polling_nf ( struct packet_io * handle , struct thread_ctx * thread_ctx )
2023-02-10 14:22:40 +08:00
{
2023-10-12 16:31:53 +08:00
struct thread_metrics * thread_metrics = & thread_ctx - > thread_metrics ;
2023-03-14 16:10:44 +08:00
int thread_index = thread_ctx - > thread_index ;
2023-02-10 14:22:40 +08:00
2023-10-16 17:04:21 +08:00
static __thread marsio_buff_t * rx_buffs [ RX_BURST_MAX ] ;
2023-10-18 10:08:10 +08:00
int nr_recv = marsio_recv_burst ( handle - > dev_nf . mr_dev , thread_index , rx_buffs , handle - > config . rx_burst_max ) ;
2023-03-14 16:10:44 +08:00
if ( nr_recv < = 0 )
2023-02-10 14:22:40 +08:00
{
return 0 ;
}
2023-05-04 17:56:12 +08:00
if ( handle - > config . bypass_traffic = = 1 )
2023-02-10 14:22:40 +08:00
{
2023-03-14 16:10:44 +08:00
for ( int j = 0 ; j < nr_recv ; j + + )
2023-02-10 14:22:40 +08:00
{
2024-02-29 18:18:30 +08:00
marsio_buff_t * rx_buff = rx_buffs [ j ] ;
int raw_len = marsio_buff_datalen ( rx_buff ) ;
2023-02-21 09:58:31 +08:00
2023-11-24 15:17:18 +08:00
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > nf_rx ) , 1 , raw_len ) ;
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > nf_tx ) , 1 , raw_len ) ;
2023-02-10 14:22:40 +08:00
}
2023-03-14 16:10:44 +08:00
2023-10-18 10:08:10 +08:00
marsio_send_burst ( handle - > dev_nf . mr_path , thread_index , rx_buffs , nr_recv ) ;
2023-03-14 16:10:44 +08:00
return nr_recv ;
2023-02-10 14:22:40 +08:00
}
2023-03-14 16:10:44 +08:00
for ( int j = 0 ; j < nr_recv ; j + + )
2023-02-10 14:22:40 +08:00
{
2023-03-14 16:10:44 +08:00
marsio_buff_t * rx_buff = rx_buffs [ j ] ;
int raw_len = marsio_buff_datalen ( rx_buff ) ;
2023-02-10 14:22:40 +08:00
2023-11-22 16:16:59 +08:00
if ( is_downlink_keepalive_packet ( rx_buff , raw_len ) )
2023-03-14 16:10:44 +08:00
{
2023-11-24 15:17:18 +08:00
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > nf_rx ) , 1 , raw_len ) ;
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > nf_tx ) , 1 , raw_len ) ;
2023-02-10 14:22:40 +08:00
2023-11-24 15:17:18 +08:00
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > downlink_rx ) , 1 , raw_len ) ;
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > downlink_tx ) , 1 , raw_len ) ;
2023-02-10 14:22:40 +08:00
2023-10-18 10:08:10 +08:00
marsio_send_burst ( handle - > dev_nf . mr_path , thread_index , & rx_buff , 1 ) ;
2023-03-14 16:10:44 +08:00
}
else if ( marsio_buff_is_ctrlbuf ( rx_buff ) )
{
2023-11-24 15:17:18 +08:00
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > nf_rx ) , 1 , raw_len ) ;
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > nf_tx ) , 1 , raw_len ) ;
2023-02-10 14:22:40 +08:00
2023-11-24 15:17:18 +08:00
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > ctrl_rx ) , 1 , raw_len ) ;
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > ctrl_tx ) , 1 , raw_len ) ;
2023-02-10 14:22:40 +08:00
2023-11-22 16:16:59 +08:00
handle_control_packet ( rx_buff , thread_ctx , raw_len ) ;
2023-10-18 10:08:10 +08:00
marsio_send_burst ( handle - > dev_nf . mr_path , thread_index , & rx_buff , 1 ) ;
2023-03-14 16:10:44 +08:00
}
else
{
2023-11-24 15:17:18 +08:00
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > nf_rx ) , 1 , raw_len ) ;
2023-03-14 16:10:44 +08:00
2023-11-22 16:16:59 +08:00
handle_data_packet ( rx_buff , thread_ctx , raw_len ) ;
2023-03-14 16:10:44 +08:00
}
2023-02-17 17:45:39 +08:00
}
2023-02-10 14:22:40 +08:00
2023-03-14 16:10:44 +08:00
return nr_recv ;
2023-02-10 14:22:40 +08:00
}
2023-11-29 09:52:20 +08:00
int packet_io_polling_endpoint_l3 ( struct packet_io * handle , struct thread_ctx * thread_ctx )
2023-02-10 14:22:40 +08:00
{
2023-10-12 16:31:53 +08:00
struct thread_metrics * thread_metrics = & thread_ctx - > thread_metrics ;
2023-03-14 16:10:44 +08:00
int thread_index = thread_ctx - > thread_index ;
2023-02-24 11:49:25 +08:00
2023-10-16 17:04:21 +08:00
static __thread marsio_buff_t * rx_buffs [ RX_BURST_MAX ] ;
2023-10-18 10:08:10 +08:00
int nr_recv = marsio_recv_burst ( handle - > dev_endpoint_l3 . mr_dev , thread_index , rx_buffs , handle - > config . rx_burst_max ) ;
2023-03-14 16:10:44 +08:00
if ( nr_recv < = 0 )
2023-02-24 11:49:25 +08:00
{
return 0 ;
}
2023-03-14 16:10:44 +08:00
for ( int j = 0 ; j < nr_recv ; j + + )
2023-02-27 14:37:31 +08:00
{
2023-03-14 16:10:44 +08:00
marsio_buff_t * rx_buff = rx_buffs [ j ] ;
int raw_len = marsio_buff_datalen ( rx_buff ) ;
2023-02-27 14:37:31 +08:00
2023-11-22 16:16:59 +08:00
if ( is_uplink_keepalive_packet ( rx_buff , raw_len ) )
2023-03-14 16:10:44 +08:00
{
2023-11-24 15:17:18 +08:00
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > endpoint_vxlan_rx ) , 1 , raw_len ) ;
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > uplink_rx ) , 1 , raw_len ) ;
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > uplink_tx_drop ) , 1 , raw_len ) ;
2023-03-14 16:10:44 +08:00
2024-02-29 18:18:30 +08:00
PACKET_TRACE_ON_FREE ( handle - > instance , rx_buff ) ;
2023-03-14 16:10:44 +08:00
marsio_buff_free ( handle - > instance , & rx_buff , 1 , 0 , thread_index ) ;
}
else
{
2023-11-24 15:17:18 +08:00
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > endpoint_vxlan_rx ) , 1 , raw_len ) ;
2023-03-14 16:10:44 +08:00
2023-11-22 16:16:59 +08:00
handle_inject_vxlan_packet ( rx_buff , thread_ctx , raw_len ) ;
2023-03-14 16:10:44 +08:00
}
2023-02-27 14:37:31 +08:00
}
2023-03-14 16:10:44 +08:00
return nr_recv ;
}
2023-03-30 17:44:33 +08:00
2023-11-29 09:52:20 +08:00
int packet_io_polling_endpoint_l2 ( struct packet_io * handle , struct thread_ctx * thread_ctx )
2023-10-18 10:08:10 +08:00
{
struct thread_metrics * thread_metrics = & thread_ctx - > thread_metrics ;
int thread_index = thread_ctx - > thread_index ;
static __thread marsio_buff_t * rx_buffs [ RX_BURST_MAX ] ;
int nr_recv = marsio_recv_burst ( handle - > dev_endpoint_l2 . mr_dev , thread_index , rx_buffs , handle - > config . rx_burst_max ) ;
if ( nr_recv < = 0 )
{
return 0 ;
}
for ( int j = 0 ; j < nr_recv ; j + + )
{
marsio_buff_t * rx_buff = rx_buffs [ j ] ;
int raw_len = marsio_buff_datalen ( rx_buffs [ j ] ) ;
2023-11-24 15:17:18 +08:00
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > endpoint_vlan_rx ) , 1 , raw_len ) ;
THROUGHPUT_METRICS_INC ( & ( thread_metrics - > endpoint_vlan_drop ) , 1 , raw_len ) ;
2023-10-18 10:08:10 +08:00
2024-02-29 18:18:30 +08:00
PACKET_TRACE_ON_FREE ( handle - > instance , rx_buff ) ;
2023-10-18 10:08:10 +08:00
marsio_buff_free ( handle - > instance , & rx_buff , 1 , 0 , thread_index ) ;
}
return nr_recv ;
}
2023-03-30 17:44:33 +08:00
struct mr_instance * packet_io_get_mr_instance ( struct packet_io * handle )
{
if ( handle )
{
return handle - > instance ;
}
else
{
return NULL ;
}
}