static_always_inline void
ip4_input_check_sw_if_index (vlib_main_t * vm, vlib_simple_counter_main_t * cm,
                             u32 sw_if_index, u32 * last_sw_if_index,
                             u32 * cnt, int *arc_enabled)
{
  /* Coalesce counter updates while consecutive packets share an interface. */
  if (*last_sw_if_index == sw_if_index)
    { (*cnt)++; return; }
  /* ... otherwise flush the pending count and remember the new interface ... */
}

/* Quad-loop excerpt from ip4_input_inline () */
  u32 n_left_from, *from;
  u32 last_sw_if_index = ~0;
  /* ... */
  while (n_left_from >= 4)
    {
      /* Prefetch buffers for the next iteration. */
      if (n_left_from >= 12)
        { /* ... prefetch headers and data of b[4..11] ... */ }
      /* ... x stays zero only if all four packets share last_sw_if_index ... */
      x |= sw_if_index[0] ^ last_sw_if_index;
      x |= sw_if_index[1] ^ last_sw_if_index;
      x |= sw_if_index[2] ^ last_sw_if_index;
      x |= sw_if_index[3] ^ last_sw_if_index;
      /* ... slow path: per-packet interface check and counter update ... */
      ip4_input_check_sw_if_index (vm, cm, sw_if_index[0], &last_sw_if_index, &cnt, &arc_enabled);
      ip4_input_check_sw_if_index (vm, cm, sw_if_index[1], &last_sw_if_index, &cnt, &arc_enabled);
      ip4_input_check_sw_if_index (vm, cm, sw_if_index[2], &last_sw_if_index, &cnt, &arc_enabled);
      ip4_input_check_sw_if_index (vm, cm, sw_if_index[3], &last_sw_if_index, &cnt, &arc_enabled);
#ifndef CLIB_MARCH_VARIANT
/* Error strings are expanded from the foreach_ip4_error list. */
#define _(sym,string) string,
/* ... */

/* Registration of the "ip4-input" node (excerpt). */
VLIB_REGISTER_NODE (ip4_input_node) = {
  .name = "ip4-input",
  .vector_size = sizeof (u32),
  /* ... */
};

/* "ip4-input-no-checksum" is a sibling node, sharing ip4-input's next nodes. */
VLIB_REGISTER_NODE (ip4_input_no_checksum_node) = {
  .name = "ip4-input-no-checksum",
  .vector_size = sizeof (u32),
  /* ... */
  .sibling_of = "ip4-input",
  /* ... */
};

/* Init-time packet-generator hookup (excerpt). */
pn = pg_get_node (ip4_input_no_checksum_node.index);
vnet_main_t * vnet_get_main(void)
#define VLIB_MAIN_LOOP_ENTER_FUNCTION(x)
vnet_interface_main_t interface_main
void throttle_init(throttle_t *t, u32 n_threads, f64 time)
ip_lookup_main_t lookup_main
static void vlib_increment_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 increment)
Increment a simple counter.
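A hedged sketch of how the coalesced count is typically flushed into this counter (last_sw_if_index, cnt and cm follow the excerpt above; cm is assumed to point at the IP4 RX counter collection):
  /* Flush cnt packets that all arrived on last_sw_if_index. */
  if (cnt)
    vlib_increment_simple_counter (cm, vm->thread_index, last_sw_if_index, cnt);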
static_always_inline int vnet_have_features(u8 arc, u32 sw_if_index)
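A minimal sketch of caching the arc check per interface, assuming lm points at ip4_main.lookup_main:
  /* Remember whether any feature is enabled on the ip4-unicast arc for this
     interface, so per-packet processing can skip the lookup. */
  arc_enabled = vnet_have_features (lm->ucast_feature_arc_index, sw_if_index);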
#define VLIB_NODE_FN(node)
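A sketch of the usual multi-architecture node-function pattern; the worker name ip4_input_inline and the verify_checksum flag are assumptions based on the excerpt above, not quoted from the source:
  VLIB_NODE_FN (ip4_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                 vlib_frame_t * frame)
  {
    /* ip4-input verifies the checksum; ip4-input-no-checksum would pass 0. */
    return ip4_input_inline (vm, node, frame, /* verify_checksum */ 1);
  }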
static uword ip4_address_is_multicast(const ip4_address_t *a)
static clib_error_t * ip4_cli_init(vlib_main_t *vm)
u8 mcast_feature_arc_index
Feature arc indices.
static pg_node_t * pg_get_node(uword node_index)
#define static_always_inline
#define VLIB_INIT_FUNCTION(x)
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
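A sketch of how the quad-loop prefetches ahead; the offsets mirror the n_left_from >= 12 branch in the excerpt above:
  /* Prefetch metadata two iterations ahead and packet data one iteration
     ahead, so loads complete before the data is touched. */
  vlib_prefetch_buffer_header (b[8], LOAD);
  vlib_prefetch_buffer_data (b[4], LOAD);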
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
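For example, ip4-input style code can fetch the per-interface RX counter collection this way (VNET_INTERFACE_COUNTER_IP4 is the customary counter index):
  /* Bounds-checked lookup into the per-interface simple-counter vector. */
  cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
                         VNET_INTERFACE_COUNTER_IP4);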
struct vlib_simple_counter_main_t
A collection of simple counters.
#define vlib_call_init_function(vm, x)
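A hedged sketch of how init-time ordering is usually expressed; the choice of ip4_source_and_port_range_check_init as the dependency is illustrative:
  /* Ensure a dependent init function has already run; error is the
     enclosing init function's clib_error_t. */
  if ((error = vlib_call_init_function (vm, ip4_source_and_port_range_check_init)))
    return error;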
struct {...} host_config
Template information for VPP generated packets.
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
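At ip4-input the current data pointer is expected to be the start of the IPv4 header, for example:
  /* The buffer's current data is the IPv4 header at this point. */
  ip4_header_t *ip0 = vlib_buffer_get_current (b[0]);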
vlib_simple_counter_main_t * sw_if_counters
vlib_thread_main_t vlib_thread_main
#define foreach_ip4_error
#define VLIB_REGISTER_NODE(x,...)
static_always_inline void vlib_buffer_enqueue_to_next(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts, uword count)
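A sketch of the usual end-of-node step, handing the whole frame to the chosen next nodes (from and nexts as in the excerpt above):
  /* Enqueue every buffer of the frame to its per-packet next node. */
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);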
clib_error_t * ip4_source_check_init(vlib_main_t *vm)
static vlib_node_runtime_t * vlib_node_get_runtime(vlib_main_t *vm, u32 node_index)
Get node runtime by node index.
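A sketch of resolving the ip4-input runtime once so drops and counters can be attributed to it; ip4_input_node is assumed to be the registration shown above:
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_input_node.index);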
unformat_function_t * unformat_edit
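A sketch of how the packet-generator edit hook is typically wired up at init time; unformat_pg_ip4_header is assumed to be the ip4 packet-generator unformat function:
  /* Let the packet generator parse ip4 header edits for this input node. */
  pg_node_t *pn = pg_get_node (ip4_input_no_checksum_node.index);
  pn->unformat_edit = unformat_pg_ip4_header;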
#define vlib_prefetch_buffer_data(b, type)
void hdlc_register_input_protocol(vlib_main_t *vm, hdlc_protocol_t protocol, u32 node_index)
throttle_t arp_throttle
ARP throttling.
void ethernet_register_input_type(vlib_main_t *vm, ethernet_type_t type, u32 node_index)
struct _vlib_node_registration vlib_node_registration_t
u8 ucast_feature_arc_index
void vlib_trace_frame_buffers_only(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, uword n_buffers, uword next_buffer_stride, uword n_buffer_data_bytes_in_trace)
clib_error_t * ip4_source_and_port_range_check_init(vlib_main_t *vm)
struct vlib_buffer_t
VLIB buffer representation.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
ip4_main_t ip4_main
Global ip4 main structure.
void ppp_register_input_protocol(vlib_main_t *vm, ppp_protocol_t protocol, u32 node_index)
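A hedged sketch of the init-time registrations that point ethernet, PPP and HDLC IPv4 traffic at ip4-input; the ETHERNET_TYPE_IP4, PPP_PROTOCOL_ip4 and HDLC_PROTOCOL_ip4 values are the customary enum names, listed here as assumptions:
  ethernet_register_input_type (vm, ETHERNET_TYPE_IP4, ip4_input_node.index);
  ppp_register_input_protocol (vm, PPP_PROTOCOL_ip4, ip4_input_node.index);
  hdlc_register_input_protocol (vm, HDLC_PROTOCOL_ip4, ip4_input_node.index);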
u16 flags
Copy of main node flags.
u32 flow_hash_seed
Seed for Jenkins hash used to compute ip4 flow hash.
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
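Typical use at the top of a node function, together with vlib_frame_vector_args:
  /* Turn the frame's buffer indices into buffer pointers in one pass. */
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  u32 *from = vlib_frame_vector_args (frame);
  vlib_get_buffers (vm, from, bufs, frame->n_vectors);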
#define VLIB_NODE_FLAG_TRACE
u8 ttl
TTL to use for host generated packets.
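A sketch of init-time defaults for these fields; im is assumed to be &ip4_main and the concrete values are illustrative, not quoted from this file:
  /* Non-zero seed for the ip4 flow hash and a conventional default TTL. */
  im->flow_hash_seed = 0xdeadbeef;
  im->host_config.ttl = 64;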
static_always_inline void vnet_feature_arc_start(u8 arc, u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
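A sketch of starting the ip4-unicast feature arc for one packet when features are enabled; lm follows the conventions of the excerpt above and IP4_INPUT_NEXT_LOOKUP is the usual default next:
  /* Divert the packet into the feature chain; next0 is rewritten to the
     first enabled feature node on this interface. */
  u32 next0 = IP4_INPUT_NEXT_LOOKUP;
  vnet_feature_arc_start (lm->ucast_feature_arc_index, sw_if_index[0],
                          &next0, b[0]);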