21 #include <sys/ioctl.h> 33 #define foreach_memif_input_error \ 34 _(BUFFER_ALLOC_FAIL, "buffer allocation failed") \ 35 _(NOT_IP, "not ip packet") 39 #define _(f,s) MEMIF_INPUT_ERROR_##f, 58 static __clib_unused
u8 *
66 s =
format (s,
"memif: hw_if_index %d next-index %d",
109 u16 buffer_offset,
u16 buffer_vec_index)
134 seg->
flags |= VLIB_BUFFER_NEXT_PRESENT;
159 rv = d->
flags & (~valid_flags);
166 mif->
flags |= MEMIF_IF_FLAG_ERROR;
187 u32 n_rx_packets = 0, n_rx_bytes = 0;
188 u32 n_left, n_left_to_next, next_index;
194 u16 cur_slot, last_slot, ring_size, n_slots, mask;
196 u16 n_buffers = 0, n_alloc;
200 void *last_region_shm = 0;
205 mask = ring_size - 1;
215 if (cur_slot == last_slot)
217 n_slots = last_slot - cur_slot;
222 u32 dst_off, src_off, n_bytes_left;
231 dst_off = start_offset;
236 s0 = cur_slot & mask;
237 d0 = &ring->
desc[s0];
238 n_bytes_left = d0->
length;
252 mb0 = last_region_shm + d0->
offset;
256 u32 dst_free = buffer_size - dst_off;
260 dst_free = buffer_size;
263 u32 bytes_to_copy =
clib_min (dst_free, n_bytes_left);
266 n_bytes_left -= bytes_to_copy;
267 src_off += bytes_to_copy;
268 dst_off += bytes_to_copy;
290 MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
378 u32 n_from = n_rx_packets;
475 u32 n_left = n_rx_packets;
479 while (n_trace && n_left)
503 n_left_to_next -= n_rx_packets;
522 n_slots = ring_size - head + mq->
last_tail;
526 u16 s = head++ & mask;
549 u32 n_rx_packets = 0, n_rx_bytes = 0;
550 u32 *to_next = 0, *buffers;
551 u32 bi0, bi1, bi2, bi3;
558 u16 cur_slot, last_slot, ring_size, n_slots, mask, head;
566 mask = ring_size - 1;
577 last_slot = ring->
tail;
578 if (cur_slot == last_slot)
580 n_slots = last_slot - cur_slot;
589 s0 = cur_slot & mask;
591 ptd->
buffers[n_rx_packets++] = bi0;
595 d0 = &ring->
desc[s0];
608 hb->
flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
610 s0 = cur_slot & mask;
611 d0 = &ring->
desc[s0];
616 b0->
flags |= VLIB_BUFFER_NEXT_PRESENT;
635 n_from = n_rx_packets;
641 u32 next0, next1, next2, next3;
644 while (n_from >= 8 && n_left_to_next >= 4)
656 to_next[0] = bi0 = buffers[0];
657 to_next[1] = bi1 = buffers[1];
658 to_next[2] = bi2 = buffers[2];
659 to_next[3] = bi3 = buffers[3];
696 next0 = next1 = next2 = next3 = next_index;
723 n_left_to_next, bi0, bi1, bi2, bi3,
724 next0, next1, next2, next3);
729 while (n_from && n_left_to_next)
732 to_next[0] = bi0 = buffers[0];
764 n_left_to_next, bi0, next0);
782 n_slots = ring_size - head + mq->
last_tail;
789 dt->
length = buffer_length;
798 MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
801 while (n_alloc >= 32)
803 bi0 = mq->
buffers[(head + 4) & mask];
805 bi1 = mq->
buffers[(head + 5) & mask];
807 bi2 = mq->
buffers[(head + 6) & mask];
809 bi3 = mq->
buffers[(head + 7) & mask];
817 d0 = &ring->
desc[s0];
818 d1 = &ring->
desc[s1];
819 d2 = &ring->
desc[s2];
820 d3 = &ring->
desc[s3];
851 d0 = &ring->
desc[s0];
884 if ((mif->
flags & MEMIF_IF_FLAG_ADMIN_UP) &&
885 (mif->
flags & MEMIF_IF_FLAG_CONNECTED))
887 if (mif->
flags & MEMIF_IF_FLAG_ZERO_COPY)
891 dq->queue_id, mode_ip);
894 dq->queue_id, mode_eth);
896 else if (mif->
flags & MEMIF_IF_FLAG_IS_SLAVE)
926 .name =
"memif-input",
927 .sibling_of =
"device-input",
930 .state = VLIB_NODE_STATE_INTERRUPT,
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
static_always_inline uword memif_device_input_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, memif_if_t *mif, memif_ring_type_t type, u16 qid, memif_interface_mode_t mode)
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers. Frees the entire buffer chain for each buffer.
vnet_main_t * vnet_get_main(void)
vnet_interface_main_t interface_main
struct memif_if_t::@509 run
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
#define CLIB_MEMORY_STORE_BARRIER()
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
#define clib_memcpy_fast(a, b, c)
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
#define vec_add2_aligned(V, P, N, A)
Add N elements to end of vector V, return pointer to new elements in P.
u16 current_length
Nbytes between current data and the end of this buffer.
static vlib_frame_t * vlib_get_frame(vlib_main_t *vm, vlib_frame_t *f)
#define vlib_validate_buffer_enqueue_x4(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, bi2, bi3, next0, next1, next2, next3)
Finish enqueueing four buffers forward in the graph.
uint16_t memif_region_index_t
#define VLIB_NODE_FN(node)
u16 first_buffer_vec_index
#define vec_validate_aligned(V, I, A)
Make sure vector is long enough for given index (no header, specified alignment)
static u32 vlib_buffer_alloc_to_ring_from_pool(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers, u8 buffer_pool_index)
Allocate buffers into ring from specific buffer pool.
vlib_error_t * errors
Vector of errors for this node.
u8 buffer_pool_index
index of buffer pool this buffer belongs.
#define vec_reset_length(v)
Reset vector length to zero; NULL-pointer tolerant.
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
#define static_always_inline
#define vlib_prefetch_buffer_with_index(vm, bi, type)
Prefetch buffer metadata by buffer index. The first 64 bytes of the buffer contain most of the header information.
static_always_inline u32 memif_next_from_ip_hdr(vlib_node_runtime_t *node, vlib_buffer_t *b)
#define ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX
vlib_combined_counter_main_t * combined_sw_if_counters
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
u32 per_interface_next_index
#define vlib_get_new_next_frame(vm, node, next_index, vectors, n_vectors_left)
static __clib_unused char * memif_input_error_strings[]
static vlib_next_frame_t * vlib_node_runtime_get_next_frame(vlib_main_t *vm, vlib_node_runtime_t *n, u32 next_index)
memif_packet_op_t packet_ops[MEMIF_RX_VECTOR_SZ]
vlib_error_t error
Error code for buffers to be enqueued to error handler.
memif_region_index_t region
memif_copy_op_t * copy_ops
vlib_node_registration_t memif_input_node
(constructor) VLIB_REGISTER_NODE (memif_input_node)
static_always_inline void vlib_buffer_copy_template(vlib_buffer_t *b, vlib_buffer_t *bt)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
#define foreach_memif_input_error
u32 node_index
Node index.
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
static_always_inline u32 vlib_buffer_get_default_data_size(vlib_main_t *vm)
#define VLIB_REGISTER_NODE(x,...)
#define CLIB_PREFETCH(addr, size, type)
static_always_inline void vlib_buffer_enqueue_to_next(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts, uword count)
static void * vlib_frame_scalar_args(vlib_frame_t *f)
Get pointer to frame scalar data.
u32 current_config_index
Used by feature subgraph arcs to visit enabled feature nodes.
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
static_always_inline u32 sat_sub(u32 x, u32 y)
static_always_inline void memif_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *node, memif_if_t *mif, vlib_buffer_t *b, u32 next, u16 qid, uword *n_tracep)
vl_api_vxlan_gbp_api_tunnel_mode_t mode
static_always_inline uword memif_device_input_zc_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, memif_if_t *mif, u16 qid, memif_interface_mode_t mode)
static u32 vlib_buffer_alloc_from_pool(vlib_main_t *vm, u32 *buffers, u32 n_buffers, u8 buffer_pool_index)
Allocate buffers from specific pool into supplied array.
#define MEMIF_DESC_FLAG_NEXT
vlib_buffer_t buffer_template
memif_region_offset_t offset
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
#define foreach_device_and_queue(var, vec)
static_always_inline void memif_add_to_chain(vlib_main_t *vm, vlib_buffer_t *b, u32 *buffers, u32 buffer_size)
#define MEMIF_RX_VECTOR_SZ
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
u32 next_buffer
Next buffer for this linked-list of buffers.
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
VLIB buffer representation.
static_always_inline void memif_add_copy_op(memif_per_thread_data_t *ptd, void *data, u32 len, u16 buffer_offset, u16 buffer_vec_index)
static_always_inline u32 memif_desc_is_invalid(memif_if_t *mif, memif_desc_t *d, u32 buffer_length)
memif_log2_ring_size_t log2_ring_size
static_always_inline void vnet_feature_start_device_input_x1(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
memif_per_thread_data_t * per_thread_data
memif_queue_t * rx_queues
memif_desc_t desc_template
static void vlib_frame_no_append(vlib_frame_t *f)
static __clib_unused u8 * format_memif_input_trace(u8 *s, va_list *args)
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
#define CLIB_CACHE_LINE_BYTES
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
volatile u8 ref_count
Reference count for this buffer.
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
memif_interface_mode_t mode
memif_region_size_t region_size