#ifndef CLIB_MARCH_VARIANT
  interface_output_trace_t *t = va_arg (*va, interface_output_trace_t *);
#define _(bit, name, v, x) \
  if (v && (t->flags & VNET_BUFFER_F_##name)) \
    s = format (s, "%s ", v);
  if (t->flags & VNET_BUFFER_F_GSO)
    s = format (s, "\n%Ugso_sz %d gso_l4_hdr_sz %d",
  interface_output_trace_t *t0, *t1;
  if (b0->flags & VLIB_BUFFER_IS_TRACED)
  if (b1->flags & VLIB_BUFFER_IS_TRACED)
  interface_output_trace_t *t0;
  if (b0->flags & VLIB_BUFFER_IS_TRACED)
  int is_ip4 = (b->flags & VNET_BUFFER_F_IS_IP4) != 0;
  int is_ip6 = (b->flags & VNET_BUFFER_F_IS_IP6) != 0;
  ASSERT (!(is_ip4 && is_ip6));
  if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
  if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
  if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
  if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
  if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
  b->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
  b->flags &= ~VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
  b->flags &= ~VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
  u16 n_bufs = (n_bytes_b0 - l234_sz + (gso_size - 1)) / gso_size;
  if (n_alloc < n_bufs)
  nb0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
  u16 gso_size, u8 ** p_dst_ptr, u16 * p_dst_left,
  *p_dst_ptr = nb0->data + template_data_sz;
  tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);
  tcp->flags = tcp_flags;
  int is_ip4 = sb0->flags & VNET_BUFFER_F_IS_IP4;
  int is_ip6 = sb0->flags & VNET_BUFFER_F_IS_IP6;
  ASSERT (is_ip4 || is_ip6);
  ASSERT (sb0->flags & VNET_BUFFER_F_L2_HDR_OFFSET_VALID);
  ASSERT (sb0->flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID);
  ASSERT (sb0->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
  u8 save_tcp_flags = 0;
  u8 tcp_flags_no_fin_psh = 0;
  u32 next_tcp_seq = 0;
  next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
  save_tcp_flags = tcp->flags;
  tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);
  sb0->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
  next_tcp_seq += first_data_size;
  l234_sz + first_data_size);
  u32 total_src_left = n_bytes_b0 - l234_sz - first_data_size;
  u8 *src_ptr, *dst_ptr;
  u16 src_left, dst_left;
  src_ptr = sb0->data + l234_sz + first_data_size;
  &dst_left, next_tcp_seq, default_bflags);
  while (total_src_left)
  src_left -= bytes_to_copy;
  src_ptr += bytes_to_copy;
  total_src_left -= bytes_to_copy;
  dst_left -= bytes_to_copy;
  dst_ptr += bytes_to_copy;
  next_tcp_seq += bytes_to_copy;
  int has_next = (csb0->flags & VLIB_BUFFER_NEXT_PRESENT);
  ASSERT (total_src_left == 0);
  if (0 == dst_left && total_src_left)
  gso_size, &dst_ptr, &dst_left,
  next_tcp_seq, default_bflags);
  u32 n_left_to_tx, *from, *from_end, *to_tx;
  u32 n_bytes, n_buffers, n_packets;
  u32 n_bytes_b0, n_bytes_b1, n_bytes_b2, n_bytes_b3;
  u32 current_config_index = ~0;
  u8 arc = im->output_feature_arc_index;
  from_end = from + n_buffers;
  while (from < from_end)
  while (from + 8 <= from_end && n_left_to_tx >= 4)
  u32 bi0, bi1, bi2, bi3;
  u32 tx_swif0, tx_swif1, tx_swif2, tx_swif3;
  if (!do_segmentation)
  n_bytes += n_bytes_b0 + n_bytes_b1;
  n_bytes += n_bytes_b2 + n_bytes_b3;
  thread_index, tx_swif0, 1,
  thread_index, tx_swif1, 1,
  thread_index, tx_swif2, 1,
  thread_index, tx_swif3, 1,
  if (!do_segmentation)
  (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
   VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
   VNET_BUFFER_F_OFFLOAD_IP_CKSUM))
  while (from + 1 <= from_end && n_left_to_tx >= 1)
  n_bytes += n_bytes_b0;
  n_bytes -= n_bytes_b0;
  u32 *from_tx_seg = ptd->split_buffers;
  while (n_tx_bufs > 0)
  if (n_tx_bufs >= n_left_to_tx)
  while (n_left_to_tx > 0)
  to_tx[0] = from_tx_seg[0];
  to_tx, n_left_to_tx);
  while (n_tx_bufs > 0)
  to_tx[0] = from_tx_seg[0];
  n_bytes += n_tx_bytes;
  (im->combined_sw_if_counters +
   _vec_len (ptd->split_buffers), n_tx_bytes);
  _vec_len (ptd->split_buffers) = 0;
  thread_index, tx_swif0, 1,
  int sw_if_index_from_buffer)
  u32 n_left_from, *from;
  if (sw_if_index_from_buffer == 0)
  while (n_left_from > 0)
  if (sw_if_index_from_buffer)
#ifndef CLIB_MARCH_VARIANT
  u32 n_left_to_next, *from, *to_next;
  u32 n_left_from, next_index;
  next_index = node->cached_next_index;
  while (n_left_from > 0)
  while (n_left_from >= 4 && n_left_to_next >= 2)
  u32 bi0, bi1, next0, next1;
  n_left_to_next, bi0, bi1, next0,
  while (n_left_from > 0 && n_left_to_next > 0)
  n_left_to_next, bi0, next0);
  u32 n_left, *buffers;
  if (b0->flags & VLIB_BUFFER_IS_TRACED)
  if (b1->flags & VLIB_BUFFER_IS_TRACED)
  if (b0->flags & VLIB_BUFFER_IS_TRACED)
  vnet_error_disposition_t disposition)
  sw_if_index = sw_if_indices;
  sw_if_index = sw_if_indices + off;
  i16 save_current_data;
  u16 save_current_length;
#ifndef CLIB_MARCH_VARIANT
  .name = "error-drop",
  .vector_size = sizeof (u32),
  .name = "error-punt",
  .vector_size = sizeof (u32),
  .name = "interface-output",
  .vector_size = sizeof (u32),
  u32 last_sw_if_index = ~0;
  u32 *from, *to_next = 0;
  while (n_left_from > 0)
  if (PREDICT_FALSE ((last_sw_if_index != sw_if_index0) || to_frame == 0))
  last_sw_if_index = sw_if_index0;
  .name = "interface-tx",
  .vector_size = sizeof (u32),
  .arc_name = "interface-output",
  .last_in_arc = "interface-tx",
  .arc_name = "interface-output",
  .node_name = "span-output",
  .arc_name = "interface-output",
  .node_name = "ipsec-if-output",
  .arc_name = "interface-output",
  .node_name = "interface-tx",
#ifndef CLIB_MARCH_VARIANT
  (vnm->vlib_main, vnet_per_buffer_interface_output_node.index,
  u32 hw_if_index, u32 node_index)
  (vnm->vlib_main, vnet_per_buffer_interface_output_node.index, node_index);
  else if (unformat (input, "max %d", &max))
  else if (unformat (input, "intfc %U",
  else if (unformat (input, "intfc any"))
  else if (unformat (input, "file %s", &filename))
  u8 *chroot_filename;
  if (strstr ((char *) filename, "..")
      || index ((char *) filename, '/'))
  chroot_filename = format (0, "/tmp/%s%c", filename, 0);
  else if (unformat (input, "status"))
  .path = "pcap drop trace",
  .short_help =
    "pcap drop trace on off max <nn> intfc <intfc> file <name> status",
u8 * format_vnet_interface_output_trace(u8 *s, va_list *va)
vnet_config_main_t config_main
static_always_inline uword vnet_interface_output_node_inline_gso(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, vnet_main_t *vnm, vnet_hw_interface_t *hi, int do_tx_offloads, int do_segmentation)
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
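The VLIB_BUFFER_IS_TRACED tests in the listing above follow the standard tracing idiom: only buffers the tracer has flagged get a trace record. A minimal sketch, assuming a VPP build environment; maybe_trace_buffer is a hypothetical helper and the trace fields mirror the interface_output_trace_t used in this file.

/* Append a trace record only for buffers the tracer has flagged. */
static void
maybe_trace_buffer (vlib_main_t * vm, vlib_node_runtime_t * node,
                    vlib_buffer_t * b0)
{
  if (b0->flags & VLIB_BUFFER_IS_TRACED)
    {
      interface_output_trace_t *t0 =
        vlib_add_trace (vm, node, b0, sizeof (t0[0]));
      t0->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
      t0->flags = b0->flags;
    }
}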
VNET_FEATURE_ARC_INIT(interface_output, static)
#define hash_set(h, key, value)
char * file_name
File name of pcap output.
void vnet_set_interface_output_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
Set interface output node - for interface registered without its output/tx nodes created because its ...
static_always_inline uword vnet_interface_output_node_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, vnet_main_t *vnm, vnet_hw_interface_t *hi, int do_tx_offloads)
#define hash_unset(h, key)
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
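This is the counter the TX path bumps around the "thread_index, tx_swifN, 1," call sites above: one packet plus its byte count per buffer, against the per-interface combined TX counter. A minimal sketch, assuming a VPP build environment; account_tx and its arguments are illustrative.

static void
account_tx (vlib_main_t * vm, u32 sw_if_index, u64 n_packets, u64 n_bytes)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_interface_main_t *im = &vnm->interface_main;

  /* combined counters track packets and bytes together */
  vlib_increment_combined_counter (im->combined_sw_if_counters +
                                   VNET_INTERFACE_COUNTER_TX,
                                   vm->thread_index, sw_if_index,
                                   n_packets, n_bytes);
}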
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers. Frees the entire buffer chain for each buffer.
u8 runtime_data[0]
Function dependent node-runtime data.
u32 n_packets_to_capture
Number of packets to capture.
vnet_main_t * vnet_get_main(void)
static vnet_hw_interface_t * vnet_get_sup_hw_interface(vnet_main_t *vnm, u32 sw_if_index)
vnet_interface_main_t interface_main
i16 current_data
Signed offset in data[], pre_data[] that we are currently processing.
static_always_inline void drop_one_buffer_and_count(vlib_main_t *vm, vnet_main_t *vnm, vlib_node_runtime_t *node, u32 *pbi0, u32 drop_error_code)
#define clib_memcpy_fast(a, b, c)
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
u16 current_length
Number of bytes between current data and the end of this buffer.
static_always_inline u16 tso_alloc_tx_bufs(vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd, vlib_buffer_t *b0, u16 l4_hdr_sz)
static_always_inline void tso_init_buf_from_template(vlib_main_t *vm, vlib_buffer_t *nb0, vlib_buffer_t *b0, u16 template_data_sz, u16 gso_size, u8 **p_dst_ptr, u16 *p_dst_left, u32 next_tcp_seq, u32 flags)
clib_error_t * vnet_per_buffer_interface_output_hw_interface_add_del(vnet_main_t *vnm, u32 hw_if_index, u32 is_create)
static void vnet_interface_output_trace(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, uword n_buffers)
clib_memset(h->entries, 0, sizeof(h->entries[0])*entries)
static vnet_sw_interface_t * vnet_get_sw_interface(vnet_main_t *vnm, u32 sw_if_index)
static void vlib_increment_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 increment)
Increment a simple counter.
unformat_function_t unformat_vnet_sw_interface
static_always_inline int vnet_have_features(u8 arc, u32 sw_if_index)
void vnet_pcap_drop_trace_filter_add_del(u32 error_index, int is_add)
#define VLIB_NODE_FN(node)
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
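The n_bytes_bN values accumulated above come from this helper, which walks the chain when VLIB_BUFFER_NEXT_PRESENT is set. A small sketch, assuming a VPP environment; tx_bytes_for_buffer is a hypothetical wrapper.

static u32
tx_bytes_for_buffer (vlib_main_t * vm, u32 bi0)
{
  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
  /* sums current_length over every buffer in the chain */
  return vlib_buffer_length_in_chain (vm, b0);
}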
struct _tcp_header tcp_header_t
format_function_t format_vnet_sw_if_index_name
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
#define static_always_inline
uword * pcap_drop_filter_hash
static_always_inline u32 tso_segment_buffer(vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd, int do_tx_offloads, u32 sbi0, vlib_buffer_t *sb0, u32 n_bytes_b0)
Allocate the necessary number of ptd->split_buffers, and segment the possibly chained buffer(s) from ...
#define vlib_prefetch_buffer_with_index(vm, bi, type)
Prefetch buffer metadata by buffer index. The first 64 bytes of the buffer contain most header information ...
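The quad loop above ("while (from + 8 <= from_end && n_left_to_tx >= 4)") prefetches the next iteration's buffer metadata while the current four buffers are processed. A sketch of that pattern, assuming a VPP environment; prefetch_next_four is an illustrative helper.

static void
prefetch_next_four (vlib_main_t * vm, u32 * from)
{
  /* the loop guard guarantees from[4..7] are valid indices */
  vlib_prefetch_buffer_with_index (vm, from[4], LOAD);
  vlib_prefetch_buffer_with_index (vm, from[5], LOAD);
  vlib_prefetch_buffer_with_index (vm, from[6], LOAD);
  vlib_prefetch_buffer_with_index (vm, from[7], LOAD);
}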
static_always_inline void calc_checksums(vlib_main_t *vm, vlib_buffer_t *b)
struct vnet_error_trace_t_ vnet_error_trace_t
vnet_hw_interface_flags_t flags
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
vlib_frame_t * vlib_get_frame_to_node(vlib_main_t *vm, u32 to_node_index)
#define vlib_get_new_next_frame(vm, node, next_index, vectors, n_vectors_left)
#define clib_error_return(e, args...)
A collection of simple counters.
static void pcap_drop_trace(vlib_main_t *vm, vnet_interface_main_t *im, vlib_frame_t *f)
static uword interface_tx_node_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
vlib_error_t error
Error code for buffers to be enqueued to error handler.
format_function_t format_vnet_sw_interface_name
static_always_inline void tso_init_buf_from_template_base(vlib_buffer_t *nb0, vlib_buffer_t *b0, u32 flags, u16 length)
static u8 * format_vnet_error_trace(u8 *s, va_list *va)
VNET_FEATURE_INIT(span_tx, static)
uword vnet_interface_output_node(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
uword vlib_error_drop_buffers(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u32 next_buffer_stride, u32 n_buffers, u32 next_index, u32 drop_error_node, u32 drop_error_code)
void vlib_put_frame_to_node(vlib_main_t *vm, u32 to_node_index, vlib_frame_t *f)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
static void * vnet_get_config_data(vnet_config_main_t *cm, u32 *config_index, u32 *next_index, u32 n_data_bytes)
vlib_simple_counter_main_t * sw_if_counters
format_function_t * format_buffer
u32 node_index
Node index.
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
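Together with vlib_put_next_frame, these macros form the dispatch skeleton visible in the bi0/next0 enqueue loops above. A sketch of the single-buffer variant under the usual VPP conventions; example_node_fn is not a node in this file.

static uword
example_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                 vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from = frame->n_vectors;
  u32 next_index = node->cached_next_index;
  u32 *to_next;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = from[0];
          u32 next0 = next_index;   /* a real node may choose per packet */

          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          /* fixes up the enqueue when next0 differs from next_index */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}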
static void interface_trace_buffers(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
static_always_inline void tso_fixup_segmented_buf(vlib_buffer_t *b0, u8 tcp_flags, int is_ip6)
static_always_inline u32 vlib_buffer_get_default_data_size(vlib_main_t *vm)
#define VLIB_REGISTER_NODE(x,...)
static_always_inline void vlib_buffer_enqueue_to_next(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts, uword count)
#define vec_free(V)
Free vector's memory (no header).
#define pool_is_free_index(P, I)
Use free bitmap to query whether given index is free.
u32 current_config_index
Used by feature subgraph arcs to visit enabled feature nodes.
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
clib_error_t * pcap_write(pcap_main_t *pm)
Write PCAP file.
u8 data[128 - 3 * sizeof (u32)]
#define VLIB_CLI_COMMAND(x,...)
u16 ip6_tcp_udp_icmp_compute_checksum(vlib_main_t *vm, vlib_buffer_t *p0, ip6_header_t *ip0, int *bogus_lengthp)
#define hash_create(elts, value_bytes)
u32 output_node_next_index
u8 output_feature_arc_index
u16 ip4_tcp_udp_compute_checksum(vlib_main_t *vm, vlib_buffer_t *p0, ip4_header_t *ip0)
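calc_checksums() uses these helpers to fall back to software checksums when the OFFLOAD flags are set, then clears the flags, as the "b->flags &= ~..." lines above show. A condensed sketch of the IPv4/TCP case, assuming VPP headers; sw_checksum_ip4_tcp and its ip4/tcp pointer arguments are illustrative.

static void
sw_checksum_ip4_tcp (vlib_main_t * vm, vlib_buffer_t * b,
                     ip4_header_t * ip4, tcp_header_t * tcp)
{
  if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
    ip4->checksum = ip4_header_checksum (ip4);
  if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
    {
      tcp->checksum = 0;        /* must be zero while summing */
      tcp->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
    }
  /* offload has been done in software; drop the request bits */
  b->flags &= ~(VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
                VNET_BUFFER_F_OFFLOAD_TCP_CKSUM);
}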
#define clib_error_report(e)
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
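The save_current_data / save_current_length pair declared in the drop/punt path above exists so tracing can rewind to the start of the packet and then put the buffer back. A sketch of that save/rewind/restore idea, assuming VPP headers; rewind_for_trace is illustrative.

static void
rewind_for_trace (vlib_buffer_t * b0)
{
  i16 save_current_data = b0->current_data;
  u16 save_current_length = b0->current_length;

  /* negative advance moves current_data back toward the buffer start */
  vlib_buffer_advance (b0, -(word) b0->current_data);

  /* ... capture or trace the packet here ... */

  b0->current_data = save_current_data;
  b0->current_length = save_current_length;
}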
static_always_inline void vnet_interface_pcap_tx_trace(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, int sw_if_index_from_buffer)
#define VNET_FEATURES(...)
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
VNET_HW_INTERFACE_ADD_DEL_FUNCTION(vnet_per_buffer_interface_output_hw_interface_add_del)
pcap_packet_type_t packet_type
Packet type.
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
static_always_inline u32 vnet_get_feature_config_index(u8 arc, u32 sw_if_index)
u32 next_buffer
Next buffer for this linked-list of buffers.
VLIB buffer representation.
vnet_sw_interface_t * sw_interfaces
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
static clib_error_t * pcap_drop_trace_command_fn(vlib_main_t *vm, unformat_input_t *input, vlib_cli_command_t *cmd)
vnet_pcap_t pcap[VLIB_N_RX_TX]
static_always_inline uword clib_count_equal_u32(u32 *data, uword max_count)
u16 flags
Copy of main node flags.
static void vlib_buffer_free_one(vlib_main_t *vm, u32 buffer_index)
Free one buffer. Shorthand to free a single buffer chain.
static_always_inline uword interface_drop_punt(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, vnet_error_disposition_t disposition)
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
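Newer loops in this file translate the whole frame's indices to pointers up front instead of calling vlib_get_buffer per packet. A sketch assuming a VPP environment; get_frame_buffers is illustrative and bufs must hold at least frame->n_vectors entries.

static void
get_frame_buffers (vlib_main_t * vm, vlib_frame_t * frame,
                   vlib_buffer_t ** bufs)
{
  u32 *from = vlib_frame_vector_args (frame);
  vlib_get_buffers (vm, from, bufs, frame->n_vectors);
}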
#define VLIB_NODE_FLAG_TRACE
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
static u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
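tso_alloc_tx_bufs relies on the return value possibly being smaller than requested, which is what the "if (n_alloc < n_bufs)" line above checks. A sketch of that handling, assuming a VPP environment; alloc_or_give_back is illustrative.

static u16
alloc_or_give_back (vlib_main_t * vm, u32 * buffers, u16 n_bufs)
{
  u16 n_alloc = vlib_buffer_alloc (vm, buffers, n_bufs);
  if (PREDICT_FALSE (n_alloc < n_bufs))
    {
      /* partial allocation: hand the buffers back and report failure */
      if (n_alloc > 0)
        vlib_buffer_free (vm, buffers, n_alloc);
      return 0;
    }
  return n_alloc;
}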
static_always_inline vnet_feature_config_main_t * vnet_feature_get_config_main(u16 arc)
static void pcap_add_buffer(pcap_main_t *pm, struct vlib_main_t *vm, u32 buffer_index, u32 n_bytes_in_trace)
Add buffer (vlib_buffer_t) to the trace.
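pcap_drop_trace uses this together with the n_packets_captured / n_packets_to_capture counters and pcap_write listed here. A sketch of that capture-until-budget-exhausted logic, assuming a VPP environment; capture_drop is illustrative and 512 is an arbitrary per-packet snap length.

static void
capture_drop (vlib_main_t * vm, pcap_main_t * pm, u32 bi0)
{
  if (pm->n_packets_captured < pm->n_packets_to_capture)
    pcap_add_buffer (pm, vm, bi0, 512);

  if (pm->n_packets_captured >= pm->n_packets_to_capture)
    {
      clib_error_t *error = pcap_write (pm);
      if (error)
        clib_error_report (error);
    }
}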
void vlib_cli_output(vlib_main_t *vm, char *fmt,...)
u32 n_packets_captured
Number of packets currently captured.
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static u16 ip4_header_checksum(ip4_header_t *i)
#define clib_panic(format, args...)
u32 opaque[10]
Opaque data used by sub-graphs for their own purposes.