/*
 * FD.io VPP v21.01.1 - Vector Packet Processing
 * svm_fifo.c
 * NOTE: this file is an export from the online documentation browser; the
 * per-line numbers embedded below and the dropped lines are extraction
 * artifacts, not part of the original source.
 */
1 /*
2  * Copyright (c) 2016-2019 Cisco and/or its affiliates.
3  * Copyright (c) 2019 Arm Limited
4  * Copyright (c) 2010-2017 Intel Corporation and/or its affiliates.
5  * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
6  * Inspired from DPDK rte_ring.h (SPSC only) (derived from freebsd bufring.h).
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at:
10  *
11  * http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  */
19 
20 #include <svm/svm_fifo.h>
21 #include <svm/fifo_segment.h>
22 #include <vppinfra/cpu.h>
23 
25  svm_fifo_chunk_t * c, u32 tail_idx, const u8 * src, u32 len,
27 {
28  u32 n_chunk;
29 
30  ASSERT (f_pos_geq (tail_idx, c->start_byte)
31  && f_pos_lt (tail_idx, c->start_byte + c->length));
32 
33  tail_idx -= c->start_byte;
34  n_chunk = c->length - tail_idx;
35  if (n_chunk <= len)
36  {
37  u32 to_copy = len;
38  clib_memcpy_fast (&c->data[tail_idx], src, n_chunk);
39  c = c->next;
40  while ((to_copy -= n_chunk))
41  {
42  n_chunk = clib_min (c->length, to_copy);
43  clib_memcpy_fast (&c->data[0], src + (len - to_copy), n_chunk);
44  c = c->length <= to_copy ? c->next : c;
45  }
46  if (*last)
47  *last = c;
48  }
49  else
50  {
51  clib_memcpy_fast (&c->data[tail_idx], src, len);
52  }
53 }
54 
56  svm_fifo_chunk_t * c, u32 head_idx, u8 * dst, u32 len,
58 {
59  u32 n_chunk;
60 
61  ASSERT (f_pos_geq (head_idx, c->start_byte)
62  && f_pos_lt (head_idx, c->start_byte + c->length));
63 
64  head_idx -= c->start_byte;
65  n_chunk = c->length - head_idx;
66  if (n_chunk <= len)
67  {
68  u32 to_copy = len;
69  clib_memcpy_fast (dst, &c->data[head_idx], n_chunk);
70  c = c->next;
71  while ((to_copy -= n_chunk))
72  {
73  CLIB_MEM_UNPOISON (c, sizeof (*c));
74  CLIB_MEM_UNPOISON (c->data, c->length);
75  n_chunk = clib_min (c->length, to_copy);
76  clib_memcpy_fast (dst + (len - to_copy), &c->data[0], n_chunk);
77  c = c->length <= to_copy ? c->next : c;
78  }
79  if (*last)
80  *last = c;
81  }
82  else
83  {
84  clib_memcpy_fast (dst, &c->data[head_idx], len);
85  }
86 }
87 
88 #ifndef CLIB_MARCH_VARIANT
89 
90 static inline void
92  const u8 * src, u32 len, svm_fifo_chunk_t ** last)
93 {
95  last);
96 }
97 
98 static inline void
101 {
103  last);
104 }
105 
106 static inline u32
108 {
109  return (s->start + s->length);
110 }
111 
112 void
114 {
115  pool_free (f->ooo_segments);
116 }
117 
118 static inline ooo_segment_t *
120 {
122  return 0;
123  return pool_elt_at_index (f->ooo_segments, s->prev);
124 }
125 
126 static inline ooo_segment_t *
128 {
130  return 0;
131  return pool_elt_at_index (f->ooo_segments, s->next);
132 }
133 
134 static inline ooo_segment_t *
136 {
137  ooo_segment_t *s;
138 
139  pool_get (f->ooo_segments, s);
140 
141  s->start = start;
142  s->length = length;
144 
145  return s;
146 }
147 
148 static inline void
150 {
151  ooo_segment_t *cur, *prev = 0, *next = 0;
152  cur = pool_elt_at_index (f->ooo_segments, index);
153 
154  if (cur->next != OOO_SEGMENT_INVALID_INDEX)
155  {
156  next = pool_elt_at_index (f->ooo_segments, cur->next);
157  next->prev = cur->prev;
158  }
159 
160  if (cur->prev != OOO_SEGMENT_INVALID_INDEX)
161  {
162  prev = pool_elt_at_index (f->ooo_segments, cur->prev);
163  prev->next = cur->next;
164  }
165  else
166  {
167  f->ooos_list_head = cur->next;
168  }
169 
170  pool_put (f->ooo_segments, cur);
171 }
172 
173 /**
174  * Add segment to fifo's out-of-order segment list. Takes care of merging
175  * adjacent segments and removing overlapping ones.
176  */
177 static void
179 {
180  ooo_segment_t *s, *new_s, *prev, *next, *it;
181  u32 new_index, s_end_pos, s_index;
182  u32 offset_pos, offset_end_pos;
183 
184  ASSERT (offset + length <= f_free_count (f, head, tail));
185 
186  offset_pos = tail + offset;
187  offset_end_pos = tail + offset + length;
188 
189  f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
190 
191  if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX)
192  {
193  s = ooo_segment_alloc (f, offset_pos, length);
194  f->ooos_list_head = s - f->ooo_segments;
195  f->ooos_newest = f->ooos_list_head;
196  return;
197  }
198 
199  /* Find first segment that starts after new segment */
200  s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head);
201  while (s->next != OOO_SEGMENT_INVALID_INDEX
202  && f_pos_lt (s->start, offset_pos))
203  s = pool_elt_at_index (f->ooo_segments, s->next);
204 
205  /* If we have a previous and we overlap it, use it as starting point */
206  prev = ooo_segment_prev (f, s);
207  if (prev && f_pos_leq (offset_pos, ooo_segment_end_pos (prev)))
208  {
209  s = prev;
210  s_end_pos = ooo_segment_end_pos (s);
211 
212  /* Since we have previous, offset start position cannot be smaller
213  * than prev->start. Check tail */
214  ASSERT (f_pos_lt (s->start, offset_pos));
215  goto check_tail;
216  }
217 
218  s_index = s - f->ooo_segments;
219  s_end_pos = ooo_segment_end_pos (s);
220 
221  /* No overlap, add before current segment */
222  if (f_pos_lt (offset_end_pos, s->start))
223  {
224  new_s = ooo_segment_alloc (f, offset_pos, length);
225  new_index = new_s - f->ooo_segments;
226 
227  /* Pool might've moved, get segment again */
228  s = pool_elt_at_index (f->ooo_segments, s_index);
230  {
231  new_s->prev = s->prev;
232  prev = pool_elt_at_index (f->ooo_segments, new_s->prev);
233  prev->next = new_index;
234  }
235  else
236  {
237  /* New head */
238  f->ooos_list_head = new_index;
239  }
240 
241  new_s->next = s_index;
242  s->prev = new_index;
243  f->ooos_newest = new_index;
244  return;
245  }
246  /* No overlap, add after current segment */
247  else if (f_pos_gt (offset_pos, s_end_pos))
248  {
249  new_s = ooo_segment_alloc (f, offset_pos, length);
250  new_index = new_s - f->ooo_segments;
251 
252  /* Pool might've moved, get segment again */
253  s = pool_elt_at_index (f->ooo_segments, s_index);
254 
255  /* Needs to be last */
257 
258  new_s->prev = s_index;
259  s->next = new_index;
260  f->ooos_newest = new_index;
261 
262  return;
263  }
264 
265  /*
266  * Merge needed
267  */
268 
269  /* Merge at head */
270  if (f_pos_lt (offset_pos, s->start))
271  {
272  s->start = offset_pos;
273  s->length = s_end_pos - s->start;
274  f->ooos_newest = s - f->ooo_segments;
275  }
276 
277 check_tail:
278 
279  /* Overlapping tail */
280  if (f_pos_gt (offset_end_pos, s_end_pos))
281  {
282  s->length = offset_end_pos - s->start;
283 
284  /* Remove the completely overlapped segments in the tail */
285  it = ooo_segment_next (f, s);
286  while (it && f_pos_leq (ooo_segment_end_pos (it), offset_end_pos))
287  {
288  next = ooo_segment_next (f, it);
289  ooo_segment_free (f, it - f->ooo_segments);
290  it = next;
291  }
292 
293  /* If partial overlap with last, merge */
294  if (it && f_pos_leq (it->start, offset_end_pos))
295  {
296  s->length = ooo_segment_end_pos (it) - s->start;
297  ooo_segment_free (f, it - f->ooo_segments);
298  }
299  f->ooos_newest = s - f->ooo_segments;
300  }
301 }
302 
303 /**
304  * Removes segments that can now be enqueued because the fifo's tail has
305  * advanced. Returns the number of bytes added to tail.
306  */
307 static int
308 ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued, u32 * tail)
309 {
310  u32 s_index, bytes = 0;
311  ooo_segment_t *s;
312  i32 diff;
313 
314  s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head);
315  diff = *tail - s->start;
316 
317  ASSERT (diff != n_bytes_enqueued);
318 
319  if (diff > n_bytes_enqueued)
320  return 0;
321 
322  /* If last tail update overlaps one/multiple ooo segments, remove them */
323  while (0 <= diff && diff < n_bytes_enqueued)
324  {
325  s_index = s - f->ooo_segments;
326 
327  /* Segment end is beyond the tail. Advance tail and remove segment */
328  if (s->length > diff)
329  {
330  bytes = s->length - diff;
331  *tail = *tail + bytes;
332  ooo_segment_free (f, s_index);
333  break;
334  }
335 
336  /* If we have next go on */
338  {
339  s = pool_elt_at_index (f->ooo_segments, s->next);
340  diff = *tail - s->start;
341  ooo_segment_free (f, s_index);
342  }
343  /* End of search */
344  else
345  {
346  ooo_segment_free (f, s_index);
347  break;
348  }
349  }
350 
351  ASSERT (bytes <= f->size);
352  return bytes;
353 }
354 
355 __clib_unused static ooo_segment_t *
357 {
358  ooo_segment_t *s;
359 
360  if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX)
361  return 0;
362 
364  while (s->next != OOO_SEGMENT_INVALID_INDEX)
365  s = pool_elt_at_index (f->ooo_segments, s->next);
366  return s;
367 }
368 
369 void
371 {
372  svm_fifo_chunk_t *c, *prev;
373  u32 min_alloc;
374 
375  f->size = size;
376  f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX;
377  f->segment_index = SVM_FIFO_INVALID_INDEX;
378  f->refcnt = 1;
379  f->head = f->tail = f->flags = 0;
380  f->head_chunk = f->tail_chunk = f->start_chunk;
381  f->ooo_deq = f->ooo_enq = 0;
382 
383  min_alloc = size > 32 << 10 ? size >> 3 : 4096;
384  min_alloc = clib_min (min_alloc, 64 << 10);
385  f->min_alloc = min_alloc;
386 
387  /*
388  * Initialize chunks
389  */
390  f->start_chunk->start_byte = 0;
391  prev = f->start_chunk;
393  c = prev->next;
394 
395  while (c)
396  {
397  c->start_byte = prev->start_byte + prev->length;
399  prev = c;
400  c = c->next;
401  }
402 }
403 
404 void
406 {
407  if (ooo_type == 0)
408  {
409  ASSERT (!rb_tree_is_init (&f->ooo_enq_lookup));
410  rb_tree_init (&f->ooo_enq_lookup);
411  }
412  else
413  {
414  ASSERT (!rb_tree_is_init (&f->ooo_deq_lookup));
415  rb_tree_init (&f->ooo_deq_lookup);
416  }
417 }
418 
419 /**
420  * Creates a fifo in the current heap. Fails vs blow up the process
421  */
422 svm_fifo_t *
423 svm_fifo_alloc (u32 data_size_in_bytes)
424 {
425  u32 rounded_data_size;
427  svm_fifo_t *f;
428 
430  if (f == 0)
431  return 0;
432 
433  clib_memset (f, 0, sizeof (*f));
434 
435  /* always round fifo data size to the next highest power-of-two */
436  rounded_data_size = (1 << (max_log2 (data_size_in_bytes)));
437  c = clib_mem_alloc_aligned_or_null (sizeof (*c) + rounded_data_size,
439  if (!c)
440  {
441  clib_mem_free (f);
442  return 0;
443  }
444 
445  clib_memset (c, 0, sizeof (*c));
446  c->start_byte = 0;
447  c->length = data_size_in_bytes;
450  f->start_chunk = f->end_chunk = c;
451 
452  return f;
453 }
454 
455 /**
456  * Creates a fifo chunk in the current heap
457  */
460 {
462  u32 rounded_size;
463 
464  /* round chunk size to the next highest power-of-two */
465  rounded_size = (1 << (max_log2 (size)));
466  c = clib_mem_alloc_aligned_or_null (sizeof (*c) + rounded_size,
468  if (c == 0)
469  return 0;
470 
471  clib_memset (c, 0, sizeof (*c));
472  c->length = rounded_size;
473  return c;
474 }
475 
476 /**
477  * Find chunk for given byte position
478  *
479  * @param f fifo
480  * @param pos normalized position in fifo
481  *
482  * @return chunk that includes given position or 0
483  */
484 static svm_fifo_chunk_t *
486 {
488 
489  c = f->start_chunk;
490  while (c && !f_chunk_includes_pos (c, pos))
491  c = c->next;
492 
493  return c;
494 }
495 
496 static svm_fifo_chunk_t *
498 {
500 
501  ASSERT (start != 0);
502 
503  c = start;
504  while (c && !f_chunk_includes_pos (c, pos))
505  c = c->next;
506 
507  return c;
508 }
509 
510 u32
512 {
513  u32 head, tail, end_chunk;
514 
515  f_load_head_tail_cons (f, &head, &tail);
516  ASSERT (!f->head_chunk || f_chunk_includes_pos (f->head_chunk, head));
517 
518  if (!f->head_chunk)
519  {
520  f->head_chunk = svm_fifo_find_chunk (f, head);
521  if (PREDICT_FALSE (!f->head_chunk))
522  return 0;
523  }
524 
525  end_chunk = f_chunk_end (f->head_chunk);
526 
527  return f_pos_lt (end_chunk, tail) ? end_chunk - head : tail - head;
528 }
529 
530 u32
532 {
533  u32 head, tail;
534 
535  f_load_head_tail_prod (f, &head, &tail);
536  ASSERT (!f->tail_chunk || f_chunk_includes_pos (f->tail_chunk, tail));
537 
538  return f->tail_chunk ? f_chunk_end (f->tail_chunk) - tail : 0;
539 }
540 
541 static rb_node_t *
543 {
544  rb_node_t *cur, *prev;
545 
546  cur = rb_node (rt, rt->root);
547  if (PREDICT_FALSE (rb_node_is_tnil (rt, cur)))
548  return 0;
549 
550  while (pos != cur->key)
551  {
552  prev = cur;
553  if (f_pos_lt (pos, cur->key))
554  {
555  cur = rb_node_left (rt, cur);
556  if (rb_node_is_tnil (rt, cur))
557  {
558  cur = rb_tree_predecessor (rt, prev);
559  break;
560  }
561  }
562  else
563  {
564  cur = rb_node_right (rt, cur);
565  if (rb_node_is_tnil (rt, cur))
566  {
567  cur = prev;
568  break;
569  }
570  }
571  }
572 
573  if (rb_node_is_tnil (rt, cur))
574  return 0;
575 
576  return cur;
577 }
578 
579 static svm_fifo_chunk_t *
581 {
583  rb_node_t *n;
584 
585  if (!rb_tree_is_init (rt))
586  return 0;
587 
588  n = f_find_node_rbtree (rt, pos);
589  if (!n)
590  return 0;
592  if (f_chunk_includes_pos (c, pos))
593  return c;
594 
595  return 0;
596 }
597 
598 static void
599 f_update_ooo_enq (svm_fifo_t * f, u32 start_pos, u32 end_pos)
600 {
601  rb_tree_t *rt = &f->ooo_enq_lookup;
603  rb_node_t *cur;
604 
605  /* Use linear search if rbtree is not initialized */
606  if (PREDICT_FALSE (!rb_tree_is_init (rt)))
607  {
608  f->ooo_enq = svm_fifo_find_next_chunk (f, f->tail_chunk, start_pos);
609  return;
610  }
611 
612  if (rt->root == RBTREE_TNIL_INDEX)
613  {
614  c = f->tail_chunk;
618  }
619  else
620  {
621  cur = f_find_node_rbtree (rt, start_pos);
623  ASSERT (f_pos_leq (c->start_byte, start_pos));
624  }
625 
626  if (f_chunk_includes_pos (c, start_pos))
627  f->ooo_enq = c;
628 
629  if (f_chunk_includes_pos (c, end_pos))
630  return;
631 
632  do
633  {
634  c = c->next;
635  if (!c || c->enq_rb_index != RBTREE_TNIL_INDEX)
636  break;
637 
640 
641  if (f_chunk_includes_pos (c, start_pos))
642  f->ooo_enq = c;
643  }
644  while (!f_chunk_includes_pos (c, end_pos));
645 }
646 
647 static void
648 f_update_ooo_deq (svm_fifo_t * f, u32 start_pos, u32 end_pos)
649 {
650  rb_tree_t *rt = &f->ooo_deq_lookup;
651  rb_node_t *cur;
653 
654  /* Use linear search if rbtree is not initialized */
655  if (PREDICT_FALSE (!rb_tree_is_init (rt)))
656  {
657  f->ooo_deq = svm_fifo_find_chunk (f, start_pos);
658  return;
659  }
660 
661  if (rt->root == RBTREE_TNIL_INDEX)
662  {
663  c = f->start_chunk;
667  }
668  else
669  {
670  cur = f_find_node_rbtree (rt, start_pos);
672  ASSERT (f_pos_leq (c->start_byte, start_pos));
673  }
674 
675  if (f_chunk_includes_pos (c, start_pos))
676  f->ooo_deq = c;
677 
678  if (f_chunk_includes_pos (c, end_pos))
679  return;
680 
681  do
682  {
683  c = c->next;
684  if (!c || c->deq_rb_index != RBTREE_TNIL_INDEX)
685  break;
686 
689 
690  if (f_chunk_includes_pos (c, start_pos))
691  f->ooo_deq = c;
692  }
693  while (!f_chunk_includes_pos (c, end_pos));
694 }
695 
696 static svm_fifo_chunk_t *
698  u32 end_pos)
699 {
700  rb_tree_t *rt = &f->ooo_enq_lookup;
702  rb_node_t *n;
703 
704  c = start;
705  while (c && !f_chunk_includes_pos (c, end_pos))
706  {
708  {
709  n = rb_node (rt, c->enq_rb_index);
710  rb_tree_del_node (rt, n);
712  }
713 
714  c = c->next;
715  }
716 
717  /* No ooo segments left, so make sure the current chunk
718  * is not tracked in the enq rbtree */
719  if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX
720  && c && c->enq_rb_index != RBTREE_TNIL_INDEX)
721  {
722  n = rb_node (rt, c->enq_rb_index);
723  rb_tree_del_node (rt, n);
725  }
726 
727  return c;
728 }
729 
730 static svm_fifo_chunk_t *
732  u32 end_pos)
733 {
734  rb_tree_t *rt = &f->ooo_deq_lookup;
736  rb_node_t *n;
737 
738  c = start;
739  while (c && !f_chunk_includes_pos (c, end_pos))
740  {
742  {
743  n = rb_node (rt, c->deq_rb_index);
744  rb_tree_del_node (rt, n);
746  }
747 
748  c = c->next;
749  }
750 
751  return c;
752 }
753 
754 void
756 {
757  rb_tree_free_nodes (&f->ooo_enq_lookup);
758  rb_tree_free_nodes (&f->ooo_deq_lookup);
759 }
760 
761 void
763 {
764  ASSERT (f->refcnt > 0);
765 
766  if (--f->refcnt == 0)
767  {
768  /* ooo data is not allocated on segment heap */
770  clib_mem_free (f);
771  }
772 }
773 
774 void
776 {
777  u32 n_chunk;
778  u32 head, tail, head_idx;
780 
781  ASSERT (len <= f->size);
782 
783  f_load_head_tail_cons (f, &head, &tail);
784 
785  if (!f->head_chunk)
786  f->head_chunk = svm_fifo_find_chunk (f, head);
787 
788  c = f->head_chunk;
789  head_idx = head - c->start_byte;
790  n_chunk = c->length - head_idx;
791  if (len <= n_chunk)
792  clib_memcpy_fast (&c->data[head_idx], src, len);
793  else
794  {
795  ASSERT (len - n_chunk <= c->next->length);
796  clib_memcpy_fast (&c->data[head_idx], src, n_chunk);
797  clib_memcpy_fast (&c->next->data[0], src + n_chunk, len - n_chunk);
798  }
799 }
800 
801 static int
803 {
804  svm_fifo_chunk_t *c, *cur, *prev;
805  u32 alloc_size, free_alloced;
806 
807  free_alloced = f_chunk_end (f->end_chunk) - tail;
808 
809  alloc_size = clib_min (f->min_alloc, f->size - (tail - head));
810  alloc_size = clib_max (alloc_size, len - free_alloced);
811 
812  c = fsh_alloc_chunk (f->fs_hdr, f->slice_index, alloc_size);
813  if (PREDICT_FALSE (!c))
814  return -1;
815 
816  cur = c;
817  prev = f->end_chunk;
818 
819  while (cur)
820  {
821  cur->start_byte = prev->start_byte + prev->length;
824 
825  prev = cur;
826  cur = cur->next;
827  }
828 
829  prev->next = 0;
830  f->end_chunk->next = c;
831  f->end_chunk = prev;
832 
833  if (!f->tail_chunk)
834  f->tail_chunk = c;
835 
836  return 0;
837 }
838 
839 int
841 {
842  u32 tail, head, free_count;
843  svm_fifo_chunk_t *old_tail_c;
844 
845  f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
846 
847  f_load_head_tail_prod (f, &head, &tail);
848 
849  /* free space in fifo can only increase during enqueue: SPSC */
850  free_count = f_free_count (f, head, tail);
851 
852  if (PREDICT_FALSE (free_count == 0))
853  return SVM_FIFO_EFULL;
854 
855  /* number of bytes we're going to copy */
856  len = clib_min (free_count, len);
857 
858  if (f_pos_gt (tail + len, f_chunk_end (f->end_chunk)))
859  {
860  if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, len)))
861  {
862  len = f_chunk_end (f->end_chunk) - tail;
863  if (!len)
864  return SVM_FIFO_EGROW;
865  }
866  }
867 
868  old_tail_c = f->tail_chunk;
869 
870  svm_fifo_copy_to_chunk (f, f->tail_chunk, tail, src, len, &f->tail_chunk);
871  tail = tail + len;
872 
873  svm_fifo_trace_add (f, head, len, 2);
874 
875  /* collect out-of-order segments */
876  if (PREDICT_FALSE (f->ooos_list_head != OOO_SEGMENT_INVALID_INDEX))
877  {
878  len += ooo_segment_try_collect (f, len, &tail);
879  /* Tail chunk might've changed even if nothing was collected */
880  f->tail_chunk = f_lookup_clear_enq_chunks (f, old_tail_c, tail);
881  f->ooo_enq = 0;
882  }
883 
884  /* store-rel: producer owned index (paired with load-acq in consumer) */
885  clib_atomic_store_rel_n (&f->tail, tail);
886 
887  return len;
888 }
889 
890 /**
891  * Enqueue a future segment.
892  *
893  * Two choices: either copies the entire segment, or copies nothing
894  * Returns 0 of the entire segment was copied
895  * Returns -1 if none of the segment was copied due to lack of space
896  */
897 int
899 {
900  u32 tail, head, free_count, enq_pos;
901 
902  f_load_head_tail_prod (f, &head, &tail);
903 
904  /* free space in fifo can only increase during enqueue: SPSC */
905  free_count = f_free_count (f, head, tail);
906  f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
907 
908  /* will this request fit? */
909  if ((len + offset) > free_count)
910  return SVM_FIFO_EFULL;
911 
912  enq_pos = tail + offset;
913 
914  if (f_pos_gt (enq_pos + len, f_chunk_end (f->end_chunk)))
915  {
916  if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, offset + len)))
917  return SVM_FIFO_EGROW;
918  }
919 
920  svm_fifo_trace_add (f, offset, len, 1);
921  ooo_segment_add (f, offset, head, tail, len);
922 
923  if (!f->ooo_enq || !f_chunk_includes_pos (f->ooo_enq, enq_pos))
924  f_update_ooo_enq (f, enq_pos, enq_pos + len);
925 
926  svm_fifo_copy_to_chunk (f, f->ooo_enq, enq_pos, src, len, &f->ooo_enq);
927 
928  return 0;
929 }
930 
931 /**
932  * Advance tail
933  */
934 void
936 {
937  u32 tail;
938 
939  ASSERT (len <= svm_fifo_max_enqueue_prod (f));
940  /* load-relaxed: producer owned index */
941  tail = f->tail;
942  tail = tail + len;
943 
944  if (rb_tree_is_init (&f->ooo_enq_lookup))
945  {
946  f->tail_chunk = f_lookup_clear_enq_chunks (f, f->tail_chunk, tail);
947  f->ooo_enq = 0;
948  }
949  else
950  {
951  f->tail_chunk = svm_fifo_find_next_chunk (f, f->tail_chunk, tail);
952  }
953 
954  /* store-rel: producer owned index (paired with load-acq in consumer) */
955  clib_atomic_store_rel_n (&f->tail, tail);
956 }
957 
958 int
960  u32 n_segs, u8 allow_partial)
961 {
962  u32 tail, head, free_count, len = 0, i;
963  svm_fifo_chunk_t *old_tail_c;
964 
965  f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
966 
967  f_load_head_tail_prod (f, &head, &tail);
968 
969  /* free space in fifo can only increase during enqueue: SPSC */
970  free_count = f_free_count (f, head, tail);
971 
972  if (PREDICT_FALSE (free_count == 0))
973  return SVM_FIFO_EFULL;
974 
975  for (i = 0; i < n_segs; i++)
976  len += segs[i].len;
977 
978  old_tail_c = f->tail_chunk;
979 
980  if (!allow_partial)
981  {
982  if (PREDICT_FALSE (free_count < len))
983  return SVM_FIFO_EFULL;
984 
985  if (f_pos_gt (tail + len, f_chunk_end (f->end_chunk)))
986  {
987  if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, len)))
988  return SVM_FIFO_EGROW;
989  }
990 
991  for (i = 0; i < n_segs; i++)
992  {
993  svm_fifo_copy_to_chunk (f, f->tail_chunk, tail, segs[i].data,
994  segs[i].len, &f->tail_chunk);
995  tail += segs[i].len;
996  }
997  }
998  else
999  {
1000  len = clib_min (free_count, len);
1001 
1002  if (f_pos_gt (tail + len, f_chunk_end (f->end_chunk)))
1003  {
1004  if (PREDICT_FALSE (f_try_chunk_alloc (f, head, tail, len)))
1005  {
1006  len = f_chunk_end (f->end_chunk) - tail;
1007  if (!len)
1008  return SVM_FIFO_EGROW;
1009  }
1010  }
1011 
1012  i = 0;
1013  while (len)
1014  {
1015  u32 to_copy = clib_min (segs[i].len, len);
1016  svm_fifo_copy_to_chunk (f, f->tail_chunk, tail, segs[i].data,
1017  to_copy, &f->tail_chunk);
1018  len -= to_copy;
1019  tail += to_copy;
1020  i++;
1021  }
1022  }
1023 
1024  /* collect out-of-order segments */
1025  if (PREDICT_FALSE (f->ooos_list_head != OOO_SEGMENT_INVALID_INDEX))
1026  {
1027  len += ooo_segment_try_collect (f, len, &tail);
1028  /* Tail chunk might've changed even if nothing was collected */
1029  f->tail_chunk = f_lookup_clear_enq_chunks (f, old_tail_c, tail);
1030  f->ooo_enq = 0;
1031  }
1032 
1033  /* store-rel: producer owned index (paired with load-acq in consumer) */
1034  clib_atomic_store_rel_n (&f->tail, tail);
1035 
1036  return len;
1037 }
1038 
1040 f_unlink_chunks (svm_fifo_t * f, u32 end_pos, u8 maybe_ooo)
1041 {
1042  svm_fifo_chunk_t *start, *prev = 0, *c;
1043  rb_tree_t *rt;
1044  rb_node_t *n;
1045 
1046  ASSERT (!f_chunk_includes_pos (f->start_chunk, end_pos));
1047 
1048  if (maybe_ooo)
1049  rt = &f->ooo_deq_lookup;
1050 
1051  c = f->start_chunk;
1052 
1053  do
1054  {
1055  if (maybe_ooo && c->deq_rb_index != RBTREE_TNIL_INDEX)
1056  {
1057  n = rb_node (rt, c->deq_rb_index);
1058  ASSERT (n == f_find_node_rbtree (rt, c->start_byte));
1059  rb_tree_del_node (rt, n);
1060  c->deq_rb_index = RBTREE_TNIL_INDEX;
1061  }
1062  if (!c->next)
1063  break;
1064  prev = c;
1065  c = c->next;
1066  }
1067  while (!f_chunk_includes_pos (c, end_pos));
1068 
1069  if (maybe_ooo)
1070  {
1071  if (f->ooo_deq && f_pos_lt (f->ooo_deq->start_byte, f_chunk_end (c)))
1072  f->ooo_deq = 0;
1073  }
1074  else
1075  {
1076  if (PREDICT_FALSE (f->ooo_deq != 0))
1077  f->ooo_deq = 0;
1078  }
1079 
1080  /* Avoid unlinking the last chunk */
1081  if (!prev)
1082  return 0;
1083 
1084  prev->next = 0;
1085  start = f->start_chunk;
1086  f->start_chunk = c;
1087 
1088  return start;
1089 }
1090 
1091 int
1093 {
1094  u32 tail, head, cursize;
1095 
1096  f_load_head_tail_cons (f, &head, &tail);
1097 
1098  /* current size of fifo can only increase during dequeue: SPSC */
1099  cursize = f_cursize (f, head, tail);
1100 
1101  if (PREDICT_FALSE (cursize == 0))
1102  return SVM_FIFO_EEMPTY;
1103 
1104  len = clib_min (cursize, len);
1105 
1106  if (!f->head_chunk)
1107  f->head_chunk = svm_fifo_find_chunk (f, head);
1108 
1109  svm_fifo_copy_from_chunk (f, f->head_chunk, head, dst, len, &f->head_chunk);
1110  head = head + len;
1111 
1112  /* In order dequeues are not supported in combination with ooo peeking.
1113  * Use svm_fifo_dequeue_drop instead. */
1114  ASSERT (rb_tree_n_nodes (&f->ooo_deq_lookup) <= 1);
1115 
1116  if (f_pos_geq (head, f_chunk_end (f->start_chunk)))
1117  fsh_collect_chunks (f->fs_hdr, f->slice_index,
1118  f_unlink_chunks (f, head, 0));
1119 
1120  /* store-rel: consumer owned index (paired with load-acq in producer) */
1121  clib_atomic_store_rel_n (&f->head, head);
1122 
1123  return len;
1124 }
1125 
1126 int
1128 {
1129  u32 tail, head, cursize, head_idx;
1130 
1131  f_load_head_tail_cons (f, &head, &tail);
1132 
1133  /* current size of fifo can only increase during peek: SPSC */
1134  cursize = f_cursize (f, head, tail);
1135 
1136  if (PREDICT_FALSE (cursize < offset))
1137  return SVM_FIFO_EEMPTY;
1138 
1139  len = clib_min (cursize - offset, len);
1140  head_idx = head + offset;
1141 
1142  CLIB_MEM_UNPOISON (f->ooo_deq, sizeof (*f->ooo_deq));
1143  if (!f->ooo_deq || !f_chunk_includes_pos (f->ooo_deq, head_idx))
1144  f_update_ooo_deq (f, head_idx, head_idx + len);
1145 
1146  svm_fifo_copy_from_chunk (f, f->ooo_deq, head_idx, dst, len, &f->ooo_deq);
1147  return len;
1148 }
1149 
1150 int
1152 {
1153  u32 total_drop_bytes, tail, head, cursize;
1154 
1155  f_load_head_tail_cons (f, &head, &tail);
1156 
1157  /* number of bytes available */
1158  cursize = f_cursize (f, head, tail);
1159  if (PREDICT_FALSE (cursize == 0))
1160  return SVM_FIFO_EEMPTY;
1161 
1162  /* number of bytes we're going to drop */
1163  total_drop_bytes = clib_min (cursize, len);
1164 
1165  svm_fifo_trace_add (f, tail, total_drop_bytes, 3);
1166 
1167  /* move head */
1168  head = head + total_drop_bytes;
1169 
1170  if (f_pos_geq (head, f_chunk_end (f->start_chunk)))
1171  {
1172  fsh_collect_chunks (f->fs_hdr, f->slice_index,
1173  f_unlink_chunks (f, head, 1));
1174  f->head_chunk =
1175  f_chunk_includes_pos (f->start_chunk, head) ? f->start_chunk : 0;
1176  }
1177 
1178  /* store-rel: consumer owned index (paired with load-acq in producer) */
1179  clib_atomic_store_rel_n (&f->head, head);
1180 
1181  return total_drop_bytes;
1182 }
1183 
1184 /**
1185  * Drop all data from fifo
1186  *
1187  */
1188 void
1190 {
1191  u32 head, tail;
1192 
1193  f_load_head_tail_all_acq (f, &head, &tail);
1194 
1195  if (!f->head_chunk || !f_chunk_includes_pos (f->head_chunk, head))
1196  f->head_chunk = svm_fifo_find_chunk (f, head);
1197 
1198  f->head_chunk = f_lookup_clear_deq_chunks (f, f->head_chunk, tail);
1199 
1200  if (f_pos_geq (tail, f_chunk_end (f->start_chunk)))
1201  fsh_collect_chunks (f->fs_hdr, f->slice_index,
1202  f_unlink_chunks (f, tail, 0));
1203 
1204  /* store-rel: consumer owned index (paired with load-acq in producer) */
1205  clib_atomic_store_rel_n (&f->head, tail);
1206 }
1207 
1208 int
1210 {
1211  u32 head, tail;
1212 
1213  f_load_head_tail_prod (f, &head, &tail);
1214 
1215  if (f_chunk_end (f->end_chunk) - head >= f->size)
1216  return 0;
1217 
1218  if (f_try_chunk_alloc (f, head, tail, f->size - (tail - head)))
1219  return SVM_FIFO_EGROW;
1220 
1221  return 0;
1222 }
1223 
1224 int
1226  u32 n_segs, u32 max_bytes)
1227 {
1228  u32 cursize, to_read, head, tail, fs_index = 1;
1229  u32 n_bytes, head_pos, len, start;
1231 
1232  f_load_head_tail_cons (f, &head, &tail);
1233 
1234  /* consumer function, cursize can only increase while we're working */
1235  cursize = f_cursize (f, head, tail);
1236 
1237  if (PREDICT_FALSE (cursize == 0))
1238  return SVM_FIFO_EEMPTY;
1239 
1240  if (offset >= cursize)
1241  return SVM_FIFO_EEMPTY;
1242 
1243  to_read = clib_min (cursize - offset, max_bytes);
1244  start = head + offset;
1245 
1246  if (!f->head_chunk)
1247  f->head_chunk = svm_fifo_find_chunk (f, head);
1248 
1249  c = f->head_chunk;
1250 
1251  while (!f_chunk_includes_pos (c, start))
1252  c = c->next;
1253 
1254  head_pos = start - c->start_byte;
1255  fs[0].data = c->data + head_pos;
1256  fs[0].len = clib_min (c->length - head_pos, cursize - offset);
1257  n_bytes = fs[0].len;
1258 
1259  while (n_bytes < to_read && fs_index < n_segs)
1260  {
1261  c = c->next;
1262  len = clib_min (c->length, to_read - n_bytes);
1263  fs[fs_index].data = c->data;
1264  fs[fs_index].len = len;
1265  n_bytes += len;
1266  fs_index += 1;
1267  }
1268 
1269  return n_bytes;
1270 }
1271 
1272 /**
1273  * Clones fifo
1274  *
1275  * Assumptions:
1276  * - no prod and cons are accessing either dest or src fifo
1277  * - fifo is not multi chunk
1278  */
1279 void
1281 {
1282  u32 head, tail;
1283 
1284  /* Support only single chunk clones for now */
1285  ASSERT (svm_fifo_n_chunks (sf) == 1);
1286 
1287  clib_memcpy_fast (df->head_chunk->data, sf->head_chunk->data, sf->size);
1288 
1289  f_load_head_tail_all_acq (sf, &head, &tail);
1290  clib_atomic_store_rel_n (&df->head, head);
1291  clib_atomic_store_rel_n (&df->tail, tail);
1292 }
1293 
1294 u32
1296 {
1297  return pool_elts (f->ooo_segments);
1298 }
1299 
1300 ooo_segment_t *
1302 {
1303  return pool_elt_at_index (f->ooo_segments, f->ooos_list_head);
1304 }
1305 
1306 /**
1307  * Set fifo pointers to requested offset
1308  */
1309 void
1311 {
1313 
1314  clib_atomic_store_rel_n (&f->head, head);
1315  clib_atomic_store_rel_n (&f->tail, tail);
1316 
1317  c = svm_fifo_find_chunk (f, head);
1318  ASSERT (c != 0);
1319  f->head_chunk = f->ooo_deq = c;
1320  c = svm_fifo_find_chunk (f, tail);
1321  ASSERT (c != 0);
1322  f->tail_chunk = f->ooo_enq = c;
1323 }
1324 
1325 void
1327 {
1328  if (f->n_subscribers >= SVM_FIFO_MAX_EVT_SUBSCRIBERS)
1329  return;
1330  f->subscribers[f->n_subscribers++] = subscriber;
1331 }
1332 
1333 void
1335 {
1336  int i;
1337 
1338  for (i = 0; i < f->n_subscribers; i++)
1339  {
1340  if (f->subscribers[i] != subscriber)
1341  continue;
1342  f->subscribers[i] = f->subscribers[f->n_subscribers - 1];
1343  f->n_subscribers--;
1344  break;
1345  }
1346 }
1347 
/**
 * Check if fifo is sane (debug/test consistency checker).
 *
 * Validates cached chunk pointers, the ooo enq/deq lookup state and the
 * chunk linked list. Returns 1 if all invariants hold, 0 otherwise.
 * Read-only: never repairs state.
 */
u8
svm_fifo_is_sane (svm_fifo_t * f)
{
  svm_fifo_chunk_t *tmp;

  /* Cached head/tail chunks, when set, must actually contain the
   * current head/tail positions. */
  if (f->head_chunk && !f_chunk_includes_pos (f->head_chunk, f->head))
    return 0;
  if (f->tail_chunk && !f_chunk_includes_pos (f->tail_chunk, f->tail))
    return 0;
  if (f->ooo_deq)
    {
      if (rb_tree_is_init (&f->ooo_deq_lookup))
	{
	  /* ooo_deq must start within [start_chunk, end of end_chunk] */
	  if (f_pos_lt (f->ooo_deq->start_byte, f->start_chunk->start_byte)
	      || f_pos_gt (f->ooo_deq->start_byte,
			   f_chunk_end (f->end_chunk)))
	    return 0;

	  tmp = f_find_chunk_rbtree (&f->ooo_deq_lookup,
				     f->ooo_deq->start_byte);
	}
      else
	tmp = svm_fifo_find_chunk (f, f->ooo_deq->start_byte);
      /* Lookup (rbtree or list walk) must resolve back to ooo_deq itself */
      if (tmp != f->ooo_deq)
	return 0;
    }
  if (f->ooo_enq)
    {
      if (rb_tree_is_init (&f->ooo_enq_lookup))
	{
	  /* Same bounds check as for ooo_deq, on the enq side */
	  if (f_pos_lt (f->ooo_enq->start_byte, f->start_chunk->start_byte)
	      || f_pos_gt (f->ooo_enq->start_byte,
			   f_chunk_end (f->end_chunk)))
	    return 0;

	  tmp = f_find_chunk_rbtree (&f->ooo_enq_lookup,
				     f->ooo_enq->start_byte);
	}
      else
	{
	  /* Without the rbtree, ooo_enq must be reachable from tail_chunk */
	  tmp = svm_fifo_find_next_chunk (f, f->tail_chunk,
					  f->ooo_enq->start_byte);
	}
      if (tmp != f->ooo_enq)
	return 0;
    }

  /* Multi-chunk fifo: walk the whole chunk list and validate it */
  if (f->start_chunk->next)
    {
      svm_fifo_chunk_t *c, *prev = 0, *tmp;
      u32 chunks_bytes = 0;

      c = f->start_chunk;
      do
	{
	  /* Each chunk must be findable by its own start byte */
	  tmp = svm_fifo_find_chunk (f, c->start_byte);
	  if (tmp != c)
	    return 0;
	  /* Chunks must be contiguous: previous chunk ends where this
	   * one starts */
	  if (prev && (prev->start_byte + prev->length != c->start_byte))
	    return 0;

	  /* If the chunk claims rbtree membership, the rbtree lookup
	   * (when it returns a node) must agree. */
	  if (c->enq_rb_index != RBTREE_TNIL_INDEX)
	    {
	      tmp = f_find_chunk_rbtree (&f->ooo_enq_lookup, c->start_byte);
	      if (tmp)
		{
		  if (tmp != c)
		    return 0;
		}
	    }
	  if (c->deq_rb_index != RBTREE_TNIL_INDEX)
	    {
	      tmp = f_find_chunk_rbtree (&f->ooo_deq_lookup, c->start_byte);
	      if (tmp)
		{
		  if (tmp != c)
		    return 0;
		}
	    }

	  chunks_bytes += c->length;
	  prev = c;
	  c = c->next;
	}
      while (c);

      /* Total chunk storage must cover at least the enqueued bytes */
      if (chunks_bytes < f->tail - f->head)
	return 0;
    }

  return 1;
}
1440 
1441 u32
1443 {
1445  int n_chunks = 0;
1446 
1447  c = f->start_chunk;
1448  while (c)
1449  {
1450  n_chunks++;
1451  c = c->next;
1452  }
1453 
1454  return n_chunks;
1455 }
1456 
1457 u8 *
1458 format_ooo_segment (u8 * s, va_list * args)
1459 {
1460  svm_fifo_t __clib_unused *f = va_arg (*args, svm_fifo_t *);
1461  ooo_segment_t *seg = va_arg (*args, ooo_segment_t *);
1462  s = format (s, "[%u, %u], len %u, next %d, prev %d", seg->start,
1463  seg->start + seg->length, seg->length, seg->next, seg->prev);
1464  return s;
1465 }
1466 
1467 u8 *
1469 {
1470 #if SVM_FIFO_TRACE
1471  svm_fifo_trace_elem_t *seg = 0;
1472  int i = 0;
1473 
1474  if (f->trace)
1475  {
1476  vec_foreach (seg, f->trace)
1477  {
1478  s = format (s, "{%u, %u, %u}, ", seg->offset, seg->len, seg->action);
1479  i++;
1480  if (i % 5 == 0)
1481  s = format (s, "\n");
1482  }
1483  s = format (s, "\n");
1484  }
1485  return s;
1486 #else
1487  return 0;
1488 #endif
1489 }
1490 
1491 u8 *
1492 svm_fifo_replay (u8 * s, svm_fifo_t * f, u8 no_read, u8 verbose)
1493 {
1494  int i, trace_len;
1495  u8 *data = 0;
1497  u32 offset;
1498  svm_fifo_t *placeholder_fifo;
1499 
1500  if (!f)
1501  return s;
1502 
1503 #if SVM_FIFO_TRACE
1504  trace = f->trace;
1505  trace_len = vec_len (trace);
1506 #else
1507  trace = 0;
1508  trace_len = 0;
1509 #endif
1510 
1511  placeholder_fifo = svm_fifo_alloc (f->size);
1512  svm_fifo_init (f, f->size);
1513  clib_memset (f->head_chunk->data, 0xFF, f->size);
1514  vec_validate (data, f->size);
1515  for (i = 0; i < vec_len (data); i++)
1516  data[i] = i;
1517 
1518  for (i = 0; i < trace_len; i++)
1519  {
1520  offset = trace[i].offset;
1521  if (trace[i].action == 1)
1522  {
1523  if (verbose)
1524  s = format (s, "adding [%u, %u]:", trace[i].offset,
1525  (trace[i].offset + trace[i].len));
1526  svm_fifo_enqueue_with_offset (placeholder_fifo, trace[i].offset,
1527  trace[i].len, &data[offset]);
1528  }
1529  else if (trace[i].action == 2)
1530  {
1531  if (verbose)
1532  s = format (s, "adding [%u, %u]:", 0, trace[i].len);
1533  svm_fifo_enqueue (placeholder_fifo, trace[i].len, &data[offset]);
1534  }
1535  else if (!no_read)
1536  {
1537  if (verbose)
1538  s = format (s, "read: %u", trace[i].len);
1539  svm_fifo_dequeue_drop (placeholder_fifo, trace[i].len);
1540  }
1541  if (verbose)
1542  s = format (s, "%U", format_svm_fifo, placeholder_fifo, 1);
1543  }
1544 
1545  s = format (s, "result: %U", format_svm_fifo, placeholder_fifo, 1);
1546 
1547  return s;
1548 }
1549 
1550 u8 *
1551 format_ooo_list (u8 * s, va_list * args)
1552 {
1553  svm_fifo_t *f = va_arg (*args, svm_fifo_t *);
1554  u32 indent = va_arg (*args, u32);
1555  u32 ooo_segment_index = f->ooos_list_head;
1556  ooo_segment_t *seg;
1557 
1558  while (ooo_segment_index != OOO_SEGMENT_INVALID_INDEX)
1559  {
1560  seg = pool_elt_at_index (f->ooo_segments, ooo_segment_index);
1561  s = format (s, "%U%U\n", format_white_space, indent, format_ooo_segment,
1562  f, seg);
1563  ooo_segment_index = seg->next;
1564  }
1565 
1566  return s;
1567 }
1568 
1569 u8 *
1570 format_svm_fifo (u8 * s, va_list * args)
1571 {
1572  svm_fifo_t *f = va_arg (*args, svm_fifo_t *);
1573  int verbose = va_arg (*args, int);
1574  u32 indent;
1575 
1576  if (!s)
1577  return s;
1578 
1579  indent = format_get_indent (s);
1580  s = format (s, "cursize %u nitems %u has_event %d min_alloc %u\n",
1581  svm_fifo_max_dequeue (f), f->size, f->has_event, f->min_alloc);
1582  s = format (s, "%Uhead %u tail %u segment manager %u\n", format_white_space,
1583  indent, f->head, f->tail, f->segment_manager);
1584 
1585  if (verbose > 1)
1586  s = format (s, "%Uvpp session %d thread %d app session %d thread %d\n",
1587  format_white_space, indent, f->master_session_index,
1588  f->master_thread_index, f->client_session_index,
1589  f->client_thread_index);
1590 
1591  if (verbose)
1592  {
1593  s = format (s, "%Uooo pool %d active elts newest %u\n",
1594  format_white_space, indent, pool_elts (f->ooo_segments),
1595  f->ooos_newest);
1596  if (svm_fifo_has_ooo_data (f))
1597  s = format (s, " %U", format_ooo_list, f, indent, verbose);
1598  }
1599  return s;
1600 }
1601 
1602 #endif
1603 /*
1604  * fd.io coding-style-patch-verification: ON
1605  *
1606  * Local Variables:
1607  * eval: (c-set-style "gnu")
1608  * End:
1609  */
u32 length
length of chunk in bytes
Definition: fifo_types.h:32
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:509
static void svm_fifo_copy_to_chunk(svm_fifo_t *f, svm_fifo_chunk_t *c, u32 tail_idx, const u8 *src, u32 len, svm_fifo_chunk_t **last)
Definition: svm_fifo.c:91
static void f_load_head_tail_all_acq(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail independent of producer/consumer role.
Definition: svm_fifo.h:107
#define CLIB_MEM_UNPOISON(a, s)
Definition: sanitizer.h:47
static vlib_cli_command_t trace
(constructor) VLIB_CLI_COMMAND (trace)
Definition: vlib_api_cli.c:899
#define clib_min(x, y)
Definition: clib.h:328
static int ooo_segment_try_collect(svm_fifo_t *f, u32 n_bytes_enqueued, u32 *tail)
Removes segments that can now be enqueued because the fifo's tail has advanced.
Definition: svm_fifo.c:308
static u32 svm_fifo_max_enqueue_prod(svm_fifo_t *f)
Maximum number of bytes that can be enqueued into fifo.
Definition: svm_fifo.h:550
static ooo_segment_t * ooo_segment_next(svm_fifo_t *f, ooo_segment_t *s)
Definition: svm_fifo.c:127
int svm_fifo_segments(svm_fifo_t *f, u32 offset, svm_fifo_seg_t *fs, u32 n_segs, u32 max_bytes)
Get pointers to fifo chunks data in svm_fifo_seg_t array.
Definition: svm_fifo.c:1225
static rb_node_t * rb_node_left(rb_tree_t *rt, rb_node_t *n)
Definition: rbtree.h:94
void svm_fifo_free_chunk_lookup(svm_fifo_t *f)
Cleanup fifo chunk lookup rb tree.
Definition: svm_fifo.c:755
static u8 svm_fifo_has_ooo_data(svm_fifo_t *f)
Check if fifo has out-of-order data.
Definition: svm_fifo.h:656
void svm_fifo_init_pointers(svm_fifo_t *f, u32 head, u32 tail)
Set fifo pointers to requested offset.
Definition: svm_fifo.c:1310
static u32 f_free_count(svm_fifo_t *f, u32 head, u32 tail)
Fifo free bytes, i.e., number of free bytes.
Definition: svm_fifo.h:132
void svm_fifo_free(svm_fifo_t *f)
Free fifo and associated state.
Definition: svm_fifo.c:762
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
u32 prev
Previous linked-list element pool index.
Definition: fifo_types.h:42
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
static void f_load_head_tail_cons(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail optimized for consumer.
Definition: svm_fifo.h:80
static void svm_fifo_copy_from_chunk(svm_fifo_t *f, svm_fifo_chunk_t *c, u32 head_idx, u8 *dst, u32 len, svm_fifo_chunk_t **last)
Definition: svm_fifo.c:99
#define CLIB_MARCH_FN_SELECT(fn)
Definition: cpu.h:441
svm_fifo_chunk_t * fsh_alloc_chunk(fifo_segment_header_t *fsh, u32 slice_index, u32 chunk_size)
Allocate chunks in fifo segment.
Definition: fifo_segment.c:706
vl_api_address_t src
Definition: gre.api:54
static heap_elt_t * last(heap_header_t *h)
Definition: heap.c:53
int svm_fifo_peek(svm_fifo_t *f, u32 offset, u32 len, u8 *dst)
Peek data from fifo.
Definition: svm_fifo.c:1127
static svm_fifo_chunk_t * f_lookup_clear_deq_chunks(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 end_pos)
Definition: svm_fifo.c:731
void svm_fifo_enqueue_nocopy(svm_fifo_t *f, u32 len)
Advance tail.
Definition: svm_fifo.c:935
void svm_fifo_init(svm_fifo_t *f, u32 size)
Initialize fifo.
Definition: svm_fifo.c:370
static rb_node_t * rb_node(rb_tree_t *rt, rb_node_index_t ri)
Definition: rbtree.h:82
static u32 format_get_indent(u8 *s)
Definition: format.h:72
#define RBTREE_TNIL_INDEX
Definition: rbtree.h:22
ooo_segment_t * svm_fifo_first_ooo_segment(svm_fifo_t *f)
First out-of-order segment for fifo.
Definition: svm_fifo.c:1301
void svm_fifo_dequeue_drop_all(svm_fifo_t *f)
Drop all data from fifo.
Definition: svm_fifo.c:1189
static rb_node_t * rb_node_right(rb_tree_t *rt, rb_node_t *n)
Definition: rbtree.h:88
void svm_fifo_overwrite_head(svm_fifo_t *f, u8 *src, u32 len)
Overwrite fifo head with new data.
Definition: svm_fifo.c:775
#define SVM_FIFO_INVALID_INDEX
Definition: svm_fifo.h:30
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
Definition: pool.h:251
unsigned char u8
Definition: types.h:56
u8 data[128]
Definition: ipsec_types.api:90
__clib_export rb_node_t * rb_tree_predecessor(rb_tree_t *rt, rb_node_t *x)
Definition: rbtree.c:287
__clib_export void rb_tree_free_nodes(rb_tree_t *rt)
Definition: rbtree.c:476
static ooo_segment_t * ooo_segment_alloc(svm_fifo_t *f, u32 start, u32 length)
Definition: svm_fifo.c:135
static svm_fifo_chunk_t * svm_fifo_find_chunk(svm_fifo_t *f, u32 pos)
Find chunk for given byte position.
Definition: svm_fifo.c:485
void svm_fifo_clone(svm_fifo_t *df, svm_fifo_t *sf)
Clones fifo.
Definition: svm_fifo.c:1280
int svm_fifo_enqueue_segments(svm_fifo_t *f, const svm_fifo_seg_t segs[], u32 n_segs, u8 allow_partial)
Enqueue array of svm_fifo_seg_t in order.
Definition: svm_fifo.c:959
static u32 svm_fifo_max_dequeue(svm_fifo_t *f)
Fifo max bytes to dequeue.
Definition: svm_fifo.h:459
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
u32 svm_fifo_n_chunks(svm_fifo_t *f)
Number of chunks linked into the fifo.
Definition: svm_fifo.c:1442
svm_fifo_chunk_t * svm_fifo_chunk_alloc(u32 size)
Creates a fifo chunk in the current heap.
Definition: svm_fifo.c:459
u8 * format_ooo_list(u8 *s, va_list *args)
Definition: svm_fifo.c:1551
description fragment has unexpected format
Definition: map.api:433
void svm_fifo_free_ooo_data(svm_fifo_t *f)
Cleanup fifo ooo data.
Definition: svm_fifo.c:113
static void ooo_segment_free(svm_fifo_t *f, u32 index)
Definition: svm_fifo.c:149
__clib_export rb_node_index_t rb_tree_add_custom(rb_tree_t *rt, u32 key, uword opaque, rb_tree_lt_fn ltfn)
Definition: rbtree.c:195
unsigned int u32
Definition: types.h:88
int svm_fifo_dequeue(svm_fifo_t *f, u32 len, u8 *dst)
Dequeue data from fifo.
Definition: svm_fifo.c:1092
u32 key
node key
Definition: rbtree.h:38
static svm_fifo_chunk_t * f_unlink_chunks(svm_fifo_t *f, u32 end_pos, u8 maybe_ooo)
Definition: svm_fifo.c:1040
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:546
static int f_pos_gt(u32 a, u32 b)
Definition: svm_fifo.h:156
uword opaque
value stored by node
Definition: rbtree.h:39
static ooo_segment_t * ooo_segment_prev(svm_fifo_t *f, ooo_segment_t *s)
Definition: svm_fifo.c:119
struct svm_fifo_chunk_ * next
pointer to next chunk in linked-lists
Definition: fifo_types.h:33
void fsh_collect_chunks(fifo_segment_header_t *fsh, u32 slice_index, svm_fifo_chunk_t *c)
Return chunks to fifo segment.
Definition: fifo_segment.c:739
u32 size
Definition: vhost_user.h:106
#define pool_put(P, E)
Free an object E in pool P.
Definition: pool.h:301
int svm_fifo_enqueue(svm_fifo_t *f, u32 len, const u8 *src)
Enqueue data to fifo.
Definition: svm_fifo.c:840
#define SVM_FIFO_MAX_EVT_SUBSCRIBERS
Definition: fifo_types.h:25
#define PREDICT_FALSE(x)
Definition: clib.h:121
#define always_inline
Definition: ipsec.h:28
static u32 f_chunk_end(svm_fifo_chunk_t *c)
Definition: svm_fifo.h:138
#define svm_fifo_trace_add(_f, _s, _l, _t)
Definition: svm_fifo.h:68
vl_api_address_t dst
Definition: gre.api:55
u8 * svm_fifo_replay(u8 *s, svm_fifo_t *f, u8 no_read, u8 verbose)
Definition: svm_fifo.c:1492
CLIB_MARCH_FN(svm_fifo_copy_to_chunk, void, svm_fifo_t *f, svm_fifo_chunk_t *c, u32 tail_idx, const u8 *src, u32 len, svm_fifo_chunk_t **last)
Definition: svm_fifo.c:24
u8 len
Definition: ip_types.api:103
void svm_fifo_add_subscriber(svm_fifo_t *f, u8 subscriber)
Add io events subscriber to list.
Definition: svm_fifo.c:1326
#define pool_free(p)
Free a pool.
Definition: pool.h:440
svmdb_client_t * c
__clib_export void rb_tree_del_node(rb_tree_t *rt, rb_node_t *z)
Definition: rbtree.c:445
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:317
static u32 f_cursize(svm_fifo_t *f, u32 head, u32 tail)
Fifo current size, i.e., number of bytes enqueued.
Definition: svm_fifo.h:121
static void f_load_head_tail_prod(svm_fifo_t *f, u32 *head, u32 *tail)
Load head and tail optimized for producer.
Definition: svm_fifo.h:93
u32 start_byte
chunk start byte
Definition: fifo_types.h:31
u8 svm_fifo_is_sane(svm_fifo_t *f)
Check if fifo is sane.
Definition: svm_fifo.c:1349
static svm_fifo_chunk_t * svm_fifo_find_next_chunk(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 pos)
Definition: svm_fifo.c:497
#define OOO_SEGMENT_INVALID_INDEX
Definition: svm_fifo.h:28
u8 * format_ooo_segment(u8 *s, va_list *args)
Definition: svm_fifo.c:1458
static svm_fifo_chunk_t * f_lookup_clear_enq_chunks(svm_fifo_t *f, svm_fifo_chunk_t *start, u32 end_pos)
Definition: svm_fifo.c:697
u32 svm_fifo_n_ooo_segments(svm_fifo_t *f)
Number of out-of-order segments for fifo.
Definition: svm_fifo.c:1295
static void * clib_mem_alloc_aligned_or_null(uword size, uword align)
Definition: mem.h:277
signed int i32
Definition: types.h:77
#define uword_to_pointer(u, type)
Definition: types.h:136
u8 * format_svm_fifo(u8 *s, va_list *args)
Definition: svm_fifo.c:1570
#define ASSERT(truth)
static u8 f_chunk_includes_pos(svm_fifo_chunk_t *c, u32 pos)
Definition: svm_fifo.h:168
static int f_pos_geq(u32 a, u32 b)
Definition: svm_fifo.h:162
rb_node_index_t deq_rb_index
deq node index if chunk in rbtree
Definition: fifo_types.h:35
static void clib_mem_free(void *p)
Definition: mem.h:311
u8 data[0]
start of chunk data
Definition: fifo_types.h:36
void svm_fifo_del_subscriber(svm_fifo_t *f, u8 subscriber)
Remove io events subscriber form list.
Definition: svm_fifo.c:1334
int svm_fifo_enqueue_with_offset(svm_fifo_t *f, u32 offset, u32 len, u8 *src)
Enqueue a future segment.
Definition: svm_fifo.c:898
__clib_export int rb_tree_is_init(rb_tree_t *rt)
Definition: rbtree.c:496
char const int length
Definition: cJSON.h:163
static u32 ooo_segment_end_pos(ooo_segment_t *s)
Definition: svm_fifo.c:107
static void f_update_ooo_deq(svm_fifo_t *f, u32 start_pos, u32 end_pos)
Definition: svm_fifo.c:648
static uword pointer_to_uword(const void *p)
Definition: types.h:131
#define clib_atomic_store_rel_n(a, b)
Definition: atomics.h:49
#define clib_max(x, y)
Definition: clib.h:321
u32 length
Length of segment.
Definition: fifo_types.h:44
u8 * svm_fifo_dump_trace(u8 *s, svm_fifo_t *f)
Definition: svm_fifo.c:1468
__clib_export void rb_tree_init(rb_tree_t *rt)
Definition: rbtree.c:483
u32 next
Next linked-list element pool index.
Definition: fifo_types.h:41
u32 svm_fifo_max_read_chunk(svm_fifo_t *f)
Max contiguous chunk of data that can be read.
Definition: svm_fifo.c:511
static int f_pos_leq(u32 a, u32 b)
Definition: svm_fifo.h:150
template key/value backing page structure
Definition: bihash_doc.h:44
__clib_export u32 rb_tree_n_nodes(rb_tree_t *rt)
Definition: rbtree.c:470
int svm_fifo_fill_chunk_list(svm_fifo_t *f)
Ensure the whole fifo size is writeable.
Definition: svm_fifo.c:1209
static u8 rb_node_is_tnil(rb_tree_t *rt, rb_node_t *n)
Definition: rbtree.h:76
vl_api_mac_event_action_t action
Definition: l2.api:181
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
static uword max_log2(uword x)
Definition: clib.h:209
static rb_node_t * f_find_node_rbtree(rb_tree_t *rt, u32 pos)
Definition: svm_fifo.c:542
u32 index
Definition: flow_types.api:221
static svm_fifo_chunk_t * f_find_chunk_rbtree(rb_tree_t *rt, u32 pos)
Definition: svm_fifo.c:580
rb_node_index_t enq_rb_index
enq node index if chunk in rbtree
Definition: fifo_types.h:34
struct clib_bihash_value offset
template key/value backing page structure
static int f_try_chunk_alloc(svm_fifo_t *f, u32 head, u32 tail, u32 len)
Definition: svm_fifo.c:802
static __clib_unused ooo_segment_t * ooo_segment_last(svm_fifo_t *f)
Definition: svm_fifo.c:356
#define vec_foreach(var, vec)
Vector iterator.
save_rewrite_length must be aligned so that reass doesn t overwrite it
Definition: buffer.h:401
u32 svm_fifo_max_write_chunk(svm_fifo_t *f)
Max contiguous chunk of data that can be written.
Definition: svm_fifo.c:531
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
int svm_fifo_dequeue_drop(svm_fifo_t *f, u32 len)
Dequeue and drop bytes from fifo.
Definition: svm_fifo.c:1151
struct _svm_fifo svm_fifo_t
rb_node_index_t root
root index
Definition: rbtree.h:45
u32 start
Start of segment, normalized.
Definition: fifo_types.h:43
void svm_fifo_init_ooo_lookup(svm_fifo_t *f, u8 ooo_type)
Initialize rbtrees used for ooo lookups.
Definition: svm_fifo.c:405
static int f_pos_lt(u32 a, u32 b)
Definition: svm_fifo.h:144
static void ooo_segment_add(svm_fifo_t *f, u32 offset, u32 head, u32 tail, u32 length)
Add segment to fifo's out-of-order segment list.
Definition: svm_fifo.c:178
svm_fifo_t * svm_fifo_alloc(u32 data_size_in_bytes)
Creates a fifo in the current heap.
Definition: svm_fifo.c:423
static void f_update_ooo_enq(svm_fifo_t *f, u32 start_pos, u32 end_pos)
Definition: svm_fifo.c:599
static uword pool_elts(void *v)
Number of active elements in a pool.
Definition: pool.h:127