FD.io VPP  v19.08.2-294-g37e99c22d
Vector Packet Processing
device.c
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2018 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vppinfra/ring.h>
20 #include <vlib/unix/unix.h>
21 #include <vlib/pci/pci.h>
22 #include <vnet/ethernet/ethernet.h>
23 
24 #include <avf/avf.h>
25 
26 #define AVF_MBOX_LEN 64
27 #define AVF_MBOX_BUF_SZ 512
28 #define AVF_RXQ_SZ 512
29 #define AVF_TXQ_SZ 512
30 #define AVF_ITR_INT 32
31 
32 #define PCI_VENDOR_ID_INTEL 0x8086
33 #define PCI_DEVICE_ID_INTEL_AVF 0x1889
34 #define PCI_DEVICE_ID_INTEL_X710_VF 0x154c
35 #define PCI_DEVICE_ID_INTEL_X722_VF 0x37cd
36 
37 avf_main_t avf_main;
38 
39 static pci_device_id_t avf_pci_device_ids[] = {
40  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_AVF},
41  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_X710_VF},
42  {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_X722_VF},
43  {0},
44 };
45 
46 const static char *virtchnl_event_names[] = {
47 #define _(v, n) [v] = #n,
48  foreach_virtchnl_event
49 #undef _
50 };
51 
52 const static char *virtchnl_link_speed_str[] = {
53 #define _(v, n, s) [v] = s,
54  foreach_virtchnl_link_speed
55 #undef _
56 };
57 
58 static inline void
59 avf_irq_0_disable (avf_device_t * ad)
60 {
61  u32 dyn_ctl0 = 0, icr0_ena = 0;
62 
63  dyn_ctl0 |= (3 << 3); /* 11b = No ITR update */
64 
65  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
66  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
67  avf_reg_flush (ad);
68 }
69 
70 static inline void
71 avf_irq_0_enable (avf_device_t * ad)
72 {
73  u32 dyn_ctl0 = 0, icr0_ena = 0;
74 
75  icr0_ena |= (1 << 30); /* [30] Admin Queue Enable */
76 
77  dyn_ctl0 |= (1 << 0); /* [0] Interrupt Enable */
78  dyn_ctl0 |= (1 << 1); /* [1] Clear PBA */
79  //dyn_ctl0 |= (3 << 3); /* [4:3] ITR Index, 11b = No ITR update */
80  dyn_ctl0 |= ((AVF_ITR_INT / 2) << 5); /* [16:5] ITR Interval in 2us steps */
81 
82  avf_irq_0_disable (ad);
83  avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
84  avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
85  avf_reg_flush (ad);
86 }
87 
88 static inline void
89 avf_irq_n_disable (avf_device_t * ad, u8 line)
90 {
91  u32 dyn_ctln = 0;
92 
93  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
94  avf_reg_flush (ad);
95 }
96 
97 static inline void
98 avf_irq_n_enable (avf_device_t * ad, u8 line)
99 {
100  u32 dyn_ctln = 0;
101 
102  dyn_ctln |= (1 << 0); /* [0] Interrupt Enable */
103  dyn_ctln |= (1 << 1); /* [1] Clear PBA */
104  dyn_ctln |= ((AVF_ITR_INT / 2) << 5); /* [16:5] ITR Interval in 2us steps */
105 
106  avf_irq_n_disable (ad, line);
107  avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
108  avf_reg_flush (ad);
109 }
110 
111 
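/* Post one descriptor (optionally with an indirect data buffer) on the admin
   transmit queue, advance the tail, and suspend-poll with exponential backoff
   until the PF marks the descriptor done or the wait budget is exhausted. */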
112 clib_error_t *
113 avf_aq_desc_enq (vlib_main_t * vm, avf_device_t * ad, avf_aq_desc_t * dt,
114  void *data, int len)
115 {
116  clib_error_t *err = 0;
117  avf_aq_desc_t *d, dc;
118  f64 t0, wait_time, suspend_time = AVF_AQ_ENQ_SUSPEND_TIME;
119 
120  d = &ad->atq[ad->atq_next_slot];
121  clib_memcpy_fast (d, dt, sizeof (avf_aq_desc_t));
122  d->flags |= AVF_AQ_F_RD | AVF_AQ_F_SI;
123  if (len)
124  d->datalen = len;
125  if (len)
126  {
127  u64 pa;
128  pa = ad->atq_bufs_pa + ad->atq_next_slot * AVF_MBOX_BUF_SZ;
129  d->addr_hi = (u32) (pa >> 32);
130  d->addr_lo = (u32) pa;
131  clib_memcpy_fast (ad->atq_bufs + ad->atq_next_slot * AVF_MBOX_BUF_SZ,
132  data, len);
133  d->flags |= AVF_AQ_F_BUF;
134  }
135 
136  if (ad->flags & AVF_DEVICE_F_ELOG)
137  clib_memcpy_fast (&dc, d, sizeof (avf_aq_desc_t));
138 
139  CLIB_MEMORY_BARRIER ();
140  ad->atq_next_slot = (ad->atq_next_slot + 1) % AVF_MBOX_LEN;
141  avf_reg_write (ad, AVF_ATQT, ad->atq_next_slot);
142  avf_reg_flush (ad);
143 
144  t0 = vlib_time_now (vm);
145 retry:
146  vlib_process_suspend (vm, suspend_time);
147  wait_time = vlib_time_now (vm) - t0;
148 
149  if (((d->flags & AVF_AQ_F_DD) == 0) || ((d->flags & AVF_AQ_F_CMP) == 0))
150  {
151  if (wait_time > AVF_AQ_ENQ_MAX_WAIT_TIME)
152  {
153  err = clib_error_return (0, "adminq enqueue timeout [opcode 0x%x]",
154  d->opcode);
155  goto done;
156  }
157  suspend_time *= 2;
158  goto retry;
159  }
160 
161  clib_memcpy_fast (dt, d, sizeof (avf_aq_desc_t));
162  if (d->flags & AVF_AQ_F_ERR)
163  return clib_error_return (0, "adminq enqueue error [opcode 0x%x, retval "
164  "%d]", d->opcode, d->retval);
165 
166 done:
167  if (ad->flags & AVF_DEVICE_F_ELOG)
168  {
169  /* *INDENT-OFF* */
170  ELOG_TYPE_DECLARE (el) =
171  {
172  .format = "avf[%d] aq enq: s_flags 0x%x r_flags 0x%x opcode 0x%x "
173  "datalen %d retval %d",
174  .format_args = "i4i2i2i2i2i2",
175  };
176  struct
177  {
178  u32 dev_instance;
179  u16 s_flags;
180  u16 r_flags;
181  u16 opcode;
182  u16 datalen;
183  u16 retval;
184  } *ed;
185  ed = ELOG_DATA (&vm->elog_main, el);
186  ed->dev_instance = ad->dev_instance;
187  ed->s_flags = dc.flags;
188  ed->r_flags = d->flags;
189  ed->opcode = dc.opcode;
190  ed->datalen = dc.datalen;
191  ed->retval = d->retval;
192  /* *INDENT-ON* */
193  }
194 
195  return err;
196 }
197 
198 clib_error_t *
199 avf_cmd_rx_ctl_reg_write (vlib_main_t * vm, avf_device_t * ad, u32 reg,
200  u32 val)
201 {
202  clib_error_t *err;
203  avf_aq_desc_t d = {.opcode = 0x207,.param1 = reg,.param3 = val };
204  err = avf_aq_desc_enq (vm, ad, &d, 0, 0);
205 
206  if (ad->flags & AVF_DEVICE_F_ELOG)
207  {
208  /* *INDENT-OFF* */
209  ELOG_TYPE_DECLARE (el) =
210  {
211  .format = "avf[%d] rx ctl reg write: reg 0x%x val 0x%x ",
212  .format_args = "i4i4i4",
213  };
214  struct
215  {
216  u32 dev_instance;
217  u32 reg;
218  u32 val;
219  } *ed;
220  ed = ELOG_DATA (&vm->elog_main, el);
221  ed->dev_instance = ad->dev_instance;
222  ed->reg = reg;
223  ed->val = val;
224  /* *INDENT-ON* */
225  }
226  return err;
227 }
228 
229 clib_error_t *
230 avf_rxq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 rxq_size)
231 {
232  clib_error_t *err;
233  avf_rxq_t *rxq;
234  u32 n_alloc, i;
235 
236  vec_validate_aligned (ad->rxqs, qid, CLIB_CACHE_LINE_BYTES);
237  rxq = vec_elt_at_index (ad->rxqs, qid);
238  rxq->size = rxq_size;
239  rxq->next = 0;
240  rxq->descs = vlib_physmem_alloc_aligned_on_numa (vm, rxq->size *
241  sizeof (avf_rx_desc_t),
242  2 * CLIB_CACHE_LINE_BYTES,
243  ad->numa_node);
244 
245  rxq->buffer_pool_index =
246  vlib_buffer_pool_get_default_for_numa (vm, ad->numa_node);
247 
248  if (rxq->descs == 0)
249  return vlib_physmem_last_error (vm);
250 
251  if ((err = vlib_pci_map_dma (vm, ad->pci_dev_handle, (void *) rxq->descs)))
252  return err;
253 
254  clib_memset ((void *) rxq->descs, 0, rxq->size * sizeof (avf_rx_desc_t));
255  vec_validate_aligned (rxq->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
256  rxq->qrx_tail = ad->bar0 + AVF_QRX_TAIL (qid);
257 
258  n_alloc = vlib_buffer_alloc_from_pool (vm, rxq->bufs, rxq->size - 8,
259  rxq->buffer_pool_index);
260 
261  if (n_alloc == 0)
262  return clib_error_return (0, "buffer allocation error");
263 
264  rxq->n_enqueued = n_alloc;
265  avf_rx_desc_t *d = rxq->descs;
266  for (i = 0; i < n_alloc; i++)
267  {
268  vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[i]);
269  if (ad->flags & AVF_DEVICE_F_VA_DMA)
270  d->qword[0] = vlib_buffer_get_va (b);
271  else
272  d->qword[0] = vlib_buffer_get_pa (vm, b);
273  d++;
274  }
275 
276  ad->n_rx_queues = clib_min (ad->num_queue_pairs, qid + 1);
277  return 0;
278 }
279 
280 clib_error_t *
281 avf_txq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 txq_size)
282 {
283  clib_error_t *err;
284  avf_txq_t *txq;
285 
286  if (qid >= ad->num_queue_pairs)
287  {
288  qid = qid % ad->num_queue_pairs;
289  txq = vec_elt_at_index (ad->txqs, qid);
290  if (txq->lock == 0)
291  clib_spinlock_init (&txq->lock);
292  ad->flags |= AVF_DEVICE_F_SHARED_TXQ_LOCK;
293  return 0;
294  }
295 
296  vec_validate_aligned (ad->txqs, qid, CLIB_CACHE_LINE_BYTES);
297  txq = vec_elt_at_index (ad->txqs, qid);
298  txq->size = txq_size;
299  txq->next = 0;
300  txq->descs = vlib_physmem_alloc_aligned_on_numa (vm, txq->size *
301  sizeof (avf_tx_desc_t),
302  2 * CLIB_CACHE_LINE_BYTES,
303  ad->numa_node);
304  if (txq->descs == 0)
305  return vlib_physmem_last_error (vm);
306 
307  if ((err = vlib_pci_map_dma (vm, ad->pci_dev_handle, (void *) txq->descs)))
308  return err;
309 
310  vec_validate_aligned (txq->bufs, txq->size, CLIB_CACHE_LINE_BYTES);
311  txq->qtx_tail = ad->bar0 + AVF_QTX_TAIL (qid);
312 
313  /* initialize ring of pending RS slots */
315 
316  ad->n_tx_queues = clib_min (ad->num_queue_pairs, qid + 1);
317  return 0;
318 }
319 
320 typedef struct
321 {
325 
326 void
327 avf_arq_slot_init (avf_device_t * ad, u16 slot)
328 {
329  avf_aq_desc_t *d;
330  u64 pa = ad->arq_bufs_pa + slot * AVF_MBOX_BUF_SZ;
331  d = &ad->arq[slot];
332  clib_memset (d, 0, sizeof (avf_aq_desc_t));
333  d->flags = AVF_AQ_F_BUF;
334  d->datalen = AVF_MBOX_BUF_SZ;
335  d->addr_hi = (u32) (pa >> 32);
336  d->addr_lo = (u32) pa;
337 }
338 
339 static inline uword
340 avf_dma_addr (vlib_main_t * vm, avf_device_t * ad, void *p)
341 {
342  return (ad->flags & AVF_DEVICE_F_VA_DMA) ?
343  pointer_to_uword (p) : vlib_physmem_get_pa (vm, p);
344 }
345 
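/* Program the VF mailbox (admin transmit/receive queue) base addresses,
   lengths and head/tail pointers, and pre-post the receive buffers. */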
346 static void
347 avf_adminq_init (vlib_main_t * vm, avf_device_t * ad)
348 {
349  u64 pa;
350  int i;
351 
352  /* VF MailBox Transmit */
353  clib_memset (ad->atq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
354  ad->atq_bufs_pa = avf_dma_addr (vm, ad, ad->atq_bufs);
355 
356  pa = avf_dma_addr (vm, ad, ad->atq);
357  avf_reg_write (ad, AVF_ATQT, 0); /* Tail */
358  avf_reg_write (ad, AVF_ATQH, 0); /* Head */
359  avf_reg_write (ad, AVF_ATQLEN, AVF_MBOX_LEN | (1ULL << 31)); /* len & ena */
360  avf_reg_write (ad, AVF_ATQBAL, (u32) pa); /* Base Address Low */
361  avf_reg_write (ad, AVF_ATQBAH, (u32) (pa >> 32)); /* Base Address High */
362 
363  /* VF MailBox Receive */
364  clib_memset (ad->arq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
365  ad->arq_bufs_pa = avf_dma_addr (vm, ad, ad->arq_bufs);
366 
367  for (i = 0; i < AVF_MBOX_LEN; i++)
368  avf_arq_slot_init (ad, i);
369 
370  pa = avf_dma_addr (vm, ad, ad->arq);
371 
372  avf_reg_write (ad, AVF_ARQH, 0); /* Head */
373  avf_reg_write (ad, AVF_ARQT, 0); /* Tail */
374  avf_reg_write (ad, AVF_ARQLEN, AVF_MBOX_LEN | (1ULL << 31)); /* len & ena */
375  avf_reg_write (ad, AVF_ARQBAL, (u32) pa); /* Base Address Low */
376  avf_reg_write (ad, AVF_ARQBAH, (u32) (pa >> 32)); /* Base Address High */
377  avf_reg_write (ad, AVF_ARQT, AVF_MBOX_LEN - 1); /* Tail */
378 
379  ad->atq_next_slot = 0;
380  ad->arq_next_slot = 0;
381 }
382 
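/* Send a virtchnl request to the PF over the admin queue and poll the
   receive queue for the matching reply; events (e.g. link change) that
   arrive in the meantime are queued on ad->events for the process node. */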
383 clib_error_t *
384 avf_send_to_pf (vlib_main_t * vm, avf_device_t * ad, virtchnl_ops_t op,
385  void *in, int in_len, void *out, int out_len)
386 {
387  clib_error_t *err;
388  avf_aq_desc_t *d, dt = {.opcode = 0x801,.v_opcode = op };
389  u32 head;
390  int n_retry = 5;
391 
392 
393  /* suppress the interrupt in the next adminq receive slot, as we are
394  going to wait for the response; we only need interrupts when an
395  event is received */
396  d = &ad->arq[ad->arq_next_slot];
397  d->flags |= AVF_AQ_F_SI;
398 
399  if ((err = avf_aq_desc_enq (vm, ad, &dt, in, in_len)))
400  return err;
401 
402 retry:
403  head = avf_get_u32 (ad->bar0, AVF_ARQH);
404 
405  if (ad->arq_next_slot == head)
406  {
407  if (--n_retry == 0)
408  return clib_error_return (0, "timeout");
409  vlib_process_suspend (vm, 10e-3);
410  goto retry;
411  }
412 
413  d = &ad->arq[ad->arq_next_slot];
414 
415  if (d->v_opcode == VIRTCHNL_OP_EVENT)
416  {
417  void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
418  virtchnl_pf_event_t *e;
419 
420  if ((d->datalen != sizeof (virtchnl_pf_event_t)) ||
421  ((d->flags & AVF_AQ_F_BUF) == 0))
422  return clib_error_return (0, "event message error");
423 
424  vec_add2 (ad->events, e, 1);
425  clib_memcpy_fast (e, buf, sizeof (virtchnl_pf_event_t));
426  avf_arq_slot_init (ad, ad->arq_next_slot);
427  ad->arq_next_slot++;
428  n_retry = 5;
429  goto retry;
430  }
431 
432  if (d->v_opcode != op)
433  {
434  err =
435  clib_error_return (0,
436  "unexpected message received [v_opcode = %u, "
437  "expected %u, v_retval %d]", d->v_opcode, op,
438  d->v_retval);
439  goto done;
440  }
441 
442  if (d->v_retval)
443  {
444  err = clib_error_return (0, "error [v_opcode = %u, v_retval %d]",
445  d->v_opcode, d->v_retval);
446  goto done;
447  }
448 
449  if (d->flags & AVF_AQ_F_BUF)
450  {
451  void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
452  clib_memcpy_fast (out, buf, out_len);
453  }
454 
455  avf_arq_slot_init (ad, ad->arq_next_slot);
456  avf_reg_write (ad, AVF_ARQT, ad->arq_next_slot);
457  avf_reg_flush (ad);
458  ad->arq_next_slot = (ad->arq_next_slot + 1) % AVF_MBOX_LEN;
459 
460 done:
461 
462  if (ad->flags & AVF_DEVICE_F_ELOG)
463  {
464  /* *INDENT-OFF* */
465  ELOG_TYPE_DECLARE (el) =
466  {
467  .format = "avf[%d] send to pf: v_opcode %s (%d) v_retval 0x%x",
468  .format_args = "i4t4i4i4",
469  .n_enum_strings = VIRTCHNL_N_OPS,
470  .enum_strings = {
471 #define _(v, n) [v] = #n,
472  foreach_virtchnl_op
473 #undef _
474  },
475  };
476  struct
477  {
478  u32 dev_instance;
479  u32 v_opcode;
480  u32 v_opcode_val;
481  u32 v_retval;
482  } *ed;
483  ed = ELOG_DATA (&vm->elog_main, el);
484  ed->dev_instance = ad->dev_instance;
485  ed->v_opcode = op;
486  ed->v_opcode_val = op;
487  ed->v_retval = d->v_retval;
488  /* *INDENT-ON* */
489  }
490  return err;
491 }
492 
493 clib_error_t *
494 avf_op_version (vlib_main_t * vm, avf_device_t * ad,
495  virtchnl_version_info_t * ver)
496 {
497  clib_error_t *err = 0;
498  virtchnl_version_info_t myver = {
499  .major = VIRTCHNL_VERSION_MAJOR,
500  .minor = VIRTCHNL_VERSION_MINOR,
501  };
502 
503  avf_log_debug (ad, "version: major %u minor %u", myver.major, myver.minor);
504 
505  err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_VERSION, &myver,
506  sizeof (virtchnl_version_info_t), ver,
507  sizeof (virtchnl_version_info_t));
508 
509  if (err)
510  return err;
511 
512  return err;
513 }
514 
515 clib_error_t *
516 avf_op_get_vf_resources (vlib_main_t * vm, avf_device_t * ad,
517  virtchnl_vf_resource_t * res)
518 {
519  clib_error_t *err = 0;
520  u32 bitmap = (VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_PF |
521  VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_VLAN |
522  VIRTCHNL_VF_OFFLOAD_RX_POLLING);
523 
524  avf_log_debug (ad, "get_vf_resources: bitmap 0x%x", bitmap);
525  err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_VF_RESOURCES, &bitmap,
526  sizeof (u32), res, sizeof (virtchnl_vf_resource_t));
527 
528  if (err == 0)
529  {
530  int i;
531  avf_log_debug (ad, "get_vf_resources: num_vsis %u num_queue_pairs %u "
532  "max_vectors %u max_mtu %u vf_offload_flags 0x%04x "
533  "rss_key_size %u rss_lut_size %u",
534  res->num_vsis, res->num_queue_pairs, res->max_vectors,
535  res->max_mtu, res->vf_offload_flags, res->rss_key_size,
536  res->rss_lut_size);
537  for (i = 0; i < res->num_vsis; i++)
538  avf_log_debug (ad, "get_vf_resources_vsi[%u]: vsi_id %u "
539  "num_queue_pairs %u vsi_type %u qset_handle %u "
540  "default_mac_addr %U", i,
541  res->vsi_res[i].vsi_id,
542  res->vsi_res[i].num_queue_pairs,
543  res->vsi_res[i].vsi_type,
544  res->vsi_res[i].qset_handle,
545  format_ethernet_address,
546  res->vsi_res[i].default_mac_addr);
547  }
548 
549  return err;
550 }
551 
552 clib_error_t *
553 avf_op_config_rss_lut (vlib_main_t * vm, avf_device_t * ad)
554 {
555  int msg_len = sizeof (virtchnl_rss_lut_t) + ad->rss_lut_size - 1;
556  int i;
557  u8 msg[msg_len];
558  virtchnl_rss_lut_t *rl;
559 
560  clib_memset (msg, 0, msg_len);
561  rl = (virtchnl_rss_lut_t *) msg;
562  rl->vsi_id = ad->vsi_id;
563  rl->lut_entries = ad->rss_lut_size;
564  for (i = 0; i < ad->rss_lut_size; i++)
565  rl->lut[i] = i % ad->n_rx_queues;
566 
567  avf_log_debug (ad, "config_rss_lut: vsi_id %u rss_lut_size %u lut 0x%U",
568  rl->vsi_id, rl->lut_entries, format_hex_bytes_no_wrap,
569  rl->lut, rl->lut_entries);
570 
571  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_LUT, msg, msg_len, 0,
572  0);
573 }
574 
575 clib_error_t *
576 avf_op_config_rss_key (vlib_main_t * vm, avf_device_t * ad)
577 {
578  int msg_len = sizeof (virtchnl_rss_key_t) + ad->rss_key_size - 1;
579  int i;
580  u8 msg[msg_len];
581  virtchnl_rss_key_t *rk;
582 
583  clib_memset (msg, 0, msg_len);
584  rk = (virtchnl_rss_key_t *) msg;
585  rk->vsi_id = ad->vsi_id;
586  rk->key_len = ad->rss_key_size;
587  u32 seed = random_default_seed ();
588  for (i = 0; i < ad->rss_key_size; i++)
589  rk->key[i] = (u8) random_u32 (&seed);
590 
591  avf_log_debug (ad, "config_rss_key: vsi_id %u rss_key_size %u key 0x%U",
592  rk->vsi_id, rk->key_len, format_hex_bytes_no_wrap, rk->key,
593  rk->key_len);
594 
595  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_RSS_KEY, msg, msg_len, 0,
596  0);
597 }
598 
599 clib_error_t *
600 avf_op_disable_vlan_stripping (vlib_main_t * vm, avf_device_t * ad)
601 {
602  avf_log_debug (ad, "disable_vlan_stripping");
603 
604  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 0, 0, 0,
605  0);
606 }
607 
608 clib_error_t *
609 avf_config_promisc_mode (vlib_main_t * vm, avf_device_t * ad, int is_enable)
610 {
611  virtchnl_promisc_info_t pi = { 0 };
612 
613  pi.vsi_id = ad->vsi_id;
614 
615  if (is_enable)
616  pi.flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
617 
618  avf_log_debug (ad, "config_promisc_mode: unicast %s multicast %s",
619  pi.flags & FLAG_VF_UNICAST_PROMISC ? "on" : "off",
620  pi.flags & FLAG_VF_MULTICAST_PROMISC ? "on" : "off");
621 
622  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, &pi,
623  sizeof (virtchnl_promisc_info_t), 0, 0);
624 }
625 
626 
627 clib_error_t *
628 avf_op_config_vsi_queues (vlib_main_t * vm, avf_device_t * ad)
629 {
630  int i;
631  int n_qp = clib_max (vec_len (ad->rxqs), vec_len (ad->txqs));
632  int msg_len = sizeof (virtchnl_vsi_queue_config_info_t) + n_qp *
633  sizeof (virtchnl_queue_pair_info_t);
634  u8 msg[msg_len];
635  virtchnl_vsi_queue_config_info_t *ci;
636 
637  clib_memset (msg, 0, msg_len);
638  ci = (virtchnl_vsi_queue_config_info_t *) msg;
639  ci->vsi_id = ad->vsi_id;
640  ci->num_queue_pairs = n_qp;
641 
642  avf_log_debug (ad, "config_vsi_queues: vsi_id %u num_queue_pairs %u",
643  ad->vsi_id, ci->num_queue_pairs);
644 
645  for (i = 0; i < n_qp; i++)
646  {
647  virtchnl_txq_info_t *txq = &ci->qpair[i].txq;
648  virtchnl_rxq_info_t *rxq = &ci->qpair[i].rxq;
649 
650  rxq->vsi_id = ad->vsi_id;
651  rxq->queue_id = i;
652  rxq->max_pkt_size = ETHERNET_MAX_PACKET_BYTES;
653  if (i < vec_len (ad->rxqs))
654  {
655  avf_rxq_t *q = vec_elt_at_index (ad->rxqs, i);
656  rxq->ring_len = q->size;
657  rxq->databuffer_size = vlib_buffer_get_default_data_size (vm);
658  rxq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
659  avf_reg_write (ad, AVF_QRX_TAIL (i), q->size - 1);
660  }
661  avf_log_debug (ad, "config_vsi_queues_rx[%u]: max_pkt_size %u "
662  "ring_len %u databuffer_size %u dma_ring_addr 0x%llx",
663  i, rxq->max_pkt_size, rxq->ring_len,
664  rxq->databuffer_size, rxq->dma_ring_addr);
665 
666  txq->vsi_id = ad->vsi_id;
667  txq->queue_id = i;
668  if (i < vec_len (ad->txqs))
669  {
670  avf_txq_t *q = vec_elt_at_index (ad->txqs, i);
671  txq->ring_len = q->size;
672  txq->dma_ring_addr = avf_dma_addr (vm, ad, (void *) q->descs);
673  }
674  avf_log_debug (ad, "config_vsi_queues_tx[%u]: ring_len %u "
675  "dma_ring_addr 0x%llx", i, txq->ring_len,
676  txq->dma_ring_addr);
677  }
678 
679  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_VSI_QUEUES, msg, msg_len,
680  0, 0);
681 }
682 
683 clib_error_t *
684 avf_op_config_irq_map (vlib_main_t * vm, avf_device_t * ad)
685 {
686  int count = 1;
687  int msg_len = sizeof (virtchnl_irq_map_info_t) +
688  count * sizeof (virtchnl_vector_map_t);
689  u8 msg[msg_len];
690  virtchnl_irq_map_info_t *imi;
691 
692  clib_memset (msg, 0, msg_len);
693  imi = (virtchnl_irq_map_info_t *) msg;
694  imi->num_vectors = count;
695 
696  imi->vecmap[0].vector_id = 1;
697  imi->vecmap[0].vsi_id = ad->vsi_id;
698  imi->vecmap[0].rxq_map = (1 << ad->n_rx_queues) - 1;
699  imi->vecmap[0].txq_map = (1 << ad->n_tx_queues) - 1;
700 
701  avf_log_debug (ad, "config_irq_map: vsi_id %u vector_id %u rxq_map %u",
702  ad->vsi_id, imi->vecmap[0].vector_id,
703  imi->vecmap[0].rxq_map);
704 
705  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_IRQ_MAP, msg, msg_len, 0,
706  0);
707 }
708 
709 clib_error_t *
710 avf_op_add_eth_addr (vlib_main_t * vm, avf_device_t * ad, u8 count, u8 * macs)
711 {
712  int msg_len =
713  sizeof (virtchnl_ether_addr_list_t) +
714  count * sizeof (virtchnl_ether_addr_t);
715  u8 msg[msg_len];
716  virtchnl_ether_addr_list_t *al;
717  int i;
718 
719  clib_memset (msg, 0, msg_len);
720  al = (virtchnl_ether_addr_list_t *) msg;
721  al->vsi_id = ad->vsi_id;
722  al->num_elements = count;
723 
724  avf_log_debug (ad, "add_eth_addr: vsi_id %u num_elements %u",
725  ad->vsi_id, al->num_elements);
726 
727  for (i = 0; i < count; i++)
728  {
729  clib_memcpy_fast (&al->list[i].addr, macs + i * 6, 6);
730  avf_log_debug (ad, "add_eth_addr[%u]: %U", i,
731  format_ethernet_address, &al->list[i].addr);
732  }
733  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ADD_ETH_ADDR, msg, msg_len, 0,
734  0);
735 }
736 
737 clib_error_t *
738 avf_op_enable_queues (vlib_main_t * vm, avf_device_t * ad, u32 rx, u32 tx)
739 {
740  virtchnl_queue_select_t qs = { 0 };
741  int i = 0;
742  qs.vsi_id = ad->vsi_id;
743  qs.rx_queues = rx;
744  qs.tx_queues = tx;
745 
746  avf_log_debug (ad, "enable_queues: vsi_id %u rx_queues %u tx_queues %u",
747  ad->vsi_id, qs.rx_queues, qs.tx_queues);
748 
749  while (rx)
750  {
751  if (rx & (1 << i))
752  {
753  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
754  avf_reg_write (ad, AVF_QRX_TAIL (i), rxq->n_enqueued);
755  rx &= ~(1 << i);
756  }
757  i++;
758  }
759  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ENABLE_QUEUES, &qs,
760  sizeof (virtchnl_queue_select_t), 0, 0);
761 }
762 
763 clib_error_t *
764 avf_op_get_stats (vlib_main_t * vm, avf_device_t * ad,
765  virtchnl_eth_stats_t * es)
766 {
767  virtchnl_queue_select_t qs = { 0 };
768  qs.vsi_id = ad->vsi_id;
769 
770  avf_log_debug (ad, "get_stats: vsi_id %u", ad->vsi_id);
771 
772  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_STATS,
773  &qs, sizeof (virtchnl_queue_select_t),
774  es, sizeof (virtchnl_eth_stats_t));
775 }
776 
777 clib_error_t *
778 avf_device_reset (vlib_main_t * vm, avf_device_t * ad)
779 {
780  avf_aq_desc_t d = { 0 };
781  clib_error_t *error;
782  u32 rstat;
783  int n_retry = 20;
784 
785  avf_log_debug (ad, "reset");
786 
787  d.opcode = 0x801;
788  d.v_opcode = VIRTCHNL_OP_RESET_VF;
789  if ((error = avf_aq_desc_enq (vm, ad, &d, 0, 0)))
790  return error;
791 
792 retry:
793  vlib_process_suspend (vm, 10e-3);
794  rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
795 
796  if (rstat == 2 || rstat == 3)
797  return 0;
798 
799  if (--n_retry == 0)
800  {
801  avf_log_err (ad, "reset failed");
802  return clib_error_return (0, "reset failed (timeout)");
803  }
804 
805  goto retry;
806 }
807 
808 clib_error_t *
809 avf_request_queues (vlib_main_t * vm, avf_device_t * ad, u16 num_queue_pairs)
810 {
811  virtchnl_vf_res_request_t res_req = { 0 };
812  clib_error_t *error;
813  u32 rstat;
814  int n_retry = 20;
815 
816  res_req.num_queue_pairs = num_queue_pairs;
817 
818  avf_log_debug (ad, "request_queues: num_queue_pairs %u", num_queue_pairs);
819 
820  error = avf_send_to_pf (vm, ad, VIRTCHNL_OP_REQUEST_QUEUES, &res_req,
821  sizeof (virtchnl_vf_res_request_t), &res_req,
822  sizeof (virtchnl_vf_res_request_t));
823 
824  /*
825  * if PF responds, the request failed
826  * else PF initiates a restart and avf_send_to_pf returns an error
827  */
828  if (!error)
829  {
830  return clib_error_return (0, "requested more than %u queue pairs",
831  res_req.num_queue_pairs);
832  }
833 
834 retry:
835  vlib_process_suspend (vm, 10e-3);
836  rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
837 
838  if ((rstat == VIRTCHNL_VFR_COMPLETED) || (rstat == VIRTCHNL_VFR_VFACTIVE))
839  goto done;
840 
841  if (--n_retry == 0)
842  return clib_error_return (0, "reset failed (timeout)");
843 
844  goto retry;
845 
846 done:
847  return NULL;
848 }
849 
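/* Bring the VF through the virtchnl handshake: version and resource
   negotiation, VLAN stripping off, queue/RSS/IRQ configuration, MAC
   registration, and finally queue enable. */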
850 clib_error_t *
851 avf_device_init (vlib_main_t * vm, avf_main_t * am, avf_device_t * ad,
852  avf_create_if_args_t * args)
853 {
854  virtchnl_version_info_t ver = { 0 };
855  virtchnl_vf_resource_t res = { 0 };
856  clib_error_t *error;
857  vlib_thread_main_t *tm = vlib_get_thread_main ();
858  int i;
859 
860  avf_adminq_init (vm, ad);
861 
862  if ((error = avf_request_queues (vm, ad, clib_max (tm->n_vlib_mains,
863  args->rxq_num))))
864  {
865  /* we failed to get more queues, but still we want to proceed */
866  clib_error_free (error);
867 
868  if ((error = avf_device_reset (vm, ad)))
869  return error;
870  }
871 
872  avf_adminq_init (vm, ad);
873 
874  /*
875  * OP_VERSION
876  */
877  if ((error = avf_op_version (vm, ad, &ver)))
878  return error;
879 
880  if (ver.major != VIRTCHNL_VERSION_MAJOR ||
881  ver.minor != VIRTCHNL_VERSION_MINOR)
882  return clib_error_return (0, "incompatible protocol version "
883  "(remote %d.%d)", ver.major, ver.minor);
884 
885  /*
886  * OP_GET_VF_RESOURCES
887  */
888  if ((error = avf_op_get_vf_resources (vm, ad, &res)))
889  return error;
890 
891  if (res.num_vsis != 1 || res.vsi_res[0].vsi_type != VIRTCHNL_VSI_SRIOV)
892  return clib_error_return (0, "unexpected GET_VF_RESOURCE reply received");
893 
894  ad->vsi_id = res.vsi_res[0].vsi_id;
895  ad->feature_bitmap = res.vf_offload_flags;
896  ad->num_queue_pairs = res.num_queue_pairs;
897  ad->max_vectors = res.max_vectors;
898  ad->max_mtu = res.max_mtu;
899  ad->rss_key_size = res.rss_key_size;
900  ad->rss_lut_size = res.rss_lut_size;
901 
902  clib_memcpy_fast (ad->hwaddr, res.vsi_res[0].default_mac_addr, 6);
903 
904  /*
905  * Disable VLAN stripping
906  */
907  if ((error = avf_op_disable_vlan_stripping (vm, ad)))
908  return error;
909 
910  /*
911  * Init Queues
912  */
913  if (args->rxq_num == 0)
914  {
915  args->rxq_num = 1;
916  }
917  else if (args->rxq_num > ad->num_queue_pairs)
918  {
919  args->rxq_num = ad->num_queue_pairs;
920  avf_log_warn (ad, "Requested more rx queues than queue pairs available. "
921  "Using %u rx queues.", args->rxq_num);
922  }
923 
924  for (i = 0; i < args->rxq_num; i++)
925  if ((error = avf_rxq_init (vm, ad, i, args->rxq_size)))
926  return error;
927 
928  for (i = 0; i < tm->n_vlib_mains; i++)
929  if ((error = avf_txq_init (vm, ad, i, args->txq_size)))
930  return error;
931 
932  if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
933  (error = avf_op_config_rss_lut (vm, ad)))
934  return error;
935 
936  if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_RSS_PF) &&
937  (error = avf_op_config_rss_key (vm, ad)))
938  return error;
939 
940  if ((error = avf_op_config_vsi_queues (vm, ad)))
941  return error;
942 
943  if ((error = avf_op_config_irq_map (vm, ad)))
944  return error;
945 
946  avf_irq_0_enable (ad);
947  for (i = 0; i < ad->n_rx_queues; i++)
948  avf_irq_n_enable (ad, i);
949 
950  if ((error = avf_op_add_eth_addr (vm, ad, 1, ad->hwaddr)))
951  return error;
952 
953  if ((error = avf_op_enable_queues (vm, ad, pow2_mask (ad->n_rx_queues),
954  pow2_mask (ad->n_tx_queues))))
955  return error;
956 
957  ad->flags |= AVF_DEVICE_F_INITIALIZED;
958  return error;
959 }
960 
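/* Periodic / interrupt-driven housekeeping for one device: sanity-check the
   admin queues, poll statistics, and translate queued virtchnl events
   (notably link changes) into vnet interface state. */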
961 void
962 avf_process_one_device (vlib_main_t * vm, avf_device_t * ad, int is_irq)
963 {
964  avf_main_t *am = &avf_main;
965  vnet_main_t *vnm = vnet_get_main ();
966  virtchnl_pf_event_t *e;
967  u32 r;
968 
969  if (ad->flags & AVF_DEVICE_F_ERROR)
970  return;
971 
972  if ((ad->flags & AVF_DEVICE_F_INITIALIZED) == 0)
973  return;
974 
975  ASSERT (ad->error == 0);
976 
977  /* do not process device in reset state */
978  r = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
979  if (r != VIRTCHNL_VFR_VFACTIVE)
980  return;
981 
982  r = avf_get_u32 (ad->bar0, AVF_ARQLEN);
983  if ((r & 0xf0000000) != (1ULL << 31))
984  {
985  ad->error = clib_error_return (0, "arq not enabled, arqlen = 0x%x", r);
986  avf_log_err (ad, "error: %U", format_clib_error, ad->error);
987  goto error;
988  }
989 
990  r = avf_get_u32 (ad->bar0, AVF_ATQLEN);
991  if ((r & 0xf0000000) != (1ULL << 31))
992  {
993  ad->error = clib_error_return (0, "atq not enabled, atqlen = 0x%x", r);
994  avf_log_err (ad, "error: %U", format_clib_error, ad->error);
995  goto error;
996  }
997 
998  if (is_irq == 0)
999  avf_op_get_stats (vm, ad, &ad->eth_stats);
1000 
1001  /* *INDENT-OFF* */
1002  vec_foreach (e, ad->events)
1003  {
1004  avf_log_debug (ad, "event: %s (%u) sev %d",
1005  virtchnl_event_names[e->event], e->event, e->severity);
1006  if (e->event == VIRTCHNL_EVENT_LINK_CHANGE)
1007  {
1008  int link_up = e->event_data.link_event.link_status;
1009  virtchnl_link_speed_t speed = e->event_data.link_event.link_speed;
1010  u32 flags = 0;
1011  u32 kbps = 0;
1012 
1013  avf_log_debug (ad, "event_link_change: status %d speed '%s' (%d)",
1014  link_up,
1015  speed < ARRAY_LEN (virtchnl_link_speed_str) ?
1016  virtchnl_link_speed_str[speed] : "unknown", speed);
1017 
1018  if (link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) == 0)
1019  {
1020  ad->flags |= AVF_DEVICE_F_LINK_UP;
1021  flags |= (VNET_HW_INTERFACE_FLAG_FULL_DUPLEX |
1022  VNET_HW_INTERFACE_FLAG_LINK_UP);
1023  if (speed == VIRTCHNL_LINK_SPEED_40GB)
1024  kbps = 40000000;
1025  else if (speed == VIRTCHNL_LINK_SPEED_25GB)
1026  kbps = 25000000;
1027  else if (speed == VIRTCHNL_LINK_SPEED_10GB)
1028  kbps = 10000000;
1029  else if (speed == VIRTCHNL_LINK_SPEED_5GB)
1030  kbps = 5000000;
1031  else if (speed == VIRTCHNL_LINK_SPEED_2_5GB)
1032  kbps = 2500000;
1033  else if (speed == VIRTCHNL_LINK_SPEED_1GB)
1034  kbps = 1000000;
1035  else if (speed == VIRTCHNL_LINK_SPEED_100MB)
1036  kbps = 100000;
1037  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, flags);
1038  vnet_hw_interface_set_link_speed (vnm, ad->hw_if_index, kbps);
1039  ad->link_speed = speed;
1040  }
1041  else if (!link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) != 0)
1042  {
1043  ad->flags &= ~AVF_DEVICE_F_LINK_UP;
1044  ad->link_speed = 0;
1045  }
1046 
1047  if (ad->flags & AVF_DEVICE_F_ELOG)
1048  {
1049  ELOG_TYPE_DECLARE (el) =
1050  {
1051  .format = "avf[%d] link change: link_status %d "
1052  "link_speed %d",
1053  .format_args = "i4i1i1",
1054  };
1055  struct
1056  {
1057  u32 dev_instance;
1058  u8 link_status;
1059  u8 link_speed;
1060  } *ed;
1061  ed = ELOG_DATA (&vm->elog_main, el);
1062  ed->dev_instance = ad->dev_instance;
1063  ed->link_status = link_up;
1064  ed->link_speed = speed;
1065  }
1066  }
1067  else
1068  {
1069  if (ad->flags & AVF_DEVICE_F_ELOG)
1070  {
1071  ELOG_TYPE_DECLARE (el) =
1072  {
1073  .format = "avf[%d] unknown event: event %d severity %d",
1074  .format_args = "i4i4i1i1",
1075  };
1076  struct
1077  {
1078  u32 dev_instance;
1079  u32 event;
1080  u32 severity;
1081  } *ed;
1082  ed = ELOG_DATA (&vm->elog_main, el);
1083  ed->dev_instance = ad->dev_instance;
1084  ed->event = e->event;
1085  ed->severity = e->severity;
1086  }
1087  }
1088  }
1089  /* *INDENT-ON* */
1090  vec_reset_length (ad->events);
1091 
1092  return;
1093 
1094 error:
1095  ad->flags |= AVF_DEVICE_F_ERROR;
1096  ASSERT (ad->error != 0);
1097  vlib_log_err (am->log_class, "%U", format_clib_error, ad->error);
1098 }
1099 
1100 static u32
1101 avf_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
1102 {
1103  vlib_main_t *vm = vlib_get_main ();
1104  avf_main_t *am = &avf_main;
1105  avf_device_t *ad = vec_elt_at_index (am->devices, hw->dev_instance);
1106  if (ETHERNET_INTERFACE_FLAG_CONFIG_PROMISC (flags))
1107  {
1108  clib_error_t *error;
1109  int promisc_enabled = (flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL) != 0;
1110  u32 new_flags = promisc_enabled ?
1111  ad->flags | AVF_DEVICE_F_PROMISC : ad->flags & ~AVF_DEVICE_F_PROMISC;
1112 
1113  if (new_flags == ad->flags)
1114  return flags;
1115 
1116  if ((error = avf_config_promisc_mode (vm, ad, promisc_enabled)))
1117  {
1118  avf_log_err (ad, "%s: %U", format_clib_error, error);
1119  clib_error_free (error);
1120  return 0;
1121  }
1122 
1123  ad->flags = new_flags;
1124  }
1125  return 0;
1126 }
1127 
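/* Process node: wakes up periodically, or on admin-queue interrupt /
   start / stop events, and runs avf_process_one_device for every device. */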
1128 static uword
1129 avf_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
1130 {
1131  avf_main_t *am = &avf_main;
1132  avf_device_t *ad;
1133  uword *event_data = 0, event_type;
1134  int enabled = 0, irq;
1135  f64 last_run_duration = 0;
1136  f64 last_periodic_time = 0;
1137 
1138  while (1)
1139  {
1140  if (enabled)
1141  vlib_process_wait_for_event_or_clock (vm, 5.0 - last_run_duration);
1142  else
1143  vlib_process_wait_for_event (vm);
1144 
1145  event_type = vlib_process_get_events (vm, &event_data);
1146  vec_reset_length (event_data);
1147  irq = 0;
1148 
1149  switch (event_type)
1150  {
1151  case ~0:
1152  last_periodic_time = vlib_time_now (vm);
1153  break;
1154  case AVF_PROCESS_EVENT_START:
1155  enabled = 1;
1156  break;
1157  case AVF_PROCESS_EVENT_STOP:
1158  enabled = 0;
1159  continue;
1160  case AVF_PROCESS_EVENT_AQ_INT:
1161  irq = 1;
1162  break;
1163  default:
1164  ASSERT (0);
1165  }
1166 
1167  /* *INDENT-OFF* */
1168  pool_foreach (ad, am->devices,
1169  {
1170  avf_process_one_device (vm, ad, irq);
1171  });
1172  /* *INDENT-ON* */
1173  last_run_duration = vlib_time_now (vm) - last_periodic_time;
1174  }
1175  return 0;
1176 }
1177 
1178 /* *INDENT-OFF* */
1179 VLIB_REGISTER_NODE (avf_process_node, static)  = {
1180  .function = avf_process,
1181  .type = VLIB_NODE_TYPE_PROCESS,
1182  .name = "avf-process",
1183 };
1184 /* *INDENT-ON* */
1185 
1186 static void
1187 avf_irq_0_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
1188 {
1189  avf_main_t *am = &avf_main;
1190  uword pd = vlib_pci_get_private_data (vm, h);
1191  avf_device_t *ad = pool_elt_at_index (am->devices, pd);
1192  u32 icr0;
1193 
1194  icr0 = avf_reg_read (ad, AVFINT_ICR0);
1195 
1196  if (ad->flags & AVF_DEVICE_F_ELOG)
1197  {
1198  /* *INDENT-OFF* */
1199  ELOG_TYPE_DECLARE (el) =
1200  {
1201  .format = "avf[%d] irq 0: icr0 0x%x",
1202  .format_args = "i4i4",
1203  };
1204  /* *INDENT-ON* */
1205  struct
1206  {
1207  u32 dev_instance;
1208  u32 icr0;
1209  } *ed;
1210 
1211  ed = ELOG_DATA (&vm->elog_main, el);
1212  ed->dev_instance = ad->dev_instance;
1213  ed->icr0 = icr0;
1214  }
1215 
1216  avf_irq_0_enable (ad);
1217 
1218  /* bit 30 - Send/Receive Admin queue interrupt indication */
1219  if (icr0 & (1 << 30))
1220  vlib_process_signal_event (vm, avf_process_node.index,
1221  AVF_PROCESS_EVENT_AQ_INT, 0);
1222 }
1223 
1224 static void
1225 avf_irq_n_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
1226 {
1227  vnet_main_t *vnm = vnet_get_main ();
1228  avf_main_t *am = &avf_main;
1229  uword pd = vlib_pci_get_private_data (vm, h);
1230  avf_device_t *ad = pool_elt_at_index (am->devices, pd);
1231  u16 qid;
1232  int i;
1233 
1234  if (ad->flags & AVF_DEVICE_F_ELOG)
1235  {
1236  /* *INDENT-OFF* */
1237  ELOG_TYPE_DECLARE (el) =
1238  {
1239  .format = "avf[%d] irq %d: received",
1240  .format_args = "i4i2",
1241  };
1242  /* *INDENT-ON* */
1243  struct
1244  {
1245  u32 dev_instance;
1246  u16 line;
1247  } *ed;
1248 
1249  ed = ELOG_DATA (&vm->elog_main, el);
1250  ed->dev_instance = ad->dev_instance;
1251  ed->line = line;
1252  }
1253 
1254  qid = line - 1;
1255  if (vec_len (ad->rxqs) > qid && ad->rxqs[qid].int_mode != 0)
1256  vnet_device_input_set_interrupt_pending (vnm, ad->hw_if_index, qid);
1257  for (i = 0; i < vec_len (ad->rxqs); i++)
1258  avf_irq_n_enable (ad, i);
1259 }
1260 
1261 void
1262 avf_delete_if (vlib_main_t * vm, avf_device_t * ad)
1263 {
1264  vnet_main_t *vnm = vnet_get_main ();
1265  avf_main_t *am = &avf_main;
1266  int i;
1267 
1268  if (ad->hw_if_index)
1269  {
1270  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
1271  vnet_hw_interface_unassign_rx_thread (vnm, ad->hw_if_index, 0);
1272  ethernet_delete_interface (vnm, ad->hw_if_index);
1273  }
1274 
1275  vlib_pci_device_close (vm, ad->pci_dev_handle);
1276 
1277  vlib_physmem_free (vm, ad->atq);
1278  vlib_physmem_free (vm, ad->arq);
1279  vlib_physmem_free (vm, ad->atq_bufs);
1280  vlib_physmem_free (vm, ad->arq_bufs);
1281 
1282  /* *INDENT-OFF* */
1283  vec_foreach_index (i, ad->rxqs)
1284  {
1285  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
1286  vlib_physmem_free (vm, (void *) rxq->descs);
1287  if (rxq->n_enqueued)
1288  vlib_buffer_free_from_ring (vm, rxq->bufs, rxq->next, rxq->size,
1289  rxq->n_enqueued);
1290  vec_free (rxq->bufs);
1291  }
1292  /* *INDENT-ON* */
1293  vec_free (ad->rxqs);
1294 
1295  /* *INDENT-OFF* */
1296  vec_foreach_index (i, ad->txqs)
1297  {
1298  avf_txq_t *txq = vec_elt_at_index (ad->txqs, i);
1299  vlib_physmem_free (vm, (void *) txq->descs);
1300  if (txq->n_enqueued)
1301  {
1302  u16 first = (txq->next - txq->n_enqueued) & (txq->size -1);
1303  vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,
1304  txq->n_enqueued);
1305  }
1306  vec_free (txq->bufs);
1307  clib_ring_free (txq->rs_slots);
1308  }
1309  /* *INDENT-ON* */
1310  vec_free (ad->txqs);
1311  vec_free (ad->name);
1312 
1313  clib_error_free (ad->error);
1314  clib_memset (ad, 0, sizeof (*ad));
1315  pool_put (am->devices, ad);
1316 }
1317 
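/* Create an AVF interface: open and map the PCI VF, set up MSI-X handlers
   and mailbox DMA memory, run avf_device_init, then register the ethernet
   interface and hand its rx queues to the input node. */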
1318 void
1319 avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args)
1320 {
1321  vnet_main_t *vnm = vnet_get_main ();
1322  avf_main_t *am = &avf_main;
1323  avf_device_t *ad;
1324  vlib_pci_dev_handle_t h;
1325  clib_error_t *error = 0;
1326  int i;
1327 
1328  /* check input args */
1329  args->rxq_size = (args->rxq_size == 0) ? AVF_RXQ_SZ : args->rxq_size;
1330  args->txq_size = (args->txq_size == 0) ? AVF_TXQ_SZ : args->txq_size;
1331 
1332  if ((args->rxq_size & (args->rxq_size - 1))
1333  || (args->txq_size & (args->txq_size - 1)))
1334  {
1335  args->rv = VNET_API_ERROR_INVALID_VALUE;
1336  args->error =
1337  clib_error_return (error, "queue size must be a power of two");
1338  return;
1339  }
1340 
1341  pool_get (am->devices, ad);
1342  ad->dev_instance = ad - am->devices;
1343  ad->per_interface_next_index = ~0;
1344  ad->name = vec_dup (args->name);
1345 
1346  if (args->enable_elog)
1347  ad->flags |= AVF_DEVICE_F_ELOG;
1348 
1349  if ((error = vlib_pci_device_open (vm, &args->addr, avf_pci_device_ids,
1350  &h)))
1351  {
1352  pool_put (am->devices, ad);
1353  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
1354  args->error =
1355  clib_error_return (error, "pci-addr %U", format_vlib_pci_addr,
1356  &args->addr);
1357  return;
1358  }
1359  ad->pci_dev_handle = h;
1360  ad->pci_addr = args->addr;
1361  ad->numa_node = vlib_pci_get_numa_node (vm, h);
1362 
1363  vlib_pci_set_private_data (vm, h, ad->dev_instance);
1364 
1365  if ((error = vlib_pci_bus_master_enable (vm, h)))
1366  goto error;
1367 
1368  if ((error = vlib_pci_map_region (vm, h, 0, &ad->bar0)))
1369  goto error;
1370 
1371  if ((error = vlib_pci_register_msix_handler (vm, h, 0, 1,
1372  &avf_irq_0_handler)))
1373  goto error;
1374 
1375  if ((error = vlib_pci_register_msix_handler (vm, h, 1, 1,
1376  &avf_irq_n_handler)))
1377  goto error;
1378 
1379  if ((error = vlib_pci_enable_msix_irq (vm, h, 0, 2)))
1380  goto error;
1381 
1382  ad->atq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
1383  AVF_MBOX_LEN,
1384  CLIB_CACHE_LINE_BYTES,
1385  ad->numa_node);
1386  if (ad->atq == 0)
1387  {
1388  error = vlib_physmem_last_error (vm);
1389  goto error;
1390  }
1391 
1392  if ((error = vlib_pci_map_dma (vm, h, ad->atq)))
1393  goto error;
1394 
1395  ad->arq = vlib_physmem_alloc_aligned_on_numa (vm, sizeof (avf_aq_desc_t) *
1396  AVF_MBOX_LEN,
1397  CLIB_CACHE_LINE_BYTES,
1398  ad->numa_node);
1399  if (ad->arq == 0)
1400  {
1401  error = vlib_physmem_last_error (vm);
1402  goto error;
1403  }
1404 
1405  if ((error = vlib_pci_map_dma (vm, h, ad->arq)))
1406  goto error;
1407 
1408  ad->atq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
1409  AVF_MBOX_LEN,
1410  CLIB_CACHE_LINE_BYTES,
1411  ad->numa_node);
1412  if (ad->atq_bufs == 0)
1413  {
1414  error = vlib_physmem_last_error (vm);
1415  goto error;
1416  }
1417 
1418  if ((error = vlib_pci_map_dma (vm, h, ad->atq_bufs)))
1419  goto error;
1420 
1421  ad->arq_bufs = vlib_physmem_alloc_aligned_on_numa (vm, AVF_MBOX_BUF_SZ *
1422  AVF_MBOX_LEN,
1423  CLIB_CACHE_LINE_BYTES,
1424  ad->numa_node);
1425  if (ad->arq_bufs == 0)
1426  {
1427  error = vlib_physmem_last_error (vm);
1428  goto error;
1429  }
1430 
1431  if ((error = vlib_pci_map_dma (vm, h, ad->arq_bufs)))
1432  goto error;
1433 
1434  if ((error = vlib_pci_intr_enable (vm, h)))
1435  goto error;
1436 
1437  if (vlib_pci_supports_virtual_addr_dma (vm, h))
1438  ad->flags |= AVF_DEVICE_F_VA_DMA;
1439 
1440  if ((error = avf_device_init (vm, am, ad, args)))
1441  goto error;
1442 
1443  /* create interface */
1444  error = ethernet_register_interface (vnm, avf_device_class.index,
1445  ad->dev_instance, ad->hwaddr,
1446  &ad->hw_if_index, avf_flag_change);
1447 
1448  if (error)
1449  goto error;
1450 
1451  vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, ad->hw_if_index);
1452  args->sw_if_index = ad->sw_if_index = sw->sw_if_index;
1453 
1454  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, ad->hw_if_index);
1455  hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
1456  vnet_hw_interface_set_input_node (vnm, ad->hw_if_index,
1457  avf_input_node.index);
1458 
1459  for (i = 0; i < ad->n_rx_queues; i++)
1460  vnet_hw_interface_assign_rx_thread (vnm, ad->hw_if_index, i, ~0);
1461 
1462  if (pool_elts (am->devices) == 1)
1463  vlib_process_signal_event (vm, avf_process_node.index,
1464  AVF_PROCESS_EVENT_START, 0);
1465 
1466  return;
1467 
1468 error:
1469  avf_delete_if (vm, ad);
1470  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
1471  args->error = clib_error_return (error, "pci-addr %U",
1472  format_vlib_pci_addr, &args->addr);
1473  avf_log_err (ad, "error: %U", format_clib_error, args->error);
1474 }
1475 
1476 static clib_error_t *
1477 avf_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
1478 {
1479  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
1480  avf_main_t *am = &avf_main;
1481  avf_device_t *ad = vec_elt_at_index (am->devices, hi->dev_instance);
1482  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
1483 
1484  if (ad->flags & AVF_DEVICE_F_ERROR)
1485  return clib_error_return (0, "device is in error state");
1486 
1487  if (is_up)
1488  {
1489  vnet_hw_interface_set_flags (vnm, ad->hw_if_index,
1490  VNET_HW_INTERFACE_FLAG_LINK_UP);
1491  ad->flags |= AVF_DEVICE_F_ADMIN_UP;
1492  }
1493  else
1494  {
1495  vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
1496  ad->flags &= ~AVF_DEVICE_F_ADMIN_UP;
1497  }
1498  return 0;
1499 }
1500 
1501 static clib_error_t *
1502 avf_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
1503  vnet_hw_interface_rx_mode mode)
1504 {
1505  avf_main_t *am = &avf_main;
1506  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1507  avf_device_t *ad = vec_elt_at_index (am->devices, hw->dev_instance);
1508  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
1509 
1510  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
1511  rxq->int_mode = 0;
1512  else
1513  rxq->int_mode = 1;
1514 
1515  return 0;
1516 }
1517 
1518 static void
1519 avf_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
1520  u32 node_index)
1521 {
1522  avf_main_t *am = &avf_main;
1523  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1524  avf_device_t *ad = vec_elt_at_index (am->devices, hw->dev_instance);
1525 
1526  /* Shut off redirection */
1527  if (node_index == ~0)
1528  {
1529  ad->per_interface_next_index = node_index;
1530  return;
1531  }
1532 
1533  ad->per_interface_next_index =
1534  vlib_node_add_next (vlib_get_main (), avf_input_node.index, node_index);
1535 }
1536 
1537 static char *avf_tx_func_error_strings[] = {
1538 #define _(n,s) s,
1539  foreach_avf_tx_func_error
1540 #undef _
1541 };
1542 
1543 static void
1544 avf_clear_hw_interface_counters (u32 instance)
1545 {
1546  avf_main_t *am = &avf_main;
1547  avf_device_t *ad = vec_elt_at_index (am->devices, instance);
1548  clib_memcpy_fast (&ad->last_cleared_eth_stats,
1549  &ad->eth_stats, sizeof (ad->eth_stats));
1550 }
1551 
1552 /* *INDENT-OFF* */
1553 VNET_DEVICE_CLASS (avf_device_class,) =
1554 {
1555  .name = "Adaptive Virtual Function (AVF) interface",
1556  .clear_counters = avf_clear_hw_interface_counters,
1557  .format_device = format_avf_device,
1558  .format_device_name = format_avf_device_name,
1559  .admin_up_down_function = avf_interface_admin_up_down,
1560  .rx_mode_change_function = avf_interface_rx_mode_change,
1561  .rx_redirect_to_node = avf_set_interface_next_node,
1562  .tx_function_n_errors = AVF_TX_N_ERROR,
1563  .tx_function_error_strings = avf_tx_func_error_strings,
1564 };
1565 /* *INDENT-ON* */
1566 
1567 clib_error_t *
1568 avf_init (vlib_main_t * vm)
1569 {
1570  avf_main_t *am = &avf_main;
1571  vlib_thread_main_t *tm = vlib_get_thread_main ();
1572 
1573  vec_validate_aligned (am->per_thread_data, tm->n_vlib_mains - 1,
1574  CLIB_CACHE_LINE_BYTES);
1575 
1576  am->log_class = vlib_log_register_class ("avf", 0);
1577  vlib_log_debug (am->log_class, "initialized");
1578 
1579  return 0;
1580 }
1581 
1582 /* *INDENT-OFF* */
1583 VLIB_INIT_FUNCTION (avf_init) =
1584 {
1585  .runs_after = VLIB_INITS ("pci_bus_init"),
1586 };
1587 /* *INDENT-ON* */
1588 
1589 /*
1590  * fd.io coding-style-patch-verification: ON
1591  *
1592  * Local Variables:
1593  * eval: (c-set-style "gnu")
1594  * End:
1595  */