1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4 #include "iavf.h"
5 #include "iavf_prototype.h"
6 #include "iavf_client.h"
7 /* All iavf tracepoints are defined by the include below, which must
8 * be included exactly once across the whole kernel with
9 * CREATE_TRACE_POINTS defined
10 */
11 #define CREATE_TRACE_POINTS
12 #include "iavf_trace.h"
13
14 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
15 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
16 static int iavf_close(struct net_device *netdev);
17 static void iavf_init_get_resources(struct iavf_adapter *adapter);
18 static int iavf_check_reset_complete(struct iavf_hw *hw);
19
20 char iavf_driver_name[] = "iavf";
21 static const char iavf_driver_string[] =
22 "Intel(R) Ethernet Adaptive Virtual Function Network Driver";
23
24 static const char iavf_copyright[] =
25 "Copyright (c) 2013 - 2018 Intel Corporation.";
26
27 /* iavf_pci_tbl - PCI Device ID Table
28 *
29 * Wildcard entries (PCI_ANY_ID) should come last
30 * Last entry must be all 0s
31 *
32 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
33 * Class, Class Mask, private data (not used) }
34 */
35 static const struct pci_device_id iavf_pci_tbl[] = {
36 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
37 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
38 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
39 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
40 /* required last entry */
41 {0, }
42 };
43
44 MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
45
46 MODULE_ALIAS("i40evf");
47 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
48 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
49 MODULE_LICENSE("GPL v2");
50
51 static const struct net_device_ops iavf_netdev_ops;
52 struct workqueue_struct *iavf_wq;
53
54 int iavf_status_to_errno(enum iavf_status status)
55 {
56 switch (status) {
57 case IAVF_SUCCESS:
58 return 0;
59 case IAVF_ERR_PARAM:
60 case IAVF_ERR_MAC_TYPE:
61 case IAVF_ERR_INVALID_MAC_ADDR:
62 case IAVF_ERR_INVALID_LINK_SETTINGS:
63 case IAVF_ERR_INVALID_PD_ID:
64 case IAVF_ERR_INVALID_QP_ID:
65 case IAVF_ERR_INVALID_CQ_ID:
66 case IAVF_ERR_INVALID_CEQ_ID:
67 case IAVF_ERR_INVALID_AEQ_ID:
68 case IAVF_ERR_INVALID_SIZE:
69 case IAVF_ERR_INVALID_ARP_INDEX:
70 case IAVF_ERR_INVALID_FPM_FUNC_ID:
71 case IAVF_ERR_QP_INVALID_MSG_SIZE:
72 case IAVF_ERR_INVALID_FRAG_COUNT:
73 case IAVF_ERR_INVALID_ALIGNMENT:
74 case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
75 case IAVF_ERR_INVALID_IMM_DATA_SIZE:
76 case IAVF_ERR_INVALID_VF_ID:
77 case IAVF_ERR_INVALID_HMCFN_ID:
78 case IAVF_ERR_INVALID_PBLE_INDEX:
79 case IAVF_ERR_INVALID_SD_INDEX:
80 case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
81 case IAVF_ERR_INVALID_SD_TYPE:
82 case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
83 case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
84 case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
85 return -EINVAL;
86 case IAVF_ERR_NVM:
87 case IAVF_ERR_NVM_CHECKSUM:
88 case IAVF_ERR_PHY:
89 case IAVF_ERR_CONFIG:
90 case IAVF_ERR_UNKNOWN_PHY:
91 case IAVF_ERR_LINK_SETUP:
92 case IAVF_ERR_ADAPTER_STOPPED:
93 case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
94 case IAVF_ERR_AUTONEG_NOT_COMPLETE:
95 case IAVF_ERR_RESET_FAILED:
96 case IAVF_ERR_BAD_PTR:
97 case IAVF_ERR_SWFW_SYNC:
98 case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
99 case IAVF_ERR_QUEUE_EMPTY:
100 case IAVF_ERR_FLUSHED_QUEUE:
101 case IAVF_ERR_OPCODE_MISMATCH:
102 case IAVF_ERR_CQP_COMPL_ERROR:
103 case IAVF_ERR_BACKING_PAGE_ERROR:
104 case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
105 case IAVF_ERR_MEMCPY_FAILED:
106 case IAVF_ERR_SRQ_ENABLED:
107 case IAVF_ERR_ADMIN_QUEUE_ERROR:
108 case IAVF_ERR_ADMIN_QUEUE_FULL:
109 case IAVF_ERR_BAD_IWARP_CQE:
110 case IAVF_ERR_NVM_BLANK_MODE:
111 case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
112 case IAVF_ERR_DIAG_TEST_FAILED:
113 case IAVF_ERR_FIRMWARE_API_VERSION:
114 case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
115 return -EIO;
116 case IAVF_ERR_DEVICE_NOT_SUPPORTED:
117 return -ENODEV;
118 case IAVF_ERR_NO_AVAILABLE_VSI:
119 case IAVF_ERR_RING_FULL:
120 return -ENOSPC;
121 case IAVF_ERR_NO_MEMORY:
122 return -ENOMEM;
123 case IAVF_ERR_TIMEOUT:
124 case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
125 return -ETIMEDOUT;
126 case IAVF_ERR_NOT_IMPLEMENTED:
127 case IAVF_NOT_SUPPORTED:
128 return -EOPNOTSUPP;
129 case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
130 return -EALREADY;
131 case IAVF_ERR_NOT_READY:
132 return -EBUSY;
133 case IAVF_ERR_BUF_TOO_SHORT:
134 return -EMSGSIZE;
135 }
136
137 return -EIO;
138 }
139
140 int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
141 {
142 switch (v_status) {
143 case VIRTCHNL_STATUS_SUCCESS:
144 return 0;
145 case VIRTCHNL_STATUS_ERR_PARAM:
146 case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
147 return -EINVAL;
148 case VIRTCHNL_STATUS_ERR_NO_MEMORY:
149 return -ENOMEM;
150 case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
151 case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
152 case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
153 return -EIO;
154 case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
155 return -EOPNOTSUPP;
156 }
157
158 return -EIO;
159 }
160
161 /**
162 * iavf_pdev_to_adapter - go from pci_dev to adapter
163 * @pdev: pci_dev pointer
164 */
165 static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
166 {
167 return netdev_priv(pci_get_drvdata(pdev));
168 }
169
170 /**
171 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
172 * @hw: pointer to the HW structure
173 * @mem: ptr to mem struct to fill out
174 * @size: size of memory requested
175 * @alignment: what to align the allocation to
176 **/
177 enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
178 struct iavf_dma_mem *mem,
179 u64 size, u32 alignment)
180 {
181 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
182
183 if (!mem)
184 return IAVF_ERR_PARAM;
185
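	/* Round the size up to the requested alignment before backing it
	 * with coherent DMA memory.
	 */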
186 mem->size = ALIGN(size, alignment);
187 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
188 (dma_addr_t *)&mem->pa, GFP_KERNEL);
189 if (mem->va)
190 return 0;
191 else
192 return IAVF_ERR_NO_MEMORY;
193 }
194
195 /**
196 * iavf_free_dma_mem_d - OS specific memory free for shared code
197 * @hw: pointer to the HW structure
198 * @mem: ptr to mem struct to free
199 **/
200 enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
201 struct iavf_dma_mem *mem)
202 {
203 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
204
205 if (!mem || !mem->va)
206 return IAVF_ERR_PARAM;
207 dma_free_coherent(&adapter->pdev->dev, mem->size,
208 mem->va, (dma_addr_t)mem->pa);
209 return 0;
210 }
211
212 /**
213 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
214 * @hw: pointer to the HW structure
215 * @mem: ptr to mem struct to fill out
216 * @size: size of memory requested
217 **/
218 enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
219 struct iavf_virt_mem *mem, u32 size)
220 {
221 if (!mem)
222 return IAVF_ERR_PARAM;
223
224 mem->size = size;
225 mem->va = kzalloc(size, GFP_KERNEL);
226
227 if (mem->va)
228 return 0;
229 else
230 return IAVF_ERR_NO_MEMORY;
231 }
232
233 /**
234 * iavf_free_virt_mem_d - OS specific memory free for shared code
235 * @hw: pointer to the HW structure
236 * @mem: ptr to mem struct to free
237 **/
238 enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
239 struct iavf_virt_mem *mem)
240 {
241 if (!mem)
242 return IAVF_ERR_PARAM;
243
244 /* it's ok to kfree a NULL pointer */
245 kfree(mem->va);
246
247 return 0;
248 }
249
250 /**
251 * iavf_lock_timeout - try to lock mutex but give up after timeout
252 * @lock: mutex that should be locked
253 * @msecs: timeout in msecs
254 *
255 * Returns 0 on success, negative on failure
256 **/
257 int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
258 {
259 unsigned int wait, delay = 10;
260
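	/* Poll with mutex_trylock() every 10 ms until the lock is acquired
	 * or the timeout expires.
	 */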
261 for (wait = 0; wait < msecs; wait += delay) {
262 if (mutex_trylock(lock))
263 return 0;
264
265 msleep(delay);
266 }
267
268 return -1;
269 }
270
271 /**
272 * iavf_schedule_reset - Set the flags and schedule a reset event
273 * @adapter: board private structure
274 **/
275 void iavf_schedule_reset(struct iavf_adapter *adapter)
276 {
277 if (!(adapter->flags &
278 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
279 adapter->flags |= IAVF_FLAG_RESET_NEEDED;
280 queue_work(iavf_wq, &adapter->reset_task);
281 }
282 }
283
284 /**
285 * iavf_schedule_request_stats - Set the flags and schedule statistics request
286 * @adapter: board private structure
287 *
288 * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
289 * request and refresh ethtool stats
290 **/
291 void iavf_schedule_request_stats(struct iavf_adapter *adapter)
292 {
293 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
294 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
295 }
296
297 /**
298 * iavf_tx_timeout - Respond to a Tx Hang
299 * @netdev: network interface device structure
300 * @txqueue: queue number that is timing out
301 **/
302 static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
303 {
304 struct iavf_adapter *adapter = netdev_priv(netdev);
305
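	/* A hung Tx queue cannot be recovered in place; count the event and
	 * request a full VF reset from the reset task.
	 */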
306 adapter->tx_timeout_count++;
307 iavf_schedule_reset(adapter);
308 }
309
310 /**
311 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
312 * @adapter: board private structure
313 **/
314 static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
315 {
316 struct iavf_hw *hw = &adapter->hw;
317
318 if (!adapter->msix_entries)
319 return;
320
321 wr32(hw, IAVF_VFINT_DYN_CTL01, 0);
322
323 iavf_flush(hw);
324
325 synchronize_irq(adapter->msix_entries[0].vector);
326 }
327
328 /**
329 * iavf_misc_irq_enable - Enable default interrupt generation settings
330 * @adapter: board private structure
331 **/
332 static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
333 {
334 struct iavf_hw *hw = &adapter->hw;
335
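	/* Enable interrupts on MSI-X vector 0 and unmask the admin queue
	 * interrupt cause.
	 */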
336 wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
337 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
338 wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
339
340 iavf_flush(hw);
341 }
342
343 /**
344 * iavf_irq_disable - Mask off interrupt generation on the NIC
345 * @adapter: board private structure
346 **/
347 static void iavf_irq_disable(struct iavf_adapter *adapter)
348 {
349 int i;
350 struct iavf_hw *hw = &adapter->hw;
351
352 if (!adapter->msix_entries)
353 return;
354
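	/* Mask all queue vectors; vector 0 (admin queue) is masked
	 * separately by iavf_misc_irq_disable().
	 */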
355 for (i = 1; i < adapter->num_msix_vectors; i++) {
356 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
357 synchronize_irq(adapter->msix_entries[i].vector);
358 }
359 iavf_flush(hw);
360 }
361
362 /**
363 * iavf_irq_enable_queues - Enable interrupt for specified queues
364 * @adapter: board private structure
365 * @mask: bitmap of queues to enable
366 **/
367 void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
368 {
369 struct iavf_hw *hw = &adapter->hw;
370 int i;
371
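	/* Vector 0 is reserved for the admin queue, so queue vectors start at
	 * index 1 and bit (i - 1) of @mask selects MSI-X vector i.
	 */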
372 for (i = 1; i < adapter->num_msix_vectors; i++) {
373 if (mask & BIT(i - 1)) {
374 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
375 IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
376 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
377 }
378 }
379 }
380
381 /**
382 * iavf_irq_enable - Enable default interrupt generation settings
383 * @adapter: board private structure
384 * @flush: boolean value whether to run rd32()
385 **/
386 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
387 {
388 struct iavf_hw *hw = &adapter->hw;
389
390 iavf_misc_irq_enable(adapter);
391 iavf_irq_enable_queues(adapter, ~0);
392
393 if (flush)
394 iavf_flush(hw);
395 }
396
397 /**
398 * iavf_msix_aq - Interrupt handler for vector 0
399 * @irq: interrupt number
400 * @data: pointer to netdev
401 **/
402 static irqreturn_t iavf_msix_aq(int irq, void *data)
403 {
404 struct net_device *netdev = data;
405 struct iavf_adapter *adapter = netdev_priv(netdev);
406 struct iavf_hw *hw = &adapter->hw;
407
408 /* handle non-queue interrupts, these reads clear the registers */
409 rd32(hw, IAVF_VFINT_ICR01);
410 rd32(hw, IAVF_VFINT_ICR0_ENA1);
411
412 if (adapter->state != __IAVF_REMOVE)
413 /* schedule work on the private workqueue */
414 queue_work(iavf_wq, &adapter->adminq_task);
415
416 return IRQ_HANDLED;
417 }
418
419 /**
420 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
421 * @irq: interrupt number
422 * @data: pointer to a q_vector
423 **/
424 static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
425 {
426 struct iavf_q_vector *q_vector = data;
427
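	/* Nothing to clean if no rings are attached to this vector */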
428 if (!q_vector->tx.ring && !q_vector->rx.ring)
429 return IRQ_HANDLED;
430
431 napi_schedule_irqoff(&q_vector->napi);
432
433 return IRQ_HANDLED;
434 }
435
436 /**
437 * iavf_map_vector_to_rxq - associate irqs with rx queues
438 * @adapter: board private structure
439 * @v_idx: interrupt number
440 * @r_idx: queue number
441 **/
442 static void
443 iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
444 {
445 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
446 struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
447 struct iavf_hw *hw = &adapter->hw;
448
449 rx_ring->q_vector = q_vector;
450 rx_ring->next = q_vector->rx.ring;
451 rx_ring->vsi = &adapter->vsi;
452 q_vector->rx.ring = rx_ring;
453 q_vector->rx.count++;
454 q_vector->rx.next_update = jiffies + 1;
455 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
456 q_vector->ring_mask |= BIT(r_idx);
457 wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
458 q_vector->rx.current_itr >> 1);
459 q_vector->rx.current_itr = q_vector->rx.target_itr;
460 }
461
462 /**
463 * iavf_map_vector_to_txq - associate irqs with tx queues
464 * @adapter: board private structure
465 * @v_idx: interrupt number
466 * @t_idx: queue number
467 **/
468 static void
469 iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
470 {
471 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
472 struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
473 struct iavf_hw *hw = &adapter->hw;
474
475 tx_ring->q_vector = q_vector;
476 tx_ring->next = q_vector->tx.ring;
477 tx_ring->vsi = &adapter->vsi;
478 q_vector->tx.ring = tx_ring;
479 q_vector->tx.count++;
480 q_vector->tx.next_update = jiffies + 1;
481 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
482 q_vector->num_ringpairs++;
483 wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
484 q_vector->tx.target_itr >> 1);
485 q_vector->tx.current_itr = q_vector->tx.target_itr;
486 }
487
488 /**
489 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
490 * @adapter: board private structure to initialize
491 *
492 * This function maps descriptor rings to the queue-specific vectors
493 * we were allotted through the MSI-X enabling code. Ideally, we'd have
494 * one vector per ring/queue, but on a constrained vector budget, we
495 * group the rings as "efficiently" as possible. You would add new
496 * mapping configurations in here.
497 **/
498 static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
499 {
500 int rings_remaining = adapter->num_active_queues;
501 int ridx = 0, vidx = 0;
502 int q_vectors;
503
504 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
505
506 for (; ridx < rings_remaining; ridx++) {
507 iavf_map_vector_to_rxq(adapter, vidx, ridx);
508 iavf_map_vector_to_txq(adapter, vidx, ridx);
509
510 /* In the case where we have more queues than vectors, continue
511 * round-robin on vectors until all queues are mapped.
512 */
513 if (++vidx >= q_vectors)
514 vidx = 0;
515 }
516
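	/* Ask the PF (via the watchdog task) to program this queue-to-vector
	 * mapping.
	 */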
517 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
518 }
519
520 /**
521 * iavf_irq_affinity_notify - Callback for affinity changes
522 * @notify: context as to what irq was changed
523 * @mask: the new affinity mask
524 *
525 * This is a callback function used by the irq_set_affinity_notifier function
526 * so that we may register to receive changes to the irq affinity masks.
527 **/
528 static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
529 const cpumask_t *mask)
530 {
531 struct iavf_q_vector *q_vector =
532 container_of(notify, struct iavf_q_vector, affinity_notify);
533
534 cpumask_copy(&q_vector->affinity_mask, mask);
535 }
536
537 /**
538 * iavf_irq_affinity_release - Callback for affinity notifier release
539 * @ref: internal core kernel usage
540 *
541 * This is a callback function used by the irq_set_affinity_notifier function
542 * to inform the current notification subscriber that they will no longer
543 * receive notifications.
544 **/
545 static void iavf_irq_affinity_release(struct kref *ref) {}
546
547 /**
548 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
549 * @adapter: board private structure
550 * @basename: device basename
551 *
552 * Allocates MSI-X vectors for tx and rx handling, and requests
553 * interrupts from the kernel.
554 **/
555 static int
556 iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
557 {
558 unsigned int vector, q_vectors;
559 unsigned int rx_int_idx = 0, tx_int_idx = 0;
560 int irq_num, err;
561 int cpu;
562
563 iavf_irq_disable(adapter);
564 /* Decrement for Other and TCP Timer vectors */
565 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
566
567 for (vector = 0; vector < q_vectors; vector++) {
568 struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];
569
570 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
571
572 if (q_vector->tx.ring && q_vector->rx.ring) {
573 snprintf(q_vector->name, sizeof(q_vector->name),
574 "iavf-%s-TxRx-%u", basename, rx_int_idx++);
575 tx_int_idx++;
576 } else if (q_vector->rx.ring) {
577 snprintf(q_vector->name, sizeof(q_vector->name),
578 "iavf-%s-rx-%u", basename, rx_int_idx++);
579 } else if (q_vector->tx.ring) {
580 snprintf(q_vector->name, sizeof(q_vector->name),
581 "iavf-%s-tx-%u", basename, tx_int_idx++);
582 } else {
583 /* skip this unused q_vector */
584 continue;
585 }
586 err = request_irq(irq_num,
587 iavf_msix_clean_rings,
588 0,
589 q_vector->name,
590 q_vector);
591 if (err) {
592 dev_info(&adapter->pdev->dev,
593 "Request_irq failed, error: %d\n", err);
594 goto free_queue_irqs;
595 }
596 /* register for affinity change notifications */
597 q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
598 q_vector->affinity_notify.release =
599 iavf_irq_affinity_release;
600 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
601 /* Spread the IRQ affinity hints across online CPUs. Note that
602 * get_cpu_mask returns a mask with a permanent lifetime so
603 * it's safe to use as a hint for irq_update_affinity_hint.
604 */
605 cpu = cpumask_local_spread(q_vector->v_idx, -1);
606 irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
607 }
608
609 return 0;
610
611 free_queue_irqs:
612 while (vector) {
613 vector--;
614 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
615 irq_set_affinity_notifier(irq_num, NULL);
616 irq_update_affinity_hint(irq_num, NULL);
617 free_irq(irq_num, &adapter->q_vectors[vector]);
618 }
619 return err;
620 }
621
622 /**
623 * iavf_request_misc_irq - Initialize MSI-X interrupts
624 * @adapter: board private structure
625 *
626 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
627 * vector is only for the admin queue, and stays active even when the netdev
628 * is closed.
629 **/
630 static int iavf_request_misc_irq(struct iavf_adapter *adapter)
631 {
632 struct net_device *netdev = adapter->netdev;
633 int err;
634
635 snprintf(adapter->misc_vector_name,
636 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
637 dev_name(&adapter->pdev->dev));
638 err = request_irq(adapter->msix_entries[0].vector,
639 &iavf_msix_aq, 0,
640 adapter->misc_vector_name, netdev);
641 if (err) {
642 dev_err(&adapter->pdev->dev,
643 "request_irq for %s failed: %d\n",
644 adapter->misc_vector_name, err);
645 free_irq(adapter->msix_entries[0].vector, netdev);
646 }
647 return err;
648 }
649
650 /**
651 * iavf_free_traffic_irqs - Free MSI-X interrupts
652 * @adapter: board private structure
653 *
654 * Frees all MSI-X vectors other than 0.
655 **/
656 static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
657 {
658 int vector, irq_num, q_vectors;
659
660 if (!adapter->msix_entries)
661 return;
662
663 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
664
665 for (vector = 0; vector < q_vectors; vector++) {
666 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
667 irq_set_affinity_notifier(irq_num, NULL);
668 irq_update_affinity_hint(irq_num, NULL);
669 free_irq(irq_num, &adapter->q_vectors[vector]);
670 }
671 }
672
673 /**
674 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
675 * @adapter: board private structure
676 *
677 * Frees MSI-X vector 0.
678 **/
679 static void iavf_free_misc_irq(struct iavf_adapter *adapter)
680 {
681 struct net_device *netdev = adapter->netdev;
682
683 if (!adapter->msix_entries)
684 return;
685
686 free_irq(adapter->msix_entries[0].vector, netdev);
687 }
688
689 /**
690 * iavf_configure_tx - Configure Transmit Unit after Reset
691 * @adapter: board private structure
692 *
693 * Configure the Tx unit of the MAC after a reset.
694 **/
695 static void iavf_configure_tx(struct iavf_adapter *adapter)
696 {
697 struct iavf_hw *hw = &adapter->hw;
698 int i;
699
700 for (i = 0; i < adapter->num_active_queues; i++)
701 adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
702 }
703
704 /**
705 * iavf_configure_rx - Configure Receive Unit after Reset
706 * @adapter: board private structure
707 *
708 * Configure the Rx unit of the MAC after a reset.
709 **/
710 static void iavf_configure_rx(struct iavf_adapter *adapter)
711 {
712 unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
713 struct iavf_hw *hw = &adapter->hw;
714 int i;
715
716 /* Legacy Rx will always default to a 2048 buffer size. */
717 #if (PAGE_SIZE < 8192)
718 if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
719 struct net_device *netdev = adapter->netdev;
720
721 /* For jumbo frames on systems with 4K pages we have to use
722 * an order 1 page, so we might as well increase the size
723 * of our Rx buffer to make better use of the available space
724 */
725 rx_buf_len = IAVF_RXBUFFER_3072;
726
727 /* We use a 1536 buffer size for configurations with
728 * standard Ethernet mtu. On x86 this gives us enough room
729 * for shared info and 192 bytes of padding.
730 */
731 if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
732 (netdev->mtu <= ETH_DATA_LEN))
733 rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
734 }
735 #endif
736
737 for (i = 0; i < adapter->num_active_queues; i++) {
738 adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
739 adapter->rx_rings[i].rx_buf_len = rx_buf_len;
740
741 if (adapter->flags & IAVF_FLAG_LEGACY_RX)
742 clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
743 else
744 set_ring_build_skb_enabled(&adapter->rx_rings[i]);
745 }
746 }
747
748 /**
749 * iavf_find_vlan - Search filter list for specific vlan filter
750 * @adapter: board private structure
751 * @vlan: vlan tag
752 *
753 * Returns ptr to the filter object or NULL. Must be called while holding the
754 * mac_vlan_list_lock.
755 **/
756 static struct
757 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
758 struct iavf_vlan vlan)
759 {
760 struct iavf_vlan_filter *f;
761
762 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
763 if (f->vlan.vid == vlan.vid &&
764 f->vlan.tpid == vlan.tpid)
765 return f;
766 }
767
768 return NULL;
769 }
770
771 /**
772 * iavf_add_vlan - Add a vlan filter to the list
773 * @adapter: board private structure
774 * @vlan: VLAN tag
775 *
776 * Returns ptr to the filter object or NULL when no memory available.
777 **/
778 static struct
779 iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
780 struct iavf_vlan vlan)
781 {
782 struct iavf_vlan_filter *f = NULL;
783
784 spin_lock_bh(&adapter->mac_vlan_list_lock);
785
786 f = iavf_find_vlan(adapter, vlan);
787 if (!f) {
788 f = kzalloc(sizeof(*f), GFP_ATOMIC);
789 if (!f)
790 goto clearout;
791
792 f->vlan = vlan;
793
794 list_add_tail(&f->list, &adapter->vlan_filter_list);
795 f->add = true;
796 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
797 }
798
799 clearout:
800 spin_unlock_bh(&adapter->mac_vlan_list_lock);
801 return f;
802 }
803
804 /**
805 * iavf_del_vlan - Remove a vlan filter from the list
806 * @adapter: board private structure
807 * @vlan: VLAN tag
808 **/
809 static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
810 {
811 struct iavf_vlan_filter *f;
812
813 spin_lock_bh(&adapter->mac_vlan_list_lock);
814
815 f = iavf_find_vlan(adapter, vlan);
816 if (f) {
817 f->remove = true;
818 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
819 }
820
821 spin_unlock_bh(&adapter->mac_vlan_list_lock);
822 }
823
824 /**
825 * iavf_restore_filters
826 * @adapter: board private structure
827 *
828 * Restore existing non-MAC filters when the VF netdev comes back up
829 **/
830 static void iavf_restore_filters(struct iavf_adapter *adapter)
831 {
832 u16 vid;
833
834 /* re-add all VLAN filters */
835 for_each_set_bit(vid, adapter->vsi.active_cvlans, VLAN_N_VID)
836 iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021Q));
837
838 for_each_set_bit(vid, adapter->vsi.active_svlans, VLAN_N_VID)
839 iavf_add_vlan(adapter, IAVF_VLAN(vid, ETH_P_8021AD));
840 }
841
842 /**
843 * iavf_get_num_vlans_added - get number of VLANs added
844 * @adapter: board private structure
845 */
846 u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
847 {
848 return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) +
849 bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID);
850 }
851
852 /**
853 * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
854 * @adapter: board private structure
855 *
856 * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
857 * do not impose a limit, as that maintains current behavior; for
858 * VIRTCHNL_VF_OFFLOAD_VLAN_V2, use the maximum number of filters allowed by the PF.
859 **/
860 static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
861 {
862 /* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
863 * never been a limit on the VF driver side
864 */
865 if (VLAN_ALLOWED(adapter))
866 return VLAN_N_VID;
867 else if (VLAN_V2_ALLOWED(adapter))
868 return adapter->vlan_v2_caps.filtering.max_filters;
869
870 return 0;
871 }
872
873 /**
874 * iavf_max_vlans_added - check if maximum VLANs allowed already exist
875 * @adapter: board private structure
876 **/
877 static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
878 {
879 if (iavf_get_num_vlans_added(adapter) <
880 iavf_get_max_vlans_allowed(adapter))
881 return false;
882
883 return true;
884 }
885
886 /**
887 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
888 * @netdev: network device struct
889 * @proto: unused protocol data
890 * @vid: VLAN tag
891 **/
892 static int iavf_vlan_rx_add_vid(struct net_device *netdev,
893 __always_unused __be16 proto, u16 vid)
894 {
895 struct iavf_adapter *adapter = netdev_priv(netdev);
896
897 if (!VLAN_FILTERING_ALLOWED(adapter))
898 return -EIO;
899
900 if (iavf_max_vlans_added(adapter)) {
901 netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
902 iavf_get_max_vlans_allowed(adapter));
903 return -EIO;
904 }
905
906 if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
907 return -ENOMEM;
908
909 return 0;
910 }
911
912 /**
913 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
914 * @netdev: network device struct
915 * @proto: unused protocol data
916 * @vid: VLAN tag
917 **/
918 static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
919 __always_unused __be16 proto, u16 vid)
920 {
921 struct iavf_adapter *adapter = netdev_priv(netdev);
922
923 iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
924 if (proto == cpu_to_be16(ETH_P_8021Q))
925 clear_bit(vid, adapter->vsi.active_cvlans);
926 else
927 clear_bit(vid, adapter->vsi.active_svlans);
928
929 return 0;
930 }
931
932 /**
933 * iavf_find_filter - Search filter list for specific mac filter
934 * @adapter: board private structure
935 * @macaddr: the MAC address
936 *
937 * Returns ptr to the filter object or NULL. Must be called while holding the
938 * mac_vlan_list_lock.
939 **/
940 static struct
941 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
942 const u8 *macaddr)
943 {
944 struct iavf_mac_filter *f;
945
946 if (!macaddr)
947 return NULL;
948
949 list_for_each_entry(f, &adapter->mac_filter_list, list) {
950 if (ether_addr_equal(macaddr, f->macaddr))
951 return f;
952 }
953 return NULL;
954 }
955
956 /**
957 * iavf_add_filter - Add a mac filter to the filter list
958 * @adapter: board private structure
959 * @macaddr: the MAC address
960 *
961 * Returns ptr to the filter object or NULL when no memory available.
962 **/
963 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
964 const u8 *macaddr)
965 {
966 struct iavf_mac_filter *f;
967
968 if (!macaddr)
969 return NULL;
970
971 f = iavf_find_filter(adapter, macaddr);
972 if (!f) {
973 f = kzalloc(sizeof(*f), GFP_ATOMIC);
974 if (!f)
975 return f;
976
977 ether_addr_copy(f->macaddr, macaddr);
978
979 list_add_tail(&f->list, &adapter->mac_filter_list);
980 f->add = true;
981 f->add_handled = false;
982 f->is_new_mac = true;
983 f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
984 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
985 } else {
986 f->remove = false;
987 }
988
989 return f;
990 }
991
992 /**
993 * iavf_replace_primary_mac - Replace current primary address
994 * @adapter: board private structure
995 * @new_mac: new MAC address to be applied
996 *
997 * Replace current dev_addr and send request to PF for removal of previous
998 * primary MAC address filter and addition of new primary MAC filter.
999 * Return 0 for success, -ENOMEM for failure.
1000 *
1001 * Do not call this with mac_vlan_list_lock!
1002 **/
1003 int iavf_replace_primary_mac(struct iavf_adapter *adapter,
1004 const u8 *new_mac)
1005 {
1006 struct iavf_hw *hw = &adapter->hw;
1007 struct iavf_mac_filter *f;
1008
1009 spin_lock_bh(&adapter->mac_vlan_list_lock);
1010
1011 list_for_each_entry(f, &adapter->mac_filter_list, list) {
1012 f->is_primary = false;
1013 }
1014
1015 f = iavf_find_filter(adapter, hw->mac.addr);
1016 if (f) {
1017 f->remove = true;
1018 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1019 }
1020
1021 f = iavf_add_filter(adapter, new_mac);
1022
1023 if (f) {
1024 /* Always send the request to add if changing primary MAC
1025 * even if filter is already present on the list
1026 */
1027 f->is_primary = true;
1028 f->add = true;
1029 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
1030 ether_addr_copy(hw->mac.addr, new_mac);
1031 }
1032
1033 spin_unlock_bh(&adapter->mac_vlan_list_lock);
1034
1035 /* schedule the watchdog task to immediately process the request */
1036 if (f) {
1037 queue_work(iavf_wq, &adapter->watchdog_task.work);
1038 return 0;
1039 }
1040 return -ENOMEM;
1041 }
1042
1043 /**
1044 * iavf_is_mac_set_handled - check if the PF has handled the set MAC request
1045 * @netdev: network interface device structure
1046 * @macaddr: MAC address to set
1047 *
1048 * Returns true on success, false on failure
1049 */
1050 static bool iavf_is_mac_set_handled(struct net_device *netdev,
1051 const u8 *macaddr)
1052 {
1053 struct iavf_adapter *adapter = netdev_priv(netdev);
1054 struct iavf_mac_filter *f;
1055 bool ret = false;
1056
1057 spin_lock_bh(&adapter->mac_vlan_list_lock);
1058
1059 f = iavf_find_filter(adapter, macaddr);
1060
1061 if (!f || (!f->add && f->add_handled))
1062 ret = true;
1063
1064 spin_unlock_bh(&adapter->mac_vlan_list_lock);
1065
1066 return ret;
1067 }
1068
1069 /**
1070 * iavf_set_mac - NDO callback to set port MAC address
1071 * @netdev: network interface device structure
1072 * @p: pointer to an address structure
1073 *
1074 * Returns 0 on success, negative on failure
1075 */
1076 static int iavf_set_mac(struct net_device *netdev, void *p)
1077 {
1078 struct iavf_adapter *adapter = netdev_priv(netdev);
1079 struct sockaddr *addr = p;
1080 int ret;
1081
1082 if (!is_valid_ether_addr(addr->sa_data))
1083 return -EADDRNOTAVAIL;
1084
1085 ret = iavf_replace_primary_mac(adapter, addr->sa_data);
1086
1087 if (ret)
1088 return ret;
1089
1090 ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
1091 iavf_is_mac_set_handled(netdev, addr->sa_data),
1092 msecs_to_jiffies(2500));
1093
1094 /* If ret < 0, the wait was interrupted.
1095 * If ret == 0, the wait timed out.
1096 * Otherwise we received a response to the set MAC request from the PF;
1097 * check whether the netdev MAC was updated to the requested address.
1098 * If it was, the operation succeeded; otherwise return -EACCES.
1099 */
1100 if (ret < 0)
1101 return ret;
1102
1103 if (!ret)
1104 return -EAGAIN;
1105
1106 if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
1107 return -EACCES;
1108
1109 return 0;
1110 }
1111
1112 /**
1113 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
1114 * @netdev: the netdevice
1115 * @addr: address to add
1116 *
1117 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1118 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1119 */
1120 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
1121 {
1122 struct iavf_adapter *adapter = netdev_priv(netdev);
1123
1124 if (iavf_add_filter(adapter, addr))
1125 return 0;
1126 else
1127 return -ENOMEM;
1128 }
1129
1130 /**
1131 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1132 * @netdev: the netdevice
1133 * @addr: address to add
1134 *
1135 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1136 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1137 */
1138 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
1139 {
1140 struct iavf_adapter *adapter = netdev_priv(netdev);
1141 struct iavf_mac_filter *f;
1142
1143 /* Under some circumstances, we might receive a request to delete
1144 * our own device address from our uc list. Because we store the
1145 * device address in the VSI's MAC/VLAN filter list, we need to ignore
1146 * such requests and not delete our device address from this list.
1147 */
1148 if (ether_addr_equal(addr, netdev->dev_addr))
1149 return 0;
1150
1151 f = iavf_find_filter(adapter, addr);
1152 if (f) {
1153 f->remove = true;
1154 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1155 }
1156 return 0;
1157 }
1158
1159 /**
1160 * iavf_set_rx_mode - NDO callback to set the netdev filters
1161 * @netdev: network interface device structure
1162 **/
1163 static void iavf_set_rx_mode(struct net_device *netdev)
1164 {
1165 struct iavf_adapter *adapter = netdev_priv(netdev);
1166
1167 spin_lock_bh(&adapter->mac_vlan_list_lock);
1168 __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
1169 __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
1170 spin_unlock_bh(&adapter->mac_vlan_list_lock);
1171
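	/* Translate the netdev promiscuous/allmulti flags into admin queue
	 * requests; the watchdog task sends the corresponding virtchnl messages.
	 */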
1172 if (netdev->flags & IFF_PROMISC &&
1173 !(adapter->flags & IAVF_FLAG_PROMISC_ON))
1174 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
1175 else if (!(netdev->flags & IFF_PROMISC) &&
1176 adapter->flags & IAVF_FLAG_PROMISC_ON)
1177 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
1178
1179 if (netdev->flags & IFF_ALLMULTI &&
1180 !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
1181 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
1182 else if (!(netdev->flags & IFF_ALLMULTI) &&
1183 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
1184 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
1185 }
1186
1187 /**
1188 * iavf_napi_enable_all - enable NAPI on all queue vectors
1189 * @adapter: board private structure
1190 **/
1191 static void iavf_napi_enable_all(struct iavf_adapter *adapter)
1192 {
1193 int q_idx;
1194 struct iavf_q_vector *q_vector;
1195 int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1196
1197 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1198 struct napi_struct *napi;
1199
1200 q_vector = &adapter->q_vectors[q_idx];
1201 napi = &q_vector->napi;
1202 napi_enable(napi);
1203 }
1204 }
1205
1206 /**
1207 * iavf_napi_disable_all - disable NAPI on all queue vectors
1208 * @adapter: board private structure
1209 **/
1210 static void iavf_napi_disable_all(struct iavf_adapter *adapter)
1211 {
1212 int q_idx;
1213 struct iavf_q_vector *q_vector;
1214 int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1215
1216 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1217 q_vector = &adapter->q_vectors[q_idx];
1218 napi_disable(&q_vector->napi);
1219 }
1220 }
1221
1222 /**
1223 * iavf_configure - set up transmit and receive data structures
1224 * @adapter: board private structure
1225 **/
1226 static void iavf_configure(struct iavf_adapter *adapter)
1227 {
1228 struct net_device *netdev = adapter->netdev;
1229 int i;
1230
1231 iavf_set_rx_mode(netdev);
1232
1233 iavf_configure_tx(adapter);
1234 iavf_configure_rx(adapter);
1235 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;
1236
1237 for (i = 0; i < adapter->num_active_queues; i++) {
1238 struct iavf_ring *ring = &adapter->rx_rings[i];
1239
1240 iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
1241 }
1242 }
1243
1244 /**
1245 * iavf_up_complete - Finish the last steps of bringing up a connection
1246 * @adapter: board private structure
1247 *
1248 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
1249 **/
1250 static void iavf_up_complete(struct iavf_adapter *adapter)
1251 {
1252 iavf_change_state(adapter, __IAVF_RUNNING);
1253 clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1254
1255 iavf_napi_enable_all(adapter);
1256
1257 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
1258 if (CLIENT_ENABLED(adapter))
1259 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
1260 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
1261 }
1262
1263 /**
1264 * iavf_clear_mac_vlan_filters - Remove MAC and VLAN filters not yet sent to
1265 * the PF and mark the others for removal.
1266 * @adapter: board private structure
1267 **/
1268 static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
1269 {
1270 struct iavf_vlan_filter *vlf, *vlftmp;
1271 struct iavf_mac_filter *f, *ftmp;
1272
1273 spin_lock_bh(&adapter->mac_vlan_list_lock);
1274 /* clear the sync flag on all filters */
1275 __dev_uc_unsync(adapter->netdev, NULL);
1276 __dev_mc_unsync(adapter->netdev, NULL);
1277
1278 /* remove all MAC filters */
1279 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
1280 list) {
1281 if (f->add) {
1282 list_del(&f->list);
1283 kfree(f);
1284 } else {
1285 f->remove = true;
1286 }
1287 }
1288
1289 /* remove all VLAN filters */
1290 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
1291 list) {
1292 if (vlf->add) {
1293 list_del(&vlf->list);
1294 kfree(vlf);
1295 } else {
1296 vlf->remove = true;
1297 }
1298 }
1299 spin_unlock_bh(&adapter->mac_vlan_list_lock);
1300 }
1301
1302 /**
1303 * iavf_clear_cloud_filters - Remove cloud filters not yet sent to the PF and
1304 * mark the others for removal.
1305 * @adapter: board private structure
1306 **/
1307 static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
1308 {
1309 struct iavf_cloud_filter *cf, *cftmp;
1310
1311 /* remove all cloud filters */
1312 spin_lock_bh(&adapter->cloud_filter_list_lock);
1313 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
1314 list) {
1315 if (cf->add) {
1316 list_del(&cf->list);
1317 kfree(cf);
1318 adapter->num_cloud_filters--;
1319 } else {
1320 cf->del = true;
1321 }
1322 }
1323 spin_unlock_bh(&adapter->cloud_filter_list_lock);
1324 }
1325
1326 /**
1327 * iavf_clear_fdir_filters - Remove Flow Director filters not yet sent to the
1328 * PF and mark the others for removal.
1329 * @adapter: board private structure
1330 **/
1331 static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
1332 {
1333 struct iavf_fdir_fltr *fdir, *fdirtmp;
1334
1335 /* remove all Flow Director filters */
1336 spin_lock_bh(&adapter->fdir_fltr_lock);
1337 list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
1338 list) {
1339 if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
1340 list_del(&fdir->list);
1341 kfree(fdir);
1342 adapter->fdir_active_fltr--;
1343 } else {
1344 fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
1345 }
1346 }
1347 spin_unlock_bh(&adapter->fdir_fltr_lock);
1348 }
1349
1350 /**
1351 * iavf_clear_adv_rss_conf - Remove advanced RSS configurations not yet sent
1352 * to the PF and mark the others for removal.
1353 * @adapter: board private structure
1354 **/
1355 static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
1356 {
1357 struct iavf_adv_rss *rss, *rsstmp;
1358
1359 /* remove all advanced RSS configurations */
1360 spin_lock_bh(&adapter->adv_rss_lock);
1361 list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
1362 list) {
1363 if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
1364 list_del(&rss->list);
1365 kfree(rss);
1366 } else {
1367 rss->state = IAVF_ADV_RSS_DEL_REQUEST;
1368 }
1369 }
1370 spin_unlock_bh(&adapter->adv_rss_lock);
1371 }
1372
1373 /**
1374 * iavf_down - Shutdown the connection processing
1375 * @adapter: board private structure
1376 *
1377 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
1378 **/
1379 void iavf_down(struct iavf_adapter *adapter)
1380 {
1381 struct net_device *netdev = adapter->netdev;
1382
1383 if (adapter->state <= __IAVF_DOWN_PENDING)
1384 return;
1385
1386 netif_carrier_off(netdev);
1387 netif_tx_disable(netdev);
1388 adapter->link_up = false;
1389 iavf_napi_disable_all(adapter);
1390 iavf_irq_disable(adapter);
1391
1392 iavf_clear_mac_vlan_filters(adapter);
1393 iavf_clear_cloud_filters(adapter);
1394 iavf_clear_fdir_filters(adapter);
1395 iavf_clear_adv_rss_conf(adapter);
1396
1397 if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
1398 /* cancel any current operation */
1399 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1400 /* Schedule operations to close down the HW. Don't wait
1401 * here for this to complete. The watchdog is still running
1402 * and it will take care of this.
1403 */
1404 if (!list_empty(&adapter->mac_filter_list))
1405 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1406 if (!list_empty(&adapter->vlan_filter_list))
1407 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
1408 if (!list_empty(&adapter->cloud_filter_list))
1409 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
1410 if (!list_empty(&adapter->fdir_list_head))
1411 adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
1412 if (!list_empty(&adapter->adv_rss_list_head))
1413 adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
1414 adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
1415 }
1416
1417 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
1418 }
1419
1420 /**
1421 * iavf_acquire_msix_vectors - Setup the MSIX capability
1422 * @adapter: board private structure
1423 * @vectors: number of vectors to request
1424 *
1425 * Work with the OS to set up the MSIX vectors needed.
1426 *
1427 * Returns 0 on success, negative on failure
1428 **/
1429 static int
1430 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
1431 {
1432 int err, vector_threshold;
1433
1434 /* We'll want at least 3 (vector_threshold):
1435 * 0) Other (Admin Queue and link, mostly)
1436 * 1) TxQ[0] Cleanup
1437 * 2) RxQ[0] Cleanup
1438 */
1439 vector_threshold = MIN_MSIX_COUNT;
1440
1441 /* The more we get, the more we will assign to Tx/Rx Cleanup
1442 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1443 * Right now, we simply care about how many we'll get; we'll
1444 * set them up later while requesting irq's.
1445 */
1446 err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1447 vector_threshold, vectors);
1448 if (err < 0) {
1449 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
1450 kfree(adapter->msix_entries);
1451 adapter->msix_entries = NULL;
1452 return err;
1453 }
1454
1455 /* Adjust for only the vectors we'll use, which is the minimum
1456 * of (max_msix_q_vectors + NONQ_VECS) and the number of
1457 * vectors we were allocated.
1458 */
1459 adapter->num_msix_vectors = err;
1460 return 0;
1461 }
1462
1463 /**
1464 * iavf_free_queues - Free memory for all rings
1465 * @adapter: board private structure to initialize
1466 *
1467 * Free all of the memory associated with queue pairs.
1468 **/
1469 static void iavf_free_queues(struct iavf_adapter *adapter)
1470 {
1471 if (!adapter->vsi_res)
1472 return;
1473 adapter->num_active_queues = 0;
1474 kfree(adapter->tx_rings);
1475 adapter->tx_rings = NULL;
1476 kfree(adapter->rx_rings);
1477 adapter->rx_rings = NULL;
1478 }
1479
1480 /**
1481 * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
1482 * @adapter: board private structure
1483 *
1484 * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
1485 * stripped in certain descriptor fields. Instead of checking the offload
1486 * capability bits in the hot path, cache the location in the ring-specific
1487 * flags.
1488 */
1489 void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
1490 {
1491 int i;
1492
1493 for (i = 0; i < adapter->num_active_queues; i++) {
1494 struct iavf_ring *tx_ring = &adapter->tx_rings[i];
1495 struct iavf_ring *rx_ring = &adapter->rx_rings[i];
1496
1497 /* prevent multiple L2TAG bits being set after VFR */
1498 tx_ring->flags &=
1499 ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
1500 IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
1501 rx_ring->flags &=
1502 ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
1503 IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);
1504
1505 if (VLAN_ALLOWED(adapter)) {
1506 tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1507 rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1508 } else if (VLAN_V2_ALLOWED(adapter)) {
1509 struct virtchnl_vlan_supported_caps *stripping_support;
1510 struct virtchnl_vlan_supported_caps *insertion_support;
1511
1512 stripping_support =
1513 &adapter->vlan_v2_caps.offloads.stripping_support;
1514 insertion_support =
1515 &adapter->vlan_v2_caps.offloads.insertion_support;
1516
1517 if (stripping_support->outer) {
1518 if (stripping_support->outer &
1519 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1520 rx_ring->flags |=
1521 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1522 else if (stripping_support->outer &
1523 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
1524 rx_ring->flags |=
1525 IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
1526 } else if (stripping_support->inner) {
1527 if (stripping_support->inner &
1528 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1529 rx_ring->flags |=
1530 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1531 else if (stripping_support->inner &
1532 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
1533 rx_ring->flags |=
1534 IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
1535 }
1536
1537 if (insertion_support->outer) {
1538 if (insertion_support->outer &
1539 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1540 tx_ring->flags |=
1541 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1542 else if (insertion_support->outer &
1543 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
1544 tx_ring->flags |=
1545 IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
1546 } else if (insertion_support->inner) {
1547 if (insertion_support->inner &
1548 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1549 tx_ring->flags |=
1550 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1551 else if (insertion_support->inner &
1552 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
1553 tx_ring->flags |=
1554 IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
1555 }
1556 }
1557 }
1558 }
1559
1560 /**
1561 * iavf_alloc_queues - Allocate memory for all rings
1562 * @adapter: board private structure to initialize
1563 *
1564 * We allocate one ring per queue at run-time since we don't know the
1565 * number of queues at compile-time. The polling_netdev array is
1566 * intended for Multiqueue, but should work fine with a single queue.
1567 **/
1568 static int iavf_alloc_queues(struct iavf_adapter *adapter)
1569 {
1570 int i, num_active_queues;
1571
1572 /* If we're reallocating queues during a reset, we don't yet know for
1573 * certain that the PF gave us the number of queues we asked for, but
1574 * we'll assume it did. We'll confirm once basic reset is finished and
1575 * we start negotiating the config with the PF.
1576 */
1577 if (adapter->num_req_queues)
1578 num_active_queues = adapter->num_req_queues;
1579 else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1580 adapter->num_tc)
1581 num_active_queues = adapter->ch_config.total_qps;
1582 else
1583 num_active_queues = min_t(int,
1584 adapter->vsi_res->num_queue_pairs,
1585 (int)(num_online_cpus()));
1586
1587
1588 adapter->tx_rings = kcalloc(num_active_queues,
1589 sizeof(struct iavf_ring), GFP_KERNEL);
1590 if (!adapter->tx_rings)
1591 goto err_out;
1592 adapter->rx_rings = kcalloc(num_active_queues,
1593 sizeof(struct iavf_ring), GFP_KERNEL);
1594 if (!adapter->rx_rings)
1595 goto err_out;
1596
1597 for (i = 0; i < num_active_queues; i++) {
1598 struct iavf_ring *tx_ring;
1599 struct iavf_ring *rx_ring;
1600
1601 tx_ring = &adapter->tx_rings[i];
1602
1603 tx_ring->queue_index = i;
1604 tx_ring->netdev = adapter->netdev;
1605 tx_ring->dev = &adapter->pdev->dev;
1606 tx_ring->count = adapter->tx_desc_count;
1607 tx_ring->itr_setting = IAVF_ITR_TX_DEF;
1608 if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
1609 tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;
1610
1611 rx_ring = &adapter->rx_rings[i];
1612 rx_ring->queue_index = i;
1613 rx_ring->netdev = adapter->netdev;
1614 rx_ring->dev = &adapter->pdev->dev;
1615 rx_ring->count = adapter->rx_desc_count;
1616 rx_ring->itr_setting = IAVF_ITR_RX_DEF;
1617 }
1618
1619 adapter->num_active_queues = num_active_queues;
1620
1621 iavf_set_queue_vlan_tag_loc(adapter);
1622
1623 return 0;
1624
1625 err_out:
1626 iavf_free_queues(adapter);
1627 return -ENOMEM;
1628 }
1629
1630 /**
1631 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
1632 * @adapter: board private structure to initialize
1633 *
1634 * Attempt to configure the interrupts using the best available
1635 * capabilities of the hardware and the kernel.
1636 **/
1637 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
1638 {
1639 int vector, v_budget;
1640 int pairs = 0;
1641 int err = 0;
1642
1643 if (!adapter->vsi_res) {
1644 err = -EIO;
1645 goto out;
1646 }
1647 pairs = adapter->num_active_queues;
1648
1649 /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
1650 * us much good if we have more vectors than CPUs. However, we already
1651 * limit the total number of queues by the number of CPUs so we do not
1652 * need any further limiting here.
1653 */
1654 v_budget = min_t(int, pairs + NONQ_VECS,
1655 (int)adapter->vf_res->max_vectors);
1656
1657 adapter->msix_entries = kcalloc(v_budget,
1658 sizeof(struct msix_entry), GFP_KERNEL);
1659 if (!adapter->msix_entries) {
1660 err = -ENOMEM;
1661 goto out;
1662 }
1663
1664 for (vector = 0; vector < v_budget; vector++)
1665 adapter->msix_entries[vector].entry = vector;
1666
1667 err = iavf_acquire_msix_vectors(adapter, v_budget);
1668
1669 out:
1670 netif_set_real_num_rx_queues(adapter->netdev, pairs);
1671 netif_set_real_num_tx_queues(adapter->netdev, pairs);
1672 return err;
1673 }
1674
1675 /**
1676 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
1677 * @adapter: board private structure
1678 *
1679 * Return 0 on success, negative on failure
1680 **/
1681 static int iavf_config_rss_aq(struct iavf_adapter *adapter)
1682 {
1683 struct iavf_aqc_get_set_rss_key_data *rss_key =
1684 (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
1685 struct iavf_hw *hw = &adapter->hw;
1686 enum iavf_status status;
1687
1688 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1689 /* bail because we already have a command pending */
1690 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
1691 adapter->current_op);
1692 return -EBUSY;
1693 }
1694
1695 status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
1696 if (status) {
1697 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
1698 iavf_stat_str(hw, status),
1699 iavf_aq_str(hw, hw->aq.asq_last_status));
1700 return iavf_status_to_errno(status);
1701
1702 }
1703
1704 status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
1705 adapter->rss_lut, adapter->rss_lut_size);
1706 if (status) {
1707 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
1708 iavf_stat_str(hw, status),
1709 iavf_aq_str(hw, hw->aq.asq_last_status));
1710 return iavf_status_to_errno(status);
1711 }
1712
1713 return 0;
1714
1715 }
1716
1717 /**
1718 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
1719 * @adapter: board private structure
1720 *
1721 * Returns 0 on success, negative on failure
1722 **/
1723 static int iavf_config_rss_reg(struct iavf_adapter *adapter)
1724 {
1725 struct iavf_hw *hw = &adapter->hw;
1726 u32 *dw;
1727 u16 i;
1728
1729 dw = (u32 *)adapter->rss_key;
1730 for (i = 0; i <= adapter->rss_key_size / 4; i++)
1731 wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
1732
1733 dw = (u32 *)adapter->rss_lut;
1734 for (i = 0; i <= adapter->rss_lut_size / 4; i++)
1735 wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
1736
1737 iavf_flush(hw);
1738
1739 return 0;
1740 }
1741
1742 /**
1743 * iavf_config_rss - Configure RSS keys and lut
1744 * @adapter: board private structure
1745 *
1746 * Returns 0 on success, negative on failure
1747 **/
1748 int iavf_config_rss(struct iavf_adapter *adapter)
1749 {
1750
1751 if (RSS_PF(adapter)) {
1752 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
1753 IAVF_FLAG_AQ_SET_RSS_KEY;
1754 return 0;
1755 } else if (RSS_AQ(adapter)) {
1756 return iavf_config_rss_aq(adapter);
1757 } else {
1758 return iavf_config_rss_reg(adapter);
1759 }
1760 }
1761
1762 /**
1763 * iavf_fill_rss_lut - Fill the lut with default values
1764 * @adapter: board private structure
1765 **/
1766 static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
1767 {
1768 u16 i;
1769
1770 for (i = 0; i < adapter->rss_lut_size; i++)
1771 adapter->rss_lut[i] = i % adapter->num_active_queues;
1772 }
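/* Illustrative sketch only: with rss_lut_size = 16 and 4 active queue
 * pairs, the default LUT produced above is
 *   0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3
 * i.e. hash buckets are spread round-robin across all active queues.
 */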
1773
1774 /**
1775 * iavf_init_rss - Prepare for RSS
1776 * @adapter: board private structure
1777 *
1778 * Return 0 on success, negative on failure
1779 **/
1780 static int iavf_init_rss(struct iavf_adapter *adapter)
1781 {
1782 struct iavf_hw *hw = &adapter->hw;
1783
1784 if (!RSS_PF(adapter)) {
1785 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
1786 if (adapter->vf_res->vf_cap_flags &
1787 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1788 adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
1789 else
1790 adapter->hena = IAVF_DEFAULT_RSS_HENA;
1791
1792 wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
1793 wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
1794 }
1795
1796 iavf_fill_rss_lut(adapter);
1797 netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
1798
1799 return iavf_config_rss(adapter);
1800 }
1801
1802 /**
1803 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
1804 * @adapter: board private structure to initialize
1805 *
1806 * We allocate one q_vector per queue interrupt. If allocation fails we
1807 * return -ENOMEM.
1808 **/
1809 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
1810 {
1811 int q_idx = 0, num_q_vectors;
1812 struct iavf_q_vector *q_vector;
1813
1814 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1815 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
1816 GFP_KERNEL);
1817 if (!adapter->q_vectors)
1818 return -ENOMEM;
1819
1820 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1821 q_vector = &adapter->q_vectors[q_idx];
1822 q_vector->adapter = adapter;
1823 q_vector->vsi = &adapter->vsi;
1824 q_vector->v_idx = q_idx;
1825 q_vector->reg_idx = q_idx;
1826 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
1827 netif_napi_add(adapter->netdev, &q_vector->napi,
1828 iavf_napi_poll);
1829 }
1830
1831 return 0;
1832 }
1833
1834 /**
1835 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
1836 * @adapter: board private structure to initialize
1837 *
1838 * This function frees the memory allocated to the q_vectors. In addition if
1839 * NAPI is enabled it will delete any references to the NAPI struct prior
1840 * to freeing the q_vector.
1841 **/
1842 static void iavf_free_q_vectors(struct iavf_adapter *adapter)
1843 {
1844 int q_idx, num_q_vectors;
1845 int napi_vectors;
1846
1847 if (!adapter->q_vectors)
1848 return;
1849
1850 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1851 napi_vectors = adapter->num_active_queues;
1852
1853 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1854 struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
1855
1856 if (q_idx < napi_vectors)
1857 netif_napi_del(&q_vector->napi);
1858 }
1859 kfree(adapter->q_vectors);
1860 adapter->q_vectors = NULL;
1861 }
1862
1863 /**
1864 * iavf_reset_interrupt_capability - Reset MSIX setup
1865 * @adapter: board private structure
1866 *
1867 **/
1868 void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
1869 {
1870 if (!adapter->msix_entries)
1871 return;
1872
1873 pci_disable_msix(adapter->pdev);
1874 kfree(adapter->msix_entries);
1875 adapter->msix_entries = NULL;
1876 }
1877
1878 /**
1879 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
1880 * @adapter: board private structure to initialize
1881 *
1882 **/
1883 int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
1884 {
1885 int err;
1886
1887 err = iavf_alloc_queues(adapter);
1888 if (err) {
1889 dev_err(&adapter->pdev->dev,
1890 "Unable to allocate memory for queues\n");
1891 goto err_alloc_queues;
1892 }
1893
1894 rtnl_lock();
1895 err = iavf_set_interrupt_capability(adapter);
1896 rtnl_unlock();
1897 if (err) {
1898 dev_err(&adapter->pdev->dev,
1899 "Unable to setup interrupt capabilities\n");
1900 goto err_set_interrupt;
1901 }
1902
1903 err = iavf_alloc_q_vectors(adapter);
1904 if (err) {
1905 dev_err(&adapter->pdev->dev,
1906 "Unable to allocate memory for queue vectors\n");
1907 goto err_alloc_q_vectors;
1908 }
1909
1910 	/* If we've made it this far with the ADq flag set, then we haven't
1911 	 * bailed out anywhere along the way, and ADq isn't merely enabled but
1912 	 * its resources have actually been allocated in the reset path.
1913 	 * Now we can truly claim that ADq is enabled.
1914 */
1915 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1916 adapter->num_tc)
1917 dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
1918 adapter->num_tc);
1919
1920 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
1921 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
1922 adapter->num_active_queues);
1923
1924 return 0;
1925 err_alloc_q_vectors:
1926 iavf_reset_interrupt_capability(adapter);
1927 err_set_interrupt:
1928 iavf_free_queues(adapter);
1929 err_alloc_queues:
1930 return err;
1931 }
1932
1933 /**
1934 * iavf_free_rss - Free memory used by RSS structs
1935 * @adapter: board private structure
1936 **/
1937 static void iavf_free_rss(struct iavf_adapter *adapter)
1938 {
1939 kfree(adapter->rss_key);
1940 adapter->rss_key = NULL;
1941
1942 kfree(adapter->rss_lut);
1943 adapter->rss_lut = NULL;
1944 }
1945
1946 /**
1947 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
1948 * @adapter: board private structure
1949 *
1950 * Returns 0 on success, negative on failure
1951 **/
1952 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
1953 {
1954 struct net_device *netdev = adapter->netdev;
1955 int err;
1956
1957 if (netif_running(netdev))
1958 iavf_free_traffic_irqs(adapter);
1959 iavf_free_misc_irq(adapter);
1960 iavf_reset_interrupt_capability(adapter);
1961 iavf_free_q_vectors(adapter);
1962 iavf_free_queues(adapter);
1963
1964 err = iavf_init_interrupt_scheme(adapter);
1965 if (err)
1966 goto err;
1967
1968 netif_tx_stop_all_queues(netdev);
1969
1970 err = iavf_request_misc_irq(adapter);
1971 if (err)
1972 goto err;
1973
1974 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1975
1976 iavf_map_rings_to_vectors(adapter);
1977 err:
1978 return err;
1979 }
1980
1981 /**
1982 * iavf_process_aq_command - process aq_required flags
1983 * and sends aq command
1984 * @adapter: pointer to iavf adapter structure
1985 *
1986 * Returns 0 on success.
1987 * Returns an error code if no command was sent
1988 * or if the command failed.
1989 **/
1990 static int iavf_process_aq_command(struct iavf_adapter *adapter)
1991 {
1992 if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
1993 return iavf_send_vf_config_msg(adapter);
1994 if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
1995 return iavf_send_vf_offload_vlan_v2_msg(adapter);
1996 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
1997 iavf_disable_queues(adapter);
1998 return 0;
1999 }
2000
2001 if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
2002 iavf_map_queues(adapter);
2003 return 0;
2004 }
2005
2006 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
2007 iavf_add_ether_addrs(adapter);
2008 return 0;
2009 }
2010
2011 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
2012 iavf_add_vlans(adapter);
2013 return 0;
2014 }
2015
2016 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
2017 iavf_del_ether_addrs(adapter);
2018 return 0;
2019 }
2020
2021 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
2022 iavf_del_vlans(adapter);
2023 return 0;
2024 }
2025
2026 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
2027 iavf_enable_vlan_stripping(adapter);
2028 return 0;
2029 }
2030
2031 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
2032 iavf_disable_vlan_stripping(adapter);
2033 return 0;
2034 }
2035
2036 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
2037 iavf_configure_queues(adapter);
2038 return 0;
2039 }
2040
2041 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
2042 iavf_enable_queues(adapter);
2043 return 0;
2044 }
2045
2046 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
2047 /* This message goes straight to the firmware, not the
2048 * PF, so we don't have to set current_op as we will
2049 * not get a response through the ARQ.
2050 */
		iavf_init_rss(adapter);
2051 		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
2052 return 0;
2053 }
2054 if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
2055 iavf_get_hena(adapter);
2056 return 0;
2057 }
2058 if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
2059 iavf_set_hena(adapter);
2060 return 0;
2061 }
2062 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
2063 iavf_set_rss_key(adapter);
2064 return 0;
2065 }
2066 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
2067 iavf_set_rss_lut(adapter);
2068 return 0;
2069 }
2070
2071 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
2072 iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
2073 FLAG_VF_MULTICAST_PROMISC);
2074 return 0;
2075 }
2076
2077 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
2078 iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
2079 return 0;
2080 }
2081 if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
2082 (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
2083 iavf_set_promiscuous(adapter, 0);
2084 return 0;
2085 }
2086
2087 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
2088 iavf_enable_channels(adapter);
2089 return 0;
2090 }
2091
2092 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
2093 iavf_disable_channels(adapter);
2094 return 0;
2095 }
2096 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
2097 iavf_add_cloud_filter(adapter);
2098 return 0;
2099 }
2100
2101 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
2102 iavf_del_cloud_filter(adapter);
2103 return 0;
2104 }
2105 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
2106 iavf_del_cloud_filter(adapter);
2107 return 0;
2108 }
2109 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
2110 iavf_add_cloud_filter(adapter);
2111 return 0;
2112 }
2113 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
2114 iavf_add_fdir_filter(adapter);
2115 		return 0;
2116 }
2117 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
2118 iavf_del_fdir_filter(adapter);
2119 		return 0;
2120 }
2121 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
2122 iavf_add_adv_rss_cfg(adapter);
2123 return 0;
2124 }
2125 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
2126 iavf_del_adv_rss_cfg(adapter);
2127 return 0;
2128 }
2129 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
2130 iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2131 return 0;
2132 }
2133 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
2134 iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2135 return 0;
2136 }
2137 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
2138 iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2139 return 0;
2140 }
2141 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
2142 iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2143 return 0;
2144 }
2145 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
2146 iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2147 return 0;
2148 }
2149 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
2150 iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2151 return 0;
2152 }
2153 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
2154 iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2155 return 0;
2156 }
2157 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
2158 iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2159 return 0;
2160 }
2161
2162 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
2163 iavf_request_stats(adapter);
2164 return 0;
2165 }
2166
2167 return -EAGAIN;
2168 }
2169
2170 /**
2171 * iavf_set_vlan_offload_features - set VLAN offload configuration
2172 * @adapter: board private structure
2173 * @prev_features: previous features used for comparison
2174 * @features: updated features used for configuration
2175 *
2176 * Set the aq_required bit(s) based on the requested features passed in to
2177 * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
2178 * the watchdog if any changes are requested to expedite the request via
2179 * virtchnl.
2180 **/
2181 void
2182 iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
2183 netdev_features_t prev_features,
2184 netdev_features_t features)
2185 {
2186 bool enable_stripping = true, enable_insertion = true;
2187 u16 vlan_ethertype = 0;
2188 u64 aq_required = 0;
2189
2190 /* keep cases separate because one ethertype for offloads can be
2191 * disabled at the same time as another is disabled, so check for an
2192 * enabled ethertype first, then check for disabled. Default to
2193 * ETH_P_8021Q so an ethertype is specified if disabling insertion and
2194 * stripping.
2195 */
2196 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2197 vlan_ethertype = ETH_P_8021AD;
2198 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2199 vlan_ethertype = ETH_P_8021Q;
2200 else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2201 vlan_ethertype = ETH_P_8021AD;
2202 else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2203 vlan_ethertype = ETH_P_8021Q;
2204 else
2205 vlan_ethertype = ETH_P_8021Q;
2206
2207 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
2208 enable_stripping = false;
2209 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
2210 enable_insertion = false;
2211
2212 if (VLAN_ALLOWED(adapter)) {
2213 /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
2214 * stripping via virtchnl. VLAN insertion can be toggled on the
2215 * netdev, but it doesn't require a virtchnl message
2216 */
2217 if (enable_stripping)
2218 aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
2219 else
2220 aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
2221
2222 } else if (VLAN_V2_ALLOWED(adapter)) {
2223 switch (vlan_ethertype) {
2224 case ETH_P_8021Q:
2225 if (enable_stripping)
2226 aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
2227 else
2228 aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
2229
2230 if (enable_insertion)
2231 aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
2232 else
2233 aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
2234 break;
2235 case ETH_P_8021AD:
2236 if (enable_stripping)
2237 aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
2238 else
2239 aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
2240
2241 if (enable_insertion)
2242 aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
2243 else
2244 aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
2245 break;
2246 }
2247 }
2248
2249 if (aq_required) {
2250 adapter->aq_required |= aq_required;
2251 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
2252 }
2253 }
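/* Hedged example of the mapping above (illustrative only): on an adapter
 * where VLAN_V2_ALLOWED() is true, clearing NETIF_F_HW_VLAN_CTAG_RX while
 * leaving NETIF_F_HW_VLAN_CTAG_TX set selects vlan_ethertype = ETH_P_8021Q
 * and ORs
 *   IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING |
 *   IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION
 * into aq_required before the watchdog is kicked to send the
 * corresponding virtchnl messages.
 */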
2254
2255 /**
2256 * iavf_startup - first step of driver startup
2257 * @adapter: board private structure
2258 *
2259 * Function processes the __IAVF_STARTUP driver state.
2260 * On success the state is changed to __IAVF_INIT_VERSION_CHECK;
2261 * on failure it is changed to __IAVF_INIT_FAILED.
2262 **/
2263 static void iavf_startup(struct iavf_adapter *adapter)
2264 {
2265 struct pci_dev *pdev = adapter->pdev;
2266 struct iavf_hw *hw = &adapter->hw;
2267 enum iavf_status status;
2268 int ret;
2269
2270 WARN_ON(adapter->state != __IAVF_STARTUP);
2271
2272 /* driver loaded, probe complete */
2273 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2274 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2275 status = iavf_set_mac_type(hw);
2276 if (status) {
2277 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status);
2278 goto err;
2279 }
2280
2281 ret = iavf_check_reset_complete(hw);
2282 if (ret) {
2283 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
2284 ret);
2285 goto err;
2286 }
2287 hw->aq.num_arq_entries = IAVF_AQ_LEN;
2288 hw->aq.num_asq_entries = IAVF_AQ_LEN;
2289 hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2290 hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2291
2292 status = iavf_init_adminq(hw);
2293 if (status) {
2294 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
2295 status);
2296 goto err;
2297 }
2298 ret = iavf_send_api_ver(adapter);
2299 if (ret) {
2300 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
2301 iavf_shutdown_adminq(hw);
2302 goto err;
2303 }
2304 iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
2305 return;
2306 err:
2307 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2308 }
2309
2310 /**
2311 * iavf_init_version_check - second step of driver startup
2312 * @adapter: board private structure
2313 *
2314 * Function processes the __IAVF_INIT_VERSION_CHECK driver state.
2315 * On success the state is changed to __IAVF_INIT_GET_RESOURCES;
2316 * on failure it is changed to __IAVF_INIT_FAILED.
2317 **/
2318 static void iavf_init_version_check(struct iavf_adapter *adapter)
2319 {
2320 struct pci_dev *pdev = adapter->pdev;
2321 struct iavf_hw *hw = &adapter->hw;
2322 int err = -EAGAIN;
2323
2324 WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);
2325
2326 if (!iavf_asq_done(hw)) {
2327 dev_err(&pdev->dev, "Admin queue command never completed\n");
2328 iavf_shutdown_adminq(hw);
2329 iavf_change_state(adapter, __IAVF_STARTUP);
2330 goto err;
2331 }
2332
2333 /* aq msg sent, awaiting reply */
2334 err = iavf_verify_api_ver(adapter);
2335 if (err) {
2336 if (err == -EALREADY)
2337 err = iavf_send_api_ver(adapter);
2338 else
2339 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
2340 adapter->pf_version.major,
2341 adapter->pf_version.minor,
2342 VIRTCHNL_VERSION_MAJOR,
2343 VIRTCHNL_VERSION_MINOR);
2344 goto err;
2345 }
2346 err = iavf_send_vf_config_msg(adapter);
2347 if (err) {
2348 dev_err(&pdev->dev, "Unable to send config request (%d)\n",
2349 err);
2350 goto err;
2351 }
2352 iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
2353 return;
2354 err:
2355 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2356 }
2357
2358 /**
2359 * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
2360 * @adapter: board private structure
2361 */
2362 int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
2363 {
2364 int i, num_req_queues = adapter->num_req_queues;
2365 struct iavf_vsi *vsi = &adapter->vsi;
2366
2367 for (i = 0; i < adapter->vf_res->num_vsis; i++) {
2368 if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
2369 adapter->vsi_res = &adapter->vf_res->vsi_res[i];
2370 }
2371 if (!adapter->vsi_res) {
2372 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
2373 return -ENODEV;
2374 }
2375
2376 if (num_req_queues &&
2377 num_req_queues > adapter->vsi_res->num_queue_pairs) {
2378 /* Problem. The PF gave us fewer queues than what we had
2379 * negotiated in our request. Need a reset to see if we can't
2380 * get back to a working state.
2381 */
2382 dev_err(&adapter->pdev->dev,
2383 "Requested %d queues, but PF only gave us %d.\n",
2384 num_req_queues,
2385 adapter->vsi_res->num_queue_pairs);
2386 adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
2387 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
2388 iavf_schedule_reset(adapter);
2389
2390 return -EAGAIN;
2391 }
2392 adapter->num_req_queues = 0;
2393 adapter->vsi.id = adapter->vsi_res->vsi_id;
2394
2395 adapter->vsi.back = adapter;
2396 adapter->vsi.base_vector = 1;
2397 vsi->netdev = adapter->netdev;
2398 vsi->qs_handle = adapter->vsi_res->qset_handle;
2399 if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2400 adapter->rss_key_size = adapter->vf_res->rss_key_size;
2401 adapter->rss_lut_size = adapter->vf_res->rss_lut_size;
2402 } else {
2403 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
2404 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
2405 }
2406
2407 return 0;
2408 }
2409
2410 /**
2411 * iavf_init_get_resources - third step of driver startup
2412 * @adapter: board private structure
2413 *
2414 * Function processes the __IAVF_INIT_GET_RESOURCES driver state and
2415 * finishes the driver initialization procedure.
2416 * On success the state is changed to __IAVF_DOWN;
2417 * on failure it is changed to __IAVF_INIT_FAILED.
2418 **/
2419 static void iavf_init_get_resources(struct iavf_adapter *adapter)
2420 {
2421 struct pci_dev *pdev = adapter->pdev;
2422 struct iavf_hw *hw = &adapter->hw;
2423 int err;
2424
2425 WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
2426 /* aq msg sent, awaiting reply */
2427 if (!adapter->vf_res) {
2428 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
2429 GFP_KERNEL);
2430 if (!adapter->vf_res) {
2431 err = -ENOMEM;
2432 goto err;
2433 }
2434 }
2435 err = iavf_get_vf_config(adapter);
2436 if (err == -EALREADY) {
2437 err = iavf_send_vf_config_msg(adapter);
2438 goto err;
2439 } else if (err == -EINVAL) {
2440 /* We only get -EINVAL if the device is in a very bad
2441 * state or if we've been disabled for previous bad
2442 * behavior. Either way, we're done now.
2443 */
2444 iavf_shutdown_adminq(hw);
2445 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
2446 return;
2447 }
2448 if (err) {
2449 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
2450 goto err_alloc;
2451 }
2452
2453 err = iavf_parse_vf_resource_msg(adapter);
2454 if (err) {
2455 dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
2456 err);
2457 goto err_alloc;
2458 }
2459 /* Some features require additional messages to negotiate extended
2460 * capabilities. These are processed in sequence by the
2461 * __IAVF_INIT_EXTENDED_CAPS driver state.
2462 */
2463 adapter->extended_caps = IAVF_EXTENDED_CAPS;
2464
2465 iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
2466 return;
2467
2468 err_alloc:
2469 kfree(adapter->vf_res);
2470 adapter->vf_res = NULL;
2471 err:
2472 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2473 }
2474
2475 /**
2476 * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2477 * @adapter: board private structure
2478 *
2479 * Function handles sending the extended VLAN V2 capability message to the
2480 * PF. It must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not
2481 * sent, e.g. because the PF did not negotiate VIRTCHNL_VF_OFFLOAD_VLAN_V2.
2482 */
2483 static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2484 {
2485 int ret;
2486
2487 WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));
2488
2489 ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
2490 if (ret && ret == -EOPNOTSUPP) {
2491 		/* PF does not support VIRTCHNL_VF_OFFLOAD_VLAN_V2. In this case,
2492 * we did not send the capability exchange message and do not
2493 * expect a response.
2494 */
2495 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2496 }
2497
2498 /* We sent the message, so move on to the next step */
2499 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2500 }
2501
2502 /**
2503 * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2504 * @adapter: board private structure
2505 *
2506 * Function processes receipt of the extended VLAN V2 capability message from
2507 * the PF.
2508 **/
2509 static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2510 {
2511 int ret;
2512
2513 WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));
2514
2515 memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
2516
2517 ret = iavf_get_vf_vlan_v2_caps(adapter);
2518 if (ret)
2519 goto err;
2520
2521 /* We've processed receipt of the VLAN V2 caps message */
2522 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2523 return;
2524 err:
2525 /* We didn't receive a reply. Make sure we try sending again when
2526 * __IAVF_INIT_FAILED attempts to recover.
2527 */
2528 adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2529 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2530 }
2531
2532 /**
2533 * iavf_init_process_extended_caps - Part of driver startup
2534 * @adapter: board private structure
2535 *
2536 * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
2537 * handles negotiating capabilities for features which require an additional
2538 * message.
2539 *
2540 * Once all extended capabilities exchanges are finished, the driver will
2541 * transition into __IAVF_INIT_CONFIG_ADAPTER.
2542 */
2543 static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
2544 {
2545 WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);
2546
2547 /* Process capability exchange for VLAN V2 */
2548 if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
2549 iavf_init_send_offload_vlan_v2_caps(adapter);
2550 return;
2551 } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
2552 iavf_init_recv_offload_vlan_v2_caps(adapter);
2553 return;
2554 }
2555
2556 /* When we reach here, no further extended capabilities exchanges are
2557 * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
2558 */
2559 iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
2560 }
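/* Illustrative flow (assuming IAVF_EXTENDED_CAPS is the OR of the
 * SEND_VLAN_V2 and RECV_VLAN_V2 bits set in iavf_init_get_resources()):
 * the first pass through this state clears SEND_VLAN_V2 after the request
 * goes out, a later pass clears RECV_VLAN_V2 once the reply is parsed, and
 * only then does the state machine advance to __IAVF_INIT_CONFIG_ADAPTER.
 */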
2561
2562 /**
2563 * iavf_init_config_adapter - last part of driver startup
2564 * @adapter: board private structure
2565 *
2566 * After all the supported capabilities are negotiated, then the
2567 * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
2568 */
2569 static void iavf_init_config_adapter(struct iavf_adapter *adapter)
2570 {
2571 struct net_device *netdev = adapter->netdev;
2572 struct pci_dev *pdev = adapter->pdev;
2573 int err;
2574
2575 WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER);
2576
2577 if (iavf_process_config(adapter))
2578 goto err;
2579
2580 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2581
2582 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
2583
2584 netdev->netdev_ops = &iavf_netdev_ops;
2585 iavf_set_ethtool_ops(netdev);
2586 netdev->watchdog_timeo = 5 * HZ;
2587
2588 /* MTU range: 68 - 9710 */
2589 netdev->min_mtu = ETH_MIN_MTU;
2590 netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
2591
2592 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
2593 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
2594 adapter->hw.mac.addr);
2595 eth_hw_addr_random(netdev);
2596 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2597 } else {
2598 eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2599 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2600 }
2601
2602 adapter->tx_desc_count = IAVF_DEFAULT_TXD;
2603 adapter->rx_desc_count = IAVF_DEFAULT_RXD;
2604 err = iavf_init_interrupt_scheme(adapter);
2605 if (err)
2606 goto err_sw_init;
2607 iavf_map_rings_to_vectors(adapter);
2608 if (adapter->vf_res->vf_cap_flags &
2609 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2610 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
2611
2612 err = iavf_request_misc_irq(adapter);
2613 if (err)
2614 goto err_sw_init;
2615
2616 netif_carrier_off(netdev);
2617 adapter->link_up = false;
2618
2619 	/* set the semaphore to prevent any callbacks after device registration
2620 	 * until the driver state is set to __IAVF_DOWN
2621 */
2622 rtnl_lock();
2623 if (!adapter->netdev_registered) {
2624 err = register_netdevice(netdev);
2625 if (err) {
2626 rtnl_unlock();
2627 goto err_register;
2628 }
2629 }
2630
2631 adapter->netdev_registered = true;
2632
2633 netif_tx_stop_all_queues(netdev);
2634 if (CLIENT_ALLOWED(adapter)) {
2635 err = iavf_lan_add_device(adapter);
2636 if (err)
2637 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
2638 err);
2639 }
2640 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2641 if (netdev->features & NETIF_F_GRO)
2642 dev_info(&pdev->dev, "GRO is enabled\n");
2643
2644 iavf_change_state(adapter, __IAVF_DOWN);
2645 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2646 rtnl_unlock();
2647
2648 iavf_misc_irq_enable(adapter);
2649 wake_up(&adapter->down_waitqueue);
2650
2651 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
2652 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
2653 if (!adapter->rss_key || !adapter->rss_lut) {
2654 err = -ENOMEM;
2655 goto err_mem;
2656 }
2657 if (RSS_AQ(adapter))
2658 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2659 else
2660 iavf_init_rss(adapter);
2661
2662 if (VLAN_V2_ALLOWED(adapter))
2663 /* request initial VLAN offload settings */
2664 iavf_set_vlan_offload_features(adapter, 0, netdev->features);
2665
2666 return;
2667 err_mem:
2668 iavf_free_rss(adapter);
2669 err_register:
2670 iavf_free_misc_irq(adapter);
2671 err_sw_init:
2672 iavf_reset_interrupt_capability(adapter);
2673 err:
2674 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2675 }
2676
2677 /**
2678 * iavf_watchdog_task - Periodic call-back task
2679 * @work: pointer to work_struct
2680 **/
2681 static void iavf_watchdog_task(struct work_struct *work)
2682 {
2683 struct iavf_adapter *adapter = container_of(work,
2684 struct iavf_adapter,
2685 watchdog_task.work);
2686 struct iavf_hw *hw = &adapter->hw;
2687 u32 reg_val;
2688
2689 if (!mutex_trylock(&adapter->crit_lock)) {
2690 if (adapter->state == __IAVF_REMOVE)
2691 return;
2692
2693 goto restart_watchdog;
2694 }
2695
2696 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2697 iavf_change_state(adapter, __IAVF_COMM_FAILED);
2698
2699 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2700 adapter->aq_required = 0;
2701 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2702 mutex_unlock(&adapter->crit_lock);
2703 queue_work(iavf_wq, &adapter->reset_task);
2704 return;
2705 }
2706
2707 switch (adapter->state) {
2708 case __IAVF_STARTUP:
2709 iavf_startup(adapter);
2710 mutex_unlock(&adapter->crit_lock);
2711 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2712 msecs_to_jiffies(30));
2713 return;
2714 case __IAVF_INIT_VERSION_CHECK:
2715 iavf_init_version_check(adapter);
2716 mutex_unlock(&adapter->crit_lock);
2717 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2718 msecs_to_jiffies(30));
2719 return;
2720 case __IAVF_INIT_GET_RESOURCES:
2721 iavf_init_get_resources(adapter);
2722 mutex_unlock(&adapter->crit_lock);
2723 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2724 msecs_to_jiffies(1));
2725 return;
2726 case __IAVF_INIT_EXTENDED_CAPS:
2727 iavf_init_process_extended_caps(adapter);
2728 mutex_unlock(&adapter->crit_lock);
2729 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2730 msecs_to_jiffies(1));
2731 return;
2732 case __IAVF_INIT_CONFIG_ADAPTER:
2733 iavf_init_config_adapter(adapter);
2734 mutex_unlock(&adapter->crit_lock);
2735 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2736 msecs_to_jiffies(1));
2737 return;
2738 case __IAVF_INIT_FAILED:
2739 if (test_bit(__IAVF_IN_REMOVE_TASK,
2740 &adapter->crit_section)) {
2741 /* Do not update the state and do not reschedule
2742 * watchdog task, iavf_remove should handle this state
2743 * as it can loop forever
2744 */
2745 mutex_unlock(&adapter->crit_lock);
2746 return;
2747 }
2748 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
2749 dev_err(&adapter->pdev->dev,
2750 "Failed to communicate with PF; waiting before retry\n");
2751 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2752 iavf_shutdown_adminq(hw);
2753 mutex_unlock(&adapter->crit_lock);
2754 queue_delayed_work(iavf_wq,
2755 &adapter->watchdog_task, (5 * HZ));
2756 return;
2757 }
2758 		/* Try again from the failed step */
2759 iavf_change_state(adapter, adapter->last_state);
2760 mutex_unlock(&adapter->crit_lock);
2761 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
2762 return;
2763 case __IAVF_COMM_FAILED:
2764 if (test_bit(__IAVF_IN_REMOVE_TASK,
2765 &adapter->crit_section)) {
2766 /* Set state to __IAVF_INIT_FAILED and perform remove
2767 * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
2768 * doesn't bring the state back to __IAVF_COMM_FAILED.
2769 */
2770 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2771 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2772 mutex_unlock(&adapter->crit_lock);
2773 return;
2774 }
2775 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2776 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2777 if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
2778 reg_val == VIRTCHNL_VFR_COMPLETED) {
2779 /* A chance for redemption! */
2780 dev_err(&adapter->pdev->dev,
2781 "Hardware came out of reset. Attempting reinit.\n");
2782 /* When init task contacts the PF and
2783 * gets everything set up again, it'll restart the
2784 * watchdog for us. Down, boy. Sit. Stay. Woof.
2785 */
2786 iavf_change_state(adapter, __IAVF_STARTUP);
2787 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2788 }
2789 adapter->aq_required = 0;
2790 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2791 mutex_unlock(&adapter->crit_lock);
2792 queue_delayed_work(iavf_wq,
2793 &adapter->watchdog_task,
2794 msecs_to_jiffies(10));
2795 return;
2796 case __IAVF_RESETTING:
2797 mutex_unlock(&adapter->crit_lock);
2798 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
2799 return;
2800 case __IAVF_DOWN:
2801 case __IAVF_DOWN_PENDING:
2802 case __IAVF_TESTING:
2803 case __IAVF_RUNNING:
2804 if (adapter->current_op) {
2805 if (!iavf_asq_done(hw)) {
2806 dev_dbg(&adapter->pdev->dev,
2807 "Admin queue timeout\n");
2808 iavf_send_api_ver(adapter);
2809 }
2810 } else {
2811 int ret = iavf_process_aq_command(adapter);
2812
2813 /* An error will be returned if no commands were
2814 * processed; use this opportunity to update stats
2815 			 * if the error isn't -EOPNOTSUPP
2816 */
2817 if (ret && ret != -EOPNOTSUPP &&
2818 adapter->state == __IAVF_RUNNING)
2819 iavf_request_stats(adapter);
2820 }
2821 if (adapter->state == __IAVF_RUNNING)
2822 iavf_detect_recover_hung(&adapter->vsi);
2823 break;
2824 case __IAVF_REMOVE:
2825 default:
2826 mutex_unlock(&adapter->crit_lock);
2827 return;
2828 }
2829
2830 /* check for hw reset */
2831 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2832 if (!reg_val) {
2833 adapter->flags |= IAVF_FLAG_RESET_PENDING;
2834 adapter->aq_required = 0;
2835 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2836 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
2837 queue_work(iavf_wq, &adapter->reset_task);
2838 mutex_unlock(&adapter->crit_lock);
2839 queue_delayed_work(iavf_wq,
2840 &adapter->watchdog_task, HZ * 2);
2841 return;
2842 }
2843
2844 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
2845 mutex_unlock(&adapter->crit_lock);
2846 restart_watchdog:
2847 if (adapter->state >= __IAVF_DOWN)
2848 queue_work(iavf_wq, &adapter->adminq_task);
2849 if (adapter->aq_required)
2850 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
2851 msecs_to_jiffies(20));
2852 else
2853 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
2854 }
2855
2856 /**
2857 * iavf_disable_vf - disable VF
2858 * @adapter: board private structure
2859 *
2860 * Set communication failed flag and free all resources.
2861 * NOTE: This function is expected to be called with crit_lock being held.
2862 **/
2863 static void iavf_disable_vf(struct iavf_adapter *adapter)
2864 {
2865 struct iavf_mac_filter *f, *ftmp;
2866 struct iavf_vlan_filter *fv, *fvtmp;
2867 struct iavf_cloud_filter *cf, *cftmp;
2868
2869 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2870
2871 /* We don't use netif_running() because it may be true prior to
2872 * ndo_open() returning, so we can't assume it means all our open
2873 * tasks have finished, since we're not holding the rtnl_lock here.
2874 */
2875 if (adapter->state == __IAVF_RUNNING) {
2876 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2877 netif_carrier_off(adapter->netdev);
2878 netif_tx_disable(adapter->netdev);
2879 adapter->link_up = false;
2880 iavf_napi_disable_all(adapter);
2881 iavf_irq_disable(adapter);
2882 iavf_free_traffic_irqs(adapter);
2883 iavf_free_all_tx_resources(adapter);
2884 iavf_free_all_rx_resources(adapter);
2885 }
2886
2887 spin_lock_bh(&adapter->mac_vlan_list_lock);
2888
2889 /* Delete all of the filters */
2890 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2891 list_del(&f->list);
2892 kfree(f);
2893 }
2894
2895 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
2896 list_del(&fv->list);
2897 kfree(fv);
2898 }
2899
2900 spin_unlock_bh(&adapter->mac_vlan_list_lock);
2901
2902 spin_lock_bh(&adapter->cloud_filter_list_lock);
2903 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
2904 list_del(&cf->list);
2905 kfree(cf);
2906 adapter->num_cloud_filters--;
2907 }
2908 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2909
2910 iavf_free_misc_irq(adapter);
2911 iavf_reset_interrupt_capability(adapter);
2912 iavf_free_q_vectors(adapter);
2913 iavf_free_queues(adapter);
2914 memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
2915 iavf_shutdown_adminq(&adapter->hw);
2916 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2917 iavf_change_state(adapter, __IAVF_DOWN);
2918 wake_up(&adapter->down_waitqueue);
2919 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
2920 }
2921
2922 /**
2923 * iavf_reset_task - Call-back task to handle hardware reset
2924 * @work: pointer to work_struct
2925 *
2926 * During reset we need to shut down and reinitialize the admin queue
2927 * before we can use it to communicate with the PF again. We also clear
2928 * and reinit the rings because that context is lost as well.
2929 **/
2930 static void iavf_reset_task(struct work_struct *work)
2931 {
2932 struct iavf_adapter *adapter = container_of(work,
2933 struct iavf_adapter,
2934 reset_task);
2935 struct virtchnl_vf_resource *vfres = adapter->vf_res;
2936 struct net_device *netdev = adapter->netdev;
2937 struct iavf_hw *hw = &adapter->hw;
2938 struct iavf_mac_filter *f, *ftmp;
2939 struct iavf_cloud_filter *cf;
2940 enum iavf_status status;
2941 u32 reg_val;
2942 int i = 0, err;
2943 bool running;
2944
2945 /* Detach interface to avoid subsequent NDO callbacks */
2946 rtnl_lock();
2947 netif_device_detach(netdev);
2948 rtnl_unlock();
2949
2950 /* When device is being removed it doesn't make sense to run the reset
2951 * task, just return in such a case.
2952 */
2953 if (!mutex_trylock(&adapter->crit_lock)) {
2954 if (adapter->state != __IAVF_REMOVE)
2955 queue_work(iavf_wq, &adapter->reset_task);
2956
2957 goto reset_finish;
2958 }
2959
2960 while (!mutex_trylock(&adapter->client_lock))
2961 usleep_range(500, 1000);
2962 if (CLIENT_ENABLED(adapter)) {
2963 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
2964 IAVF_FLAG_CLIENT_NEEDS_CLOSE |
2965 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
2966 IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
2967 cancel_delayed_work_sync(&adapter->client_task);
2968 iavf_notify_client_close(&adapter->vsi, true);
2969 }
2970 iavf_misc_irq_disable(adapter);
2971 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2972 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
2973 /* Restart the AQ here. If we have been reset but didn't
2974 * detect it, or if the PF had to reinit, our AQ will be hosed.
2975 */
2976 iavf_shutdown_adminq(hw);
2977 iavf_init_adminq(hw);
2978 iavf_request_reset(adapter);
2979 }
2980 adapter->flags |= IAVF_FLAG_RESET_PENDING;
2981
2982 /* poll until we see the reset actually happen */
2983 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
2984 reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
2985 IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2986 if (!reg_val)
2987 break;
2988 usleep_range(5000, 10000);
2989 }
2990 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
2991 dev_info(&adapter->pdev->dev, "Never saw reset\n");
2992 goto continue_reset; /* act like the reset happened */
2993 }
2994
2995 /* wait until the reset is complete and the PF is responding to us */
2996 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
2997 /* sleep first to make sure a minimum wait time is met */
2998 msleep(IAVF_RESET_WAIT_MS);
2999
3000 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
3001 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
3002 if (reg_val == VIRTCHNL_VFR_VFACTIVE)
3003 break;
3004 }
3005
3006 pci_set_master(adapter->pdev);
3007 pci_restore_msi_state(adapter->pdev);
3008
3009 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
3010 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
3011 reg_val);
3012 iavf_disable_vf(adapter);
3013 mutex_unlock(&adapter->client_lock);
3014 mutex_unlock(&adapter->crit_lock);
3015 if (netif_running(netdev)) {
3016 rtnl_lock();
3017 dev_close(netdev);
3018 rtnl_unlock();
3019 }
3020 return; /* Do not attempt to reinit. It's dead, Jim. */
3021 }
3022
3023 continue_reset:
3024 /* We don't use netif_running() because it may be true prior to
3025 * ndo_open() returning, so we can't assume it means all our open
3026 * tasks have finished, since we're not holding the rtnl_lock here.
3027 */
3028 running = adapter->state == __IAVF_RUNNING;
3029
3030 if (running) {
3031 netif_carrier_off(netdev);
3032 netif_tx_stop_all_queues(netdev);
3033 adapter->link_up = false;
3034 iavf_napi_disable_all(adapter);
3035 }
3036 iavf_irq_disable(adapter);
3037
3038 iavf_change_state(adapter, __IAVF_RESETTING);
3039 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
3040
3041 /* free the Tx/Rx rings and descriptors, might be better to just
3042 * re-use them sometime in the future
3043 */
3044 iavf_free_all_rx_resources(adapter);
3045 iavf_free_all_tx_resources(adapter);
3046
3047 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
3048 /* kill and reinit the admin queue */
3049 iavf_shutdown_adminq(hw);
3050 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
3051 status = iavf_init_adminq(hw);
3052 if (status) {
3053 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
3054 status);
3055 goto reset_err;
3056 }
3057 adapter->aq_required = 0;
3058
3059 if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3060 (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3061 err = iavf_reinit_interrupt_scheme(adapter);
3062 if (err)
3063 goto reset_err;
3064 }
3065
3066 if (RSS_AQ(adapter)) {
3067 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
3068 } else {
3069 err = iavf_init_rss(adapter);
3070 if (err)
3071 goto reset_err;
3072 }
3073
3074 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
3075 /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
3076 	 * sent/received yet, so VLAN_V2_ALLOWED() is not reliable here;
3077 * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
3078 * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
3079 * been successfully sent and negotiated
3080 */
3081 adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
3082 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
3083
3084 spin_lock_bh(&adapter->mac_vlan_list_lock);
3085
3086 /* Delete filter for the current MAC address, it could have
3087 * been changed by the PF via administratively set MAC.
3088 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
3089 */
3090 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3091 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
3092 list_del(&f->list);
3093 kfree(f);
3094 }
3095 }
3096 /* re-add all MAC filters */
3097 list_for_each_entry(f, &adapter->mac_filter_list, list) {
3098 f->add = true;
3099 }
3100 spin_unlock_bh(&adapter->mac_vlan_list_lock);
3101
3102 /* check if TCs are running and re-add all cloud filters */
3103 spin_lock_bh(&adapter->cloud_filter_list_lock);
3104 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
3105 adapter->num_tc) {
3106 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
3107 cf->add = true;
3108 }
3109 }
3110 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3111
3112 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
3113 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3114 iavf_misc_irq_enable(adapter);
3115
3116 bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID);
3117 bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID);
3118
3119 mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
3120
3121 /* We were running when the reset started, so we need to restore some
3122 * state here.
3123 */
3124 if (running) {
3125 /* allocate transmit descriptors */
3126 err = iavf_setup_all_tx_resources(adapter);
3127 if (err)
3128 goto reset_err;
3129
3130 /* allocate receive descriptors */
3131 err = iavf_setup_all_rx_resources(adapter);
3132 if (err)
3133 goto reset_err;
3134
3135 if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3136 (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3137 err = iavf_request_traffic_irqs(adapter, netdev->name);
3138 if (err)
3139 goto reset_err;
3140
3141 adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
3142 }
3143
3144 iavf_configure(adapter);
3145
3146 /* iavf_up_complete() will switch device back
3147 * to __IAVF_RUNNING
3148 */
3149 iavf_up_complete(adapter);
3150
3151 iavf_irq_enable(adapter, true);
3152 } else {
3153 iavf_change_state(adapter, __IAVF_DOWN);
3154 wake_up(&adapter->down_waitqueue);
3155 }
3156
3157 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
3158
3159 mutex_unlock(&adapter->client_lock);
3160 mutex_unlock(&adapter->crit_lock);
3161
3162 goto reset_finish;
3163 reset_err:
3164 if (running) {
3165 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3166 iavf_free_traffic_irqs(adapter);
3167 }
3168 iavf_disable_vf(adapter);
3169
3170 mutex_unlock(&adapter->client_lock);
3171 mutex_unlock(&adapter->crit_lock);
3172
3173 if (netif_running(netdev)) {
3174 /* Close device to ensure that Tx queues will not be started
3175 * during netif_device_attach() at the end of the reset task.
3176 */
3177 rtnl_lock();
3178 dev_close(netdev);
3179 rtnl_unlock();
3180 }
3181
3182 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
3183 reset_finish:
3184 rtnl_lock();
3185 netif_device_attach(netdev);
3186 rtnl_unlock();
3187 }
3188
3189 /**
3190 * iavf_adminq_task - worker thread to clean the admin queue
3191 * @work: pointer to work_struct containing our data
3192 **/
3193 static void iavf_adminq_task(struct work_struct *work)
3194 {
3195 struct iavf_adapter *adapter =
3196 container_of(work, struct iavf_adapter, adminq_task);
3197 struct iavf_hw *hw = &adapter->hw;
3198 struct iavf_arq_event_info event;
3199 enum virtchnl_ops v_op;
3200 enum iavf_status ret, v_ret;
3201 u32 val, oldval;
3202 u16 pending;
3203
3204 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
3205 goto out;
3206
3207 if (!mutex_trylock(&adapter->crit_lock)) {
3208 if (adapter->state == __IAVF_REMOVE)
3209 return;
3210
3211 queue_work(iavf_wq, &adapter->adminq_task);
3212 goto out;
3213 }
3214
3215 event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
3216 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
3217 if (!event.msg_buf)
3218 goto out;
3219
3220 do {
3221 ret = iavf_clean_arq_element(hw, &event, &pending);
3222 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
3223 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
3224
3225 if (ret || !v_op)
3226 break; /* No event to process or error cleaning ARQ */
3227
3228 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
3229 event.msg_len);
3230 if (pending != 0)
3231 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
3232 } while (pending);
3233 mutex_unlock(&adapter->crit_lock);
3234
3235 if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) {
3236 if (adapter->netdev_registered ||
3237 !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
3238 struct net_device *netdev = adapter->netdev;
3239
3240 rtnl_lock();
3241 netdev_update_features(netdev);
3242 rtnl_unlock();
3243 /* Request VLAN offload settings */
3244 if (VLAN_V2_ALLOWED(adapter))
3245 iavf_set_vlan_offload_features
3246 (adapter, 0, netdev->features);
3247
3248 iavf_set_queue_vlan_tag_loc(adapter);
3249 }
3250
3251 adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
3252 }
3253 if ((adapter->flags &
3254 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
3255 adapter->state == __IAVF_RESETTING)
3256 goto freedom;
3257
3258 /* check for error indications */
3259 val = rd32(hw, hw->aq.arq.len);
3260 if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
3261 goto freedom;
3262 oldval = val;
3263 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
3264 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
3265 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
3266 }
3267 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
3268 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
3269 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
3270 }
3271 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
3272 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
3273 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
3274 }
3275 if (oldval != val)
3276 wr32(hw, hw->aq.arq.len, val);
3277
3278 val = rd32(hw, hw->aq.asq.len);
3279 oldval = val;
3280 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
3281 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
3282 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
3283 }
3284 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
3285 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
3286 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
3287 }
3288 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
3289 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
3290 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
3291 }
3292 if (oldval != val)
3293 wr32(hw, hw->aq.asq.len, val);
3294
3295 freedom:
3296 kfree(event.msg_buf);
3297 out:
3298 /* re-enable Admin queue interrupt cause */
3299 iavf_misc_irq_enable(adapter);
3300 }
3301
3302 /**
3303 * iavf_client_task - worker thread to perform client work
3304 * @work: pointer to work_struct containing our data
3305 *
3306 * This task handles client interactions. Because client calls can be
3307 * reentrant, we can't handle them in the watchdog.
3308 **/
3309 static void iavf_client_task(struct work_struct *work)
3310 {
3311 struct iavf_adapter *adapter =
3312 container_of(work, struct iavf_adapter, client_task.work);
3313
3314 /* If we can't get the client bit, just give up. We'll be rescheduled
3315 * later.
3316 */
3317
3318 if (!mutex_trylock(&adapter->client_lock))
3319 return;
3320
3321 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
3322 iavf_client_subtask(adapter);
3323 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
3324 goto out;
3325 }
3326 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
3327 iavf_notify_client_l2_params(&adapter->vsi);
3328 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
3329 goto out;
3330 }
3331 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
3332 iavf_notify_client_close(&adapter->vsi, false);
3333 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
3334 goto out;
3335 }
3336 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
3337 iavf_notify_client_open(&adapter->vsi);
3338 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
3339 }
3340 out:
3341 mutex_unlock(&adapter->client_lock);
3342 }
3343
3344 /**
3345 * iavf_free_all_tx_resources - Free Tx Resources for All Queues
3346 * @adapter: board private structure
3347 *
3348 * Free all transmit software resources
3349 **/
3350 void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
3351 {
3352 int i;
3353
3354 if (!adapter->tx_rings)
3355 return;
3356
3357 for (i = 0; i < adapter->num_active_queues; i++)
3358 if (adapter->tx_rings[i].desc)
3359 iavf_free_tx_resources(&adapter->tx_rings[i]);
3360 }
3361
3362 /**
3363 * iavf_setup_all_tx_resources - allocate all queues Tx resources
3364 * @adapter: board private structure
3365 *
3366 * If this function returns with an error, then it's possible one or
3367 * more of the rings is populated (while the rest are not). It is the
3368 * caller's duty to clean those orphaned rings.
3369 *
3370 * Return 0 on success, negative on failure
3371 **/
3372 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
3373 {
3374 int i, err = 0;
3375
3376 for (i = 0; i < adapter->num_active_queues; i++) {
3377 adapter->tx_rings[i].count = adapter->tx_desc_count;
3378 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
3379 if (!err)
3380 continue;
3381 dev_err(&adapter->pdev->dev,
3382 "Allocation for Tx Queue %u failed\n", i);
3383 break;
3384 }
3385
3386 return err;
3387 }
3388
3389 /**
3390 * iavf_setup_all_rx_resources - allocate all queues Rx resources
3391 * @adapter: board private structure
3392 *
3393 * If this function returns with an error, then it's possible one or
3394 * more of the rings is populated (while the rest are not). It is the
3395 * caller's duty to clean those orphaned rings.
3396 *
3397 * Return 0 on success, negative on failure
3398 **/
3399 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
3400 {
3401 int i, err = 0;
3402
3403 for (i = 0; i < adapter->num_active_queues; i++) {
3404 adapter->rx_rings[i].count = adapter->rx_desc_count;
3405 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
3406 if (!err)
3407 continue;
3408 dev_err(&adapter->pdev->dev,
3409 "Allocation for Rx Queue %u failed\n", i);
3410 break;
3411 }
3412 return err;
3413 }
3414
3415 /**
3416 * iavf_free_all_rx_resources - Free Rx Resources for All Queues
3417 * @adapter: board private structure
3418 *
3419 * Free all receive software resources
3420 **/
3421 void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
3422 {
3423 int i;
3424
3425 if (!adapter->rx_rings)
3426 return;
3427
3428 for (i = 0; i < adapter->num_active_queues; i++)
3429 if (adapter->rx_rings[i].desc)
3430 iavf_free_rx_resources(&adapter->rx_rings[i]);
3431 }
3432
3433 /**
3434 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
3435 * @adapter: board private structure
3436 * @max_tx_rate: max Tx bw for a tc
3437 **/
3438 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
3439 u64 max_tx_rate)
3440 {
3441 int speed = 0, ret = 0;
3442
3443 if (ADV_LINK_SUPPORT(adapter)) {
3444 if (adapter->link_speed_mbps < U32_MAX) {
3445 speed = adapter->link_speed_mbps;
3446 goto validate_bw;
3447 } else {
3448 dev_err(&adapter->pdev->dev, "Unknown link speed\n");
3449 return -EINVAL;
3450 }
3451 }
3452
3453 switch (adapter->link_speed) {
3454 case VIRTCHNL_LINK_SPEED_40GB:
3455 speed = SPEED_40000;
3456 break;
3457 case VIRTCHNL_LINK_SPEED_25GB:
3458 speed = SPEED_25000;
3459 break;
3460 case VIRTCHNL_LINK_SPEED_20GB:
3461 speed = SPEED_20000;
3462 break;
3463 case VIRTCHNL_LINK_SPEED_10GB:
3464 speed = SPEED_10000;
3465 break;
3466 case VIRTCHNL_LINK_SPEED_5GB:
3467 speed = SPEED_5000;
3468 break;
3469 case VIRTCHNL_LINK_SPEED_2_5GB:
3470 speed = SPEED_2500;
3471 break;
3472 case VIRTCHNL_LINK_SPEED_1GB:
3473 speed = SPEED_1000;
3474 break;
3475 case VIRTCHNL_LINK_SPEED_100MB:
3476 speed = SPEED_100;
3477 break;
3478 default:
3479 break;
3480 }
3481
3482 validate_bw:
3483 if (max_tx_rate > speed) {
3484 dev_err(&adapter->pdev->dev,
3485 "Invalid tx rate specified\n");
3486 ret = -EINVAL;
3487 }
3488
3489 return ret;
3490 }
3491
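/* For example (illustrative figures only): on a link reporting 10 Gbps
 * (speed == SPEED_10000, i.e. 10000 Mbps), a cumulative max_tx_rate request
 * of 12000 Mbps fails the "max_tx_rate > speed" check in
 * iavf_validate_tx_bandwidth() above, while 8000 Mbps passes.
 */
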
3492 /**
3493 * iavf_validate_ch_config - validate queue mapping info
3494 * @adapter: board private structure
3495 * @mqprio_qopt: queue parameters
3496 *
3497 * This function checks whether the config provided by the user to
3498 * configure queue channels is valid. Returns 0 on a valid
3499 * config.
3500 **/
3501 static int iavf_validate_ch_config(struct iavf_adapter *adapter,
3502 struct tc_mqprio_qopt_offload *mqprio_qopt)
3503 {
3504 u64 total_max_rate = 0;
3505 u32 tx_rate_rem = 0;
3506 int i, num_qps = 0;
3507 u64 tx_rate = 0;
3508 int ret = 0;
3509
3510 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
3511 mqprio_qopt->qopt.num_tc < 1)
3512 return -EINVAL;
3513
3514 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
3515 if (!mqprio_qopt->qopt.count[i] ||
3516 mqprio_qopt->qopt.offset[i] != num_qps)
3517 return -EINVAL;
3518 if (mqprio_qopt->min_rate[i]) {
3519 dev_err(&adapter->pdev->dev,
3520 "Invalid min tx rate (greater than 0) specified for TC%d\n",
3521 i);
3522 return -EINVAL;
3523 }
3524
3525 /* convert to Mbps */
3526 tx_rate = div_u64(mqprio_qopt->max_rate[i],
3527 IAVF_MBPS_DIVISOR);
3528
3529 if (mqprio_qopt->max_rate[i] &&
3530 tx_rate < IAVF_MBPS_QUANTA) {
3531 dev_err(&adapter->pdev->dev,
3532 "Invalid max tx rate for TC%d, minimum %dMbps\n",
3533 i, IAVF_MBPS_QUANTA);
3534 return -EINVAL;
3535 }
3536
3537 (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
3538
3539 if (tx_rate_rem != 0) {
3540 dev_err(&adapter->pdev->dev,
3541 "Invalid max tx rate for TC%d, not divisible by %d\n",
3542 i, IAVF_MBPS_QUANTA);
3543 return -EINVAL;
3544 }
3545
3546 total_max_rate += tx_rate;
3547 num_qps += mqprio_qopt->qopt.count[i];
3548 }
3549 if (num_qps > adapter->num_active_queues) {
3550 dev_err(&adapter->pdev->dev,
3551 "Cannot support requested number of queues\n");
3552 return -EINVAL;
3553 }
3554
3555 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
3556 return ret;
3557 }
3558
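/* Illustrative userspace request that would be validated by
 * iavf_validate_ch_config() above; the interface name, queue layout and
 * rates are placeholders, not taken from this source:
 *
 *   tc qdisc add dev <ethX> root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *           queues 4@0 4@4 hw 1 mode channel \
 *           shaper bw_rlimit max_rate 4Gbit 2Gbit
 *
 * num_tc, queues and max_rate arrive as qopt.num_tc, qopt.count[]/offset[]
 * and max_rate[]. Rates are converted to Mbps and must be at least
 * IAVF_MBPS_QUANTA and a multiple of it, min_rate must not be set, and the
 * summed queue count may not exceed num_active_queues.
 */
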
3559 /**
3560 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
3561 * @adapter: board private structure
3562 **/
3563 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
3564 {
3565 struct iavf_cloud_filter *cf, *cftmp;
3566
3567 spin_lock_bh(&adapter->cloud_filter_list_lock);
3568 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
3569 list) {
3570 list_del(&cf->list);
3571 kfree(cf);
3572 adapter->num_cloud_filters--;
3573 }
3574 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3575 }
3576
3577 /**
3578 * __iavf_setup_tc - configure multiple traffic classes
3579 * @netdev: network interface device structure
3580 * @type_data: tc offload data
3581 *
3582 * This function processes the config information provided by the
3583 * user to configure traffic classes/queue channels and packages the
3584 * information to request the PF to set up traffic classes.
3585 *
3586 * Returns 0 on success.
3587 **/
3588 static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
3589 {
3590 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
3591 struct iavf_adapter *adapter = netdev_priv(netdev);
3592 struct virtchnl_vf_resource *vfres = adapter->vf_res;
3593 u8 num_tc = 0, total_qps = 0;
3594 int ret = 0, netdev_tc = 0;
3595 u64 max_tx_rate;
3596 u16 mode;
3597 int i;
3598
3599 num_tc = mqprio_qopt->qopt.num_tc;
3600 mode = mqprio_qopt->mode;
3601
3602 /* delete queue_channel */
3603 if (!mqprio_qopt->qopt.hw) {
3604 if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
3605 /* reset the tc configuration */
3606 netdev_reset_tc(netdev);
3607 adapter->num_tc = 0;
3608 netif_tx_stop_all_queues(netdev);
3609 netif_tx_disable(netdev);
3610 iavf_del_all_cloud_filters(adapter);
3611 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
3612 total_qps = adapter->orig_num_active_queues;
3613 goto exit;
3614 } else {
3615 return -EINVAL;
3616 }
3617 }
3618
3619 /* add queue channel */
3620 if (mode == TC_MQPRIO_MODE_CHANNEL) {
3621 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3622 dev_err(&adapter->pdev->dev, "ADq not supported\n");
3623 return -EOPNOTSUPP;
3624 }
3625 if (adapter->ch_config.state != __IAVF_TC_INVALID) {
3626 dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
3627 return -EINVAL;
3628 }
3629
3630 ret = iavf_validate_ch_config(adapter, mqprio_qopt);
3631 if (ret)
3632 return ret;
3633 /* Return if same TC config is requested */
3634 if (adapter->num_tc == num_tc)
3635 return 0;
3636 adapter->num_tc = num_tc;
3637
3638 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3639 if (i < num_tc) {
3640 adapter->ch_config.ch_info[i].count =
3641 mqprio_qopt->qopt.count[i];
3642 adapter->ch_config.ch_info[i].offset =
3643 mqprio_qopt->qopt.offset[i];
3644 total_qps += mqprio_qopt->qopt.count[i];
3645 max_tx_rate = mqprio_qopt->max_rate[i];
3646 /* convert to Mbps */
3647 max_tx_rate = div_u64(max_tx_rate,
3648 IAVF_MBPS_DIVISOR);
3649 adapter->ch_config.ch_info[i].max_tx_rate =
3650 max_tx_rate;
3651 } else {
3652 adapter->ch_config.ch_info[i].count = 1;
3653 adapter->ch_config.ch_info[i].offset = 0;
3654 }
3655 }
3656
3657 /* Take a snapshot of the original config, such as "num_active_queues".
3658 * It is used later when the delete ADQ flow is exercised, so that
3659 * once the delete ADQ flow completes, the VF goes back to its
3660 * original queue configuration
3661 */
3662
3663 adapter->orig_num_active_queues = adapter->num_active_queues;
3664
3665 /* Store queue info based on TC so that VF gets configured
3666 * with correct number of queues when VF completes ADQ config
3667 * flow
3668 */
3669 adapter->ch_config.total_qps = total_qps;
3670
3671 netif_tx_stop_all_queues(netdev);
3672 netif_tx_disable(netdev);
3673 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
3674 netdev_reset_tc(netdev);
3675 /* Report the tc mapping up the stack */
3676 netdev_set_num_tc(adapter->netdev, num_tc);
3677 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3678 u16 qcount = mqprio_qopt->qopt.count[i];
3679 u16 qoffset = mqprio_qopt->qopt.offset[i];
3680
3681 if (i < num_tc)
3682 netdev_set_tc_queue(netdev, netdev_tc++, qcount,
3683 qoffset);
3684 }
3685 }
3686 exit:
3687 if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
3688 return 0;
3689
3690 netif_set_real_num_rx_queues(netdev, total_qps);
3691 netif_set_real_num_tx_queues(netdev, total_qps);
3692
3693 return ret;
3694 }
3695
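/* The disable path in __iavf_setup_tc() above (qopt.hw == 0) is normally
 * reached when userspace removes the offloaded qdisc, e.g. something like
 * "tc qdisc del dev <ethX> root" (interface name is a placeholder). It
 * resets the tc mapping, drops the cloud filters and queues
 * IAVF_FLAG_AQ_DISABLE_CHANNELS so the PF tears the channels down.
 */
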
3696 /**
3697 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
3698 * @adapter: board private structure
3699 * @f: pointer to struct flow_cls_offload
3700 * @filter: pointer to cloud filter structure
3701 */
3702 static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
3703 struct flow_cls_offload *f,
3704 struct iavf_cloud_filter *filter)
3705 {
3706 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3707 struct flow_dissector *dissector = rule->match.dissector;
3708 u16 n_proto_mask = 0;
3709 u16 n_proto_key = 0;
3710 u8 field_flags = 0;
3711 u16 addr_type = 0;
3712 u16 n_proto = 0;
3713 int i = 0;
3714 struct virtchnl_filter *vf = &filter->f;
3715
3716 if (dissector->used_keys &
3717 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
3718 BIT(FLOW_DISSECTOR_KEY_BASIC) |
3719 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
3720 BIT(FLOW_DISSECTOR_KEY_VLAN) |
3721 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
3722 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
3723 BIT(FLOW_DISSECTOR_KEY_PORTS) |
3724 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
3725 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
3726 dissector->used_keys);
3727 return -EOPNOTSUPP;
3728 }
3729
3730 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
3731 struct flow_match_enc_keyid match;
3732
3733 flow_rule_match_enc_keyid(rule, &match);
3734 if (match.mask->keyid != 0)
3735 field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
3736 }
3737
3738 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
3739 struct flow_match_basic match;
3740
3741 flow_rule_match_basic(rule, &match);
3742 n_proto_key = ntohs(match.key->n_proto);
3743 n_proto_mask = ntohs(match.mask->n_proto);
3744
3745 if (n_proto_key == ETH_P_ALL) {
3746 n_proto_key = 0;
3747 n_proto_mask = 0;
3748 }
3749 n_proto = n_proto_key & n_proto_mask;
3750 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
3751 return -EINVAL;
3752 if (n_proto == ETH_P_IPV6) {
3753 /* specify flow type as TCP IPv6 */
3754 vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
3755 }
3756
3757 if (match.key->ip_proto != IPPROTO_TCP) {
3758 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
3759 return -EINVAL;
3760 }
3761 }
3762
3763 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
3764 struct flow_match_eth_addrs match;
3765
3766 flow_rule_match_eth_addrs(rule, &match);
3767
3768 /* use is_broadcast and is_zero to check for all 0xff or all 0 */
3769 if (!is_zero_ether_addr(match.mask->dst)) {
3770 if (is_broadcast_ether_addr(match.mask->dst)) {
3771 field_flags |= IAVF_CLOUD_FIELD_OMAC;
3772 } else {
3773 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
3774 match.mask->dst);
3775 return -EINVAL;
3776 }
3777 }
3778
3779 if (!is_zero_ether_addr(match.mask->src)) {
3780 if (is_broadcast_ether_addr(match.mask->src)) {
3781 field_flags |= IAVF_CLOUD_FIELD_IMAC;
3782 } else {
3783 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
3784 match.mask->src);
3785 return -EINVAL;
3786 }
3787 }
3788
3789 if (!is_zero_ether_addr(match.key->dst))
3790 if (is_valid_ether_addr(match.key->dst) ||
3791 is_multicast_ether_addr(match.key->dst)) {
3792 /* set the mask if a valid dst_mac address */
3793 for (i = 0; i < ETH_ALEN; i++)
3794 vf->mask.tcp_spec.dst_mac[i] |= 0xff;
3795 ether_addr_copy(vf->data.tcp_spec.dst_mac,
3796 match.key->dst);
3797 }
3798
3799 if (!is_zero_ether_addr(match.key->src))
3800 if (is_valid_ether_addr(match.key->src) ||
3801 is_multicast_ether_addr(match.key->src)) {
3802 /* set the mask if a valid src_mac address */
3803 for (i = 0; i < ETH_ALEN; i++)
3804 vf->mask.tcp_spec.src_mac[i] |= 0xff;
3805 ether_addr_copy(vf->data.tcp_spec.src_mac,
3806 match.key->src);
3807 }
3808 }
3809
3810 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
3811 struct flow_match_vlan match;
3812
3813 flow_rule_match_vlan(rule, &match);
3814 if (match.mask->vlan_id) {
3815 if (match.mask->vlan_id == VLAN_VID_MASK) {
3816 field_flags |= IAVF_CLOUD_FIELD_IVLAN;
3817 } else {
3818 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
3819 match.mask->vlan_id);
3820 return -EINVAL;
3821 }
3822 }
3823 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
3824 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
3825 }
3826
3827 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
3828 struct flow_match_control match;
3829
3830 flow_rule_match_control(rule, &match);
3831 addr_type = match.key->addr_type;
3832 }
3833
3834 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
3835 struct flow_match_ipv4_addrs match;
3836
3837 flow_rule_match_ipv4_addrs(rule, &match);
3838 if (match.mask->dst) {
3839 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
3840 field_flags |= IAVF_CLOUD_FIELD_IIP;
3841 } else {
3842 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
3843 be32_to_cpu(match.mask->dst));
3844 return -EINVAL;
3845 }
3846 }
3847
3848 if (match.mask->src) {
3849 if (match.mask->src == cpu_to_be32(0xffffffff)) {
3850 field_flags |= IAVF_CLOUD_FIELD_IIP;
3851 } else {
3852 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
3853 be32_to_cpu(match.mask->src));
3854 return -EINVAL;
3855 }
3856 }
3857
3858 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
3859 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
3860 return -EINVAL;
3861 }
3862 if (match.key->dst) {
3863 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
3864 vf->data.tcp_spec.dst_ip[0] = match.key->dst;
3865 }
3866 if (match.key->src) {
3867 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
3868 vf->data.tcp_spec.src_ip[0] = match.key->src;
3869 }
3870 }
3871
3872 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
3873 struct flow_match_ipv6_addrs match;
3874
3875 flow_rule_match_ipv6_addrs(rule, &match);
3876
3877 /* validate mask, make sure it is not IPV6_ADDR_ANY */
3878 if (ipv6_addr_any(&match.mask->dst)) {
3879 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
3880 IPV6_ADDR_ANY);
3881 return -EINVAL;
3882 }
3883
3884 /* src and dest IPv6 address should not be LOOPBACK
3885 * (0:0:0:0:0:0:0:1) which can be represented as ::1
3886 */
3887 if (ipv6_addr_loopback(&match.key->dst) ||
3888 ipv6_addr_loopback(&match.key->src)) {
3889 dev_err(&adapter->pdev->dev,
3890 "ipv6 addr should not be loopback\n");
3891 return -EINVAL;
3892 }
3893 if (!ipv6_addr_any(&match.mask->dst) ||
3894 !ipv6_addr_any(&match.mask->src))
3895 field_flags |= IAVF_CLOUD_FIELD_IIP;
3896
3897 for (i = 0; i < 4; i++)
3898 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
3899 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
3900 sizeof(vf->data.tcp_spec.dst_ip));
3901 for (i = 0; i < 4; i++)
3902 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
3903 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
3904 sizeof(vf->data.tcp_spec.src_ip));
3905 }
3906 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
3907 struct flow_match_ports match;
3908
3909 flow_rule_match_ports(rule, &match);
3910 if (match.mask->src) {
3911 if (match.mask->src == cpu_to_be16(0xffff)) {
3912 field_flags |= IAVF_CLOUD_FIELD_IIP;
3913 } else {
3914 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
3915 be16_to_cpu(match.mask->src));
3916 return -EINVAL;
3917 }
3918 }
3919
3920 if (match.mask->dst) {
3921 if (match.mask->dst == cpu_to_be16(0xffff)) {
3922 field_flags |= IAVF_CLOUD_FIELD_IIP;
3923 } else {
3924 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
3925 be16_to_cpu(match.mask->dst));
3926 return -EINVAL;
3927 }
3928 }
3929 if (match.key->dst) {
3930 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
3931 vf->data.tcp_spec.dst_port = match.key->dst;
3932 }
3933
3934 if (match.key->src) {
3935 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
3936 vf->data.tcp_spec.src_port = match.key->src;
3937 }
3938 }
3939 vf->field_flags = field_flags;
3940
3941 return 0;
3942 }
3943
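/* Illustrative filter that iavf_parse_cls_flower() above can translate into
 * a struct virtchnl_filter, assuming a clsact/ingress qdisc is attached and
 * ADQ traffic classes are configured; addresses, port and TC index are
 * placeholders:
 *
 *   tc filter add dev <ethX> protocol ip ingress \
 *           flower ip_proto tcp dst_ip 192.168.10.2 dst_port 5201 \
 *           skip_sw hw_tc 1
 *
 * Only TCP over IPv4/IPv6 is accepted, and matched fields require exact
 * (all-ones) masks. The hw_tc index is handled by iavf_handle_tclass()
 * below, which insists on a destination port for any TC other than TC0.
 */
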
3944 /**
3945 * iavf_handle_tclass - Forward to a traffic class on the device
3946 * @adapter: board private structure
3947 * @tc: traffic class index on the device
3948 * @filter: pointer to cloud filter structure
3949 */
3950 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
3951 struct iavf_cloud_filter *filter)
3952 {
3953 if (tc == 0)
3954 return 0;
3955 if (tc < adapter->num_tc) {
3956 if (!filter->f.data.tcp_spec.dst_port) {
3957 dev_err(&adapter->pdev->dev,
3958 "Specify destination port to redirect to traffic class other than TC0\n");
3959 return -EINVAL;
3960 }
3961 }
3962 /* redirect to a traffic class on the same device */
3963 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
3964 filter->f.action_meta = tc;
3965 return 0;
3966 }
3967
3968 /**
3969 * iavf_find_cf - Find the cloud filter in the list
3970 * @adapter: Board private structure
3971 * @cookie: filter specific cookie
3972 *
3973 * Returns ptr to the filter object or NULL. Must be called while holding the
3974 * cloud_filter_list_lock.
3975 */
3976 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
3977 unsigned long *cookie)
3978 {
3979 struct iavf_cloud_filter *filter = NULL;
3980
3981 if (!cookie)
3982 return NULL;
3983
3984 list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
3985 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
3986 return filter;
3987 }
3988 return NULL;
3989 }
3990
3991 /**
3992 * iavf_configure_clsflower - Add tc flower filters
3993 * @adapter: board private structure
3994 * @cls_flower: Pointer to struct flow_cls_offload
3995 */
3996 static int iavf_configure_clsflower(struct iavf_adapter *adapter,
3997 struct flow_cls_offload *cls_flower)
3998 {
3999 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
4000 struct iavf_cloud_filter *filter = NULL;
4001 int err = -EINVAL, count = 50;
4002
4003 if (tc < 0) {
4004 dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
4005 return -EINVAL;
4006 }
4007
4008 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
4009 if (!filter)
4010 return -ENOMEM;
4011
4012 while (!mutex_trylock(&adapter->crit_lock)) {
4013 if (--count == 0) {
4014 kfree(filter);
4015 return err;
4016 }
4017 udelay(1);
4018 }
4019
4020 filter->cookie = cls_flower->cookie;
4021
4022 /* bail out here if filter already exists */
4023 spin_lock_bh(&adapter->cloud_filter_list_lock);
4024 if (iavf_find_cf(adapter, &cls_flower->cookie)) {
4025 dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n");
4026 err = -EEXIST;
4027 goto spin_unlock;
4028 }
4029 spin_unlock_bh(&adapter->cloud_filter_list_lock);
4030
4031 /* set the mask to all zeroes to begin with */
4032 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
4033 /* start out with flow type and eth type IPv4 to begin with */
4034 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
4035 err = iavf_parse_cls_flower(adapter, cls_flower, filter);
4036 if (err)
4037 goto err;
4038
4039 err = iavf_handle_tclass(adapter, tc, filter);
4040 if (err)
4041 goto err;
4042
4043 /* add filter to the list */
4044 spin_lock_bh(&adapter->cloud_filter_list_lock);
4045 list_add_tail(&filter->list, &adapter->cloud_filter_list);
4046 adapter->num_cloud_filters++;
4047 filter->add = true;
4048 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
4049 spin_unlock:
4050 spin_unlock_bh(&adapter->cloud_filter_list_lock);
4051 err:
4052 if (err)
4053 kfree(filter);
4054
4055 mutex_unlock(&adapter->crit_lock);
4056 return err;
4057 }
4058
4059 /**
4060 * iavf_delete_clsflower - Remove tc flower filters
4061 * @adapter: board private structure
4062 * @cls_flower: Pointer to struct flow_cls_offload
4063 */
4064 static int iavf_delete_clsflower(struct iavf_adapter *adapter,
4065 struct flow_cls_offload *cls_flower)
4066 {
4067 struct iavf_cloud_filter *filter = NULL;
4068 int err = 0;
4069
4070 spin_lock_bh(&adapter->cloud_filter_list_lock);
4071 filter = iavf_find_cf(adapter, &cls_flower->cookie);
4072 if (filter) {
4073 filter->del = true;
4074 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
4075 } else {
4076 err = -EINVAL;
4077 }
4078 spin_unlock_bh(&adapter->cloud_filter_list_lock);
4079
4080 return err;
4081 }
4082
4083 /**
4084 * iavf_setup_tc_cls_flower - flower classifier offloads
4085 * @adapter: board private structure
4086 * @cls_flower: pointer to flow_cls_offload struct with flow info
4087 */
4088 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
4089 struct flow_cls_offload *cls_flower)
4090 {
4091 switch (cls_flower->command) {
4092 case FLOW_CLS_REPLACE:
4093 return iavf_configure_clsflower(adapter, cls_flower);
4094 case FLOW_CLS_DESTROY:
4095 return iavf_delete_clsflower(adapter, cls_flower);
4096 case FLOW_CLS_STATS:
4097 return -EOPNOTSUPP;
4098 default:
4099 return -EOPNOTSUPP;
4100 }
4101 }
4102
4103 /**
4104 * iavf_setup_tc_block_cb - block callback for tc
4105 * @type: type of offload
4106 * @type_data: offload data
4107 * @cb_priv: adapter private data passed when the block callback was registered
4108 *
4109 * This function is the block callback for traffic classes
4110 **/
4111 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4112 void *cb_priv)
4113 {
4114 struct iavf_adapter *adapter = cb_priv;
4115
4116 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
4117 return -EOPNOTSUPP;
4118
4119 switch (type) {
4120 case TC_SETUP_CLSFLOWER:
4121 return iavf_setup_tc_cls_flower(cb_priv, type_data);
4122 default:
4123 return -EOPNOTSUPP;
4124 }
4125 }
4126
4127 static LIST_HEAD(iavf_block_cb_list);
4128
4129 /**
4130 * iavf_setup_tc - configure multiple traffic classes
4131 * @netdev: network interface device structure
4132 * @type: type of offload
4133 * @type_data: tc offload data
4134 *
4135 * This function is the callback to ndo_setup_tc in the
4136 * netdev_ops.
4137 *
4138 * Returns 0 on success
4139 **/
4140 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
4141 void *type_data)
4142 {
4143 struct iavf_adapter *adapter = netdev_priv(netdev);
4144
4145 switch (type) {
4146 case TC_SETUP_QDISC_MQPRIO:
4147 return __iavf_setup_tc(netdev, type_data);
4148 case TC_SETUP_BLOCK:
4149 return flow_block_cb_setup_simple(type_data,
4150 &iavf_block_cb_list,
4151 iavf_setup_tc_block_cb,
4152 adapter, adapter, true);
4153 default:
4154 return -EOPNOTSUPP;
4155 }
4156 }
4157
4158 /**
4159 * iavf_open - Called when a network interface is made active
4160 * @netdev: network interface device structure
4161 *
4162 * Returns 0 on success, negative value on failure
4163 *
4164 * The open entry point is called when a network interface is made
4165 * active by the system (IFF_UP). At this point all resources needed
4166 * for transmit and receive operations are allocated, the interrupt
4167 * handler is registered with the OS, the watchdog is started,
4168 * and the stack is notified that the interface is ready.
4169 **/
4170 static int iavf_open(struct net_device *netdev)
4171 {
4172 struct iavf_adapter *adapter = netdev_priv(netdev);
4173 int err;
4174
4175 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
4176 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
4177 return -EIO;
4178 }
4179
4180 while (!mutex_trylock(&adapter->crit_lock)) {
4181 /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
4182 * is already taken and iavf_open is called from an upper
4183 * device's notifier reacting to the NETDEV_REGISTER event.
4184 * We have to bail out here to avoid a deadlock.
4185 */
4186 if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
4187 return -EBUSY;
4188
4189 usleep_range(500, 1000);
4190 }
4191
4192 if (adapter->state != __IAVF_DOWN) {
4193 err = -EBUSY;
4194 goto err_unlock;
4195 }
4196
4197 if (adapter->state == __IAVF_RUNNING &&
4198 !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
4199 dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
4200 err = 0;
4201 goto err_unlock;
4202 }
4203
4204 /* allocate transmit descriptors */
4205 err = iavf_setup_all_tx_resources(adapter);
4206 if (err)
4207 goto err_setup_tx;
4208
4209 /* allocate receive descriptors */
4210 err = iavf_setup_all_rx_resources(adapter);
4211 if (err)
4212 goto err_setup_rx;
4213
4214 /* clear any pending interrupts, may auto mask */
4215 err = iavf_request_traffic_irqs(adapter, netdev->name);
4216 if (err)
4217 goto err_req_irq;
4218
4219 spin_lock_bh(&adapter->mac_vlan_list_lock);
4220
4221 iavf_add_filter(adapter, adapter->hw.mac.addr);
4222
4223 spin_unlock_bh(&adapter->mac_vlan_list_lock);
4224
4225 /* Restore VLAN filters that were removed with IFF_DOWN */
4226 iavf_restore_filters(adapter);
4227
4228 iavf_configure(adapter);
4229
4230 iavf_up_complete(adapter);
4231
4232 iavf_irq_enable(adapter, true);
4233
4234 mutex_unlock(&adapter->crit_lock);
4235
4236 return 0;
4237
4238 err_req_irq:
4239 iavf_down(adapter);
4240 iavf_free_traffic_irqs(adapter);
4241 err_setup_rx:
4242 iavf_free_all_rx_resources(adapter);
4243 err_setup_tx:
4244 iavf_free_all_tx_resources(adapter);
4245 err_unlock:
4246 mutex_unlock(&adapter->crit_lock);
4247
4248 return err;
4249 }
4250
4251 /**
4252 * iavf_close - Disables a network interface
4253 * @netdev: network interface device structure
4254 *
4255 * Returns 0, this is not allowed to fail
4256 *
4257 * The close entry point is called when an interface is de-activated
4258 * by the OS. The hardware is still under the drivers control, but
4259 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
4260 * are freed, along with all transmit and receive resources.
4261 **/
4262 static int iavf_close(struct net_device *netdev)
4263 {
4264 struct iavf_adapter *adapter = netdev_priv(netdev);
4265 u64 aq_to_restore;
4266 int status;
4267
4268 mutex_lock(&adapter->crit_lock);
4269
4270 if (adapter->state <= __IAVF_DOWN_PENDING) {
4271 mutex_unlock(&adapter->crit_lock);
4272 return 0;
4273 }
4274
4275 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
4276 if (CLIENT_ENABLED(adapter))
4277 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
4278 /* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
4279 * IAVF_FLAG_AQ_DISABLE_QUEUES because in that case there is an rtnl
4280 * deadlock with adminq_task() until iavf_close() times out. We must send
4281 * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES so that
4282 * disabling the queues is possible for the VF. Give only the necessary
4283 * flags to iavf_down() and save the others to set right before
4284 * iavf_close() returns, when IAVF_FLAG_AQ_DISABLE_QUEUES has already
4285 * been sent and iavf is in the DOWN state.
4286 */
4287 aq_to_restore = adapter->aq_required;
4288 adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;
4289
4290 /* Remove flags which we do not want to send after close or we want to
4291 * send before disable queues.
4292 */
4293 aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG |
4294 IAVF_FLAG_AQ_ENABLE_QUEUES |
4295 IAVF_FLAG_AQ_CONFIGURE_QUEUES |
4296 IAVF_FLAG_AQ_ADD_VLAN_FILTER |
4297 IAVF_FLAG_AQ_ADD_MAC_FILTER |
4298 IAVF_FLAG_AQ_ADD_CLOUD_FILTER |
4299 IAVF_FLAG_AQ_ADD_FDIR_FILTER |
4300 IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
4301
4302 iavf_down(adapter);
4303 iavf_change_state(adapter, __IAVF_DOWN_PENDING);
4304 iavf_free_traffic_irqs(adapter);
4305
4306 mutex_unlock(&adapter->crit_lock);
4307
4308 /* We explicitly don't free resources here because the hardware is
4309 * still active and can DMA into memory. Resources are cleared in
4310 * iavf_virtchnl_completion() after we get confirmation from the PF
4311 * driver that the rings have been stopped.
4312 *
4313 * Also, we wait for state to transition to __IAVF_DOWN before
4314 * returning. State change occurs in iavf_virtchnl_completion() after
4315 * VF resources are released (which occurs after PF driver processes and
4316 * responds to admin queue commands).
4317 */
4318
4319 status = wait_event_timeout(adapter->down_waitqueue,
4320 adapter->state == __IAVF_DOWN,
4321 msecs_to_jiffies(500));
4322 if (!status)
4323 netdev_warn(netdev, "Device resources not yet released\n");
4324
4325 mutex_lock(&adapter->crit_lock);
4326 adapter->aq_required |= aq_to_restore;
4327 mutex_unlock(&adapter->crit_lock);
4328 return 0;
4329 }
4330
4331 /**
4332 * iavf_change_mtu - Change the Maximum Transfer Unit
4333 * @netdev: network interface device structure
4334 * @new_mtu: new value for maximum frame size
4335 *
4336 * Returns 0 on success, negative on failure
4337 **/
4338 static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
4339 {
4340 struct iavf_adapter *adapter = netdev_priv(netdev);
4341
4342 netdev_dbg(netdev, "changing MTU from %d to %d\n",
4343 netdev->mtu, new_mtu);
4344 netdev->mtu = new_mtu;
4345 if (CLIENT_ENABLED(adapter)) {
4346 iavf_notify_client_l2_params(&adapter->vsi);
4347 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
4348 }
4349
4350 if (netif_running(netdev)) {
4351 adapter->flags |= IAVF_FLAG_RESET_NEEDED;
4352 queue_work(iavf_wq, &adapter->reset_task);
4353 }
4354
4355 return 0;
4356 }
4357
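/* An MTU change normally comes from userspace, e.g. something like
 * "ip link set <ethX> mtu 9000" (interface name is a placeholder). When the
 * interface is running, iavf_change_mtu() above schedules reset_task so the
 * rings are rebuilt for the new frame size.
 */
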
4358 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
4359 NETIF_F_HW_VLAN_CTAG_TX | \
4360 NETIF_F_HW_VLAN_STAG_RX | \
4361 NETIF_F_HW_VLAN_STAG_TX)
4362
4363 /**
4364 * iavf_set_features - set the netdev feature flags
4365 * @netdev: ptr to the netdev being adjusted
4366 * @features: the feature set that the stack is suggesting
4367 * Note: expects to be called while under rtnl_lock()
4368 **/
4369 static int iavf_set_features(struct net_device *netdev,
4370 netdev_features_t features)
4371 {
4372 struct iavf_adapter *adapter = netdev_priv(netdev);
4373
4374 /* trigger update on any VLAN feature change */
4375 if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^
4376 (features & NETIF_VLAN_OFFLOAD_FEATURES))
4377 iavf_set_vlan_offload_features(adapter, netdev->features,
4378 features);
4379
4380 return 0;
4381 }
4382
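/* VLAN offload toggles typically reach iavf_set_features() above from
 * ethtool under rtnl_lock, e.g. "ethtool -K <ethX> rx-vlan-offload off"
 * (interface name is a placeholder). Any change in the
 * NETIF_VLAN_OFFLOAD_FEATURES bits results in a call to
 * iavf_set_vlan_offload_features() to re-program stripping/insertion.
 */
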
4383 /**
4384 * iavf_features_check - Validate encapsulated packet conforms to limits
4385 * @skb: skb buff
4386 * @dev: This physical port's netdev
4387 * @features: Offload features that the stack believes apply
4388 **/
4389 static netdev_features_t iavf_features_check(struct sk_buff *skb,
4390 struct net_device *dev,
4391 netdev_features_t features)
4392 {
4393 size_t len;
4394
4395 /* No point in doing any of this if neither checksum nor GSO are
4396 * being requested for this frame. We can rule out both by just
4397 * checking for CHECKSUM_PARTIAL
4398 */
4399 if (skb->ip_summed != CHECKSUM_PARTIAL)
4400 return features;
4401
4402 /* We cannot support GSO if the MSS is going to be less than
4403 * 64 bytes. If it is then we need to drop support for GSO.
4404 */
4405 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
4406 features &= ~NETIF_F_GSO_MASK;
4407
4408 /* MACLEN can support at most 63 words */
4409 len = skb_network_header(skb) - skb->data;
4410 if (len & ~(63 * 2))
4411 goto out_err;
4412
4413 /* IPLEN and EIPLEN can support at most 127 dwords */
4414 len = skb_transport_header(skb) - skb_network_header(skb);
4415 if (len & ~(127 * 4))
4416 goto out_err;
4417
4418 if (skb->encapsulation) {
4419 /* L4TUNLEN can support 127 words */
4420 len = skb_inner_network_header(skb) - skb_transport_header(skb);
4421 if (len & ~(127 * 2))
4422 goto out_err;
4423
4424 /* IPLEN can support at most 127 dwords */
4425 len = skb_inner_transport_header(skb) -
4426 skb_inner_network_header(skb);
4427 if (len & ~(127 * 4))
4428 goto out_err;
4429 }
4430
4431 /* No need to validate L4LEN as TCP is the only protocol with a
4432 * flexible value and we support all possible values supported
4433 * by TCP, which is at most 15 dwords
4434 */
4435
4436 return features;
4437 out_err:
4438 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4439 }
4440
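/* Worked example for the length checks in iavf_features_check() above:
 * "len & ~(63 * 2)" is non-zero when the L2 header is longer than 126 bytes
 * or not 2-byte aligned, and "len & ~(127 * 4)" is non-zero when an IP
 * header is longer than 508 bytes or not 4-byte aligned. A standard 14-byte
 * Ethernet header and a 20-byte IPv4 header therefore pass, while an
 * oversized or misaligned header falls through to out_err and loses
 * checksum/GSO offload for that skb.
 */
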
4441 /**
4442 * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can toggle on/off
4443 * @adapter: board private structure
4444 *
4445 * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4446 * were negotiated determine the VLAN features that can be toggled on and off.
4447 **/
4448 static netdev_features_t
4449 iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
4450 {
4451 netdev_features_t hw_features = 0;
4452
4453 if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4454 return hw_features;
4455
4456 /* Enable VLAN features if supported */
4457 if (VLAN_ALLOWED(adapter)) {
4458 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
4459 NETIF_F_HW_VLAN_CTAG_RX);
4460 } else if (VLAN_V2_ALLOWED(adapter)) {
4461 struct virtchnl_vlan_caps *vlan_v2_caps =
4462 &adapter->vlan_v2_caps;
4463 struct virtchnl_vlan_supported_caps *stripping_support =
4464 &vlan_v2_caps->offloads.stripping_support;
4465 struct virtchnl_vlan_supported_caps *insertion_support =
4466 &vlan_v2_caps->offloads.insertion_support;
4467
4468 if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4469 stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4470 if (stripping_support->outer &
4471 VIRTCHNL_VLAN_ETHERTYPE_8100)
4472 hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4473 if (stripping_support->outer &
4474 VIRTCHNL_VLAN_ETHERTYPE_88A8)
4475 hw_features |= NETIF_F_HW_VLAN_STAG_RX;
4476 } else if (stripping_support->inner !=
4477 VIRTCHNL_VLAN_UNSUPPORTED &&
4478 stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4479 if (stripping_support->inner &
4480 VIRTCHNL_VLAN_ETHERTYPE_8100)
4481 hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4482 }
4483
4484 if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4485 insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4486 if (insertion_support->outer &
4487 VIRTCHNL_VLAN_ETHERTYPE_8100)
4488 hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4489 if (insertion_support->outer &
4490 VIRTCHNL_VLAN_ETHERTYPE_88A8)
4491 hw_features |= NETIF_F_HW_VLAN_STAG_TX;
4492 } else if (insertion_support->inner &&
4493 insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4494 if (insertion_support->inner &
4495 VIRTCHNL_VLAN_ETHERTYPE_8100)
4496 hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4497 }
4498 }
4499
4500 return hw_features;
4501 }
4502
4503 /**
4504 * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features
4505 * @adapter: board private structure
4506 *
4507 * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4508 * were negotiated determine the VLAN features that are enabled by default.
4509 **/
4510 static netdev_features_t
4511 iavf_get_netdev_vlan_features(struct iavf_adapter *adapter)
4512 {
4513 netdev_features_t features = 0;
4514
4515 if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4516 return features;
4517
4518 if (VLAN_ALLOWED(adapter)) {
4519 features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4520 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
4521 } else if (VLAN_V2_ALLOWED(adapter)) {
4522 struct virtchnl_vlan_caps *vlan_v2_caps =
4523 &adapter->vlan_v2_caps;
4524 struct virtchnl_vlan_supported_caps *filtering_support =
4525 &vlan_v2_caps->filtering.filtering_support;
4526 struct virtchnl_vlan_supported_caps *stripping_support =
4527 &vlan_v2_caps->offloads.stripping_support;
4528 struct virtchnl_vlan_supported_caps *insertion_support =
4529 &vlan_v2_caps->offloads.insertion_support;
4530 u32 ethertype_init;
4531
4532 /* give priority to outer stripping and don't support both outer
4533 * and inner stripping
4534 */
4535 ethertype_init = vlan_v2_caps->offloads.ethertype_init;
4536 if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4537 if (stripping_support->outer &
4538 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4539 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4540 features |= NETIF_F_HW_VLAN_CTAG_RX;
4541 else if (stripping_support->outer &
4542 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4543 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4544 features |= NETIF_F_HW_VLAN_STAG_RX;
4545 } else if (stripping_support->inner !=
4546 VIRTCHNL_VLAN_UNSUPPORTED) {
4547 if (stripping_support->inner &
4548 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4549 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4550 features |= NETIF_F_HW_VLAN_CTAG_RX;
4551 }
4552
4553 /* give priority to outer insertion and don't support both outer
4554 * and inner insertion
4555 */
4556 if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4557 if (insertion_support->outer &
4558 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4559 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4560 features |= NETIF_F_HW_VLAN_CTAG_TX;
4561 else if (insertion_support->outer &
4562 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4563 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4564 features |= NETIF_F_HW_VLAN_STAG_TX;
4565 } else if (insertion_support->inner !=
4566 VIRTCHNL_VLAN_UNSUPPORTED) {
4567 if (insertion_support->inner &
4568 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4569 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4570 features |= NETIF_F_HW_VLAN_CTAG_TX;
4571 }
4572
4573 /* give priority to outer filtering and don't bother if both
4574 * outer and inner filtering are enabled
4575 */
4576 ethertype_init = vlan_v2_caps->filtering.ethertype_init;
4577 if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4578 if (filtering_support->outer &
4579 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4580 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4581 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4582 if (filtering_support->outer &
4583 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4584 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4585 features |= NETIF_F_HW_VLAN_STAG_FILTER;
4586 } else if (filtering_support->inner !=
4587 VIRTCHNL_VLAN_UNSUPPORTED) {
4588 if (filtering_support->inner &
4589 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4590 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4591 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4592 if (filtering_support->inner &
4593 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4594 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4595 features |= NETIF_F_HW_VLAN_STAG_FILTER;
4596 }
4597 }
4598
4599 return features;
4600 }
4601
4602 #define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
4603 (!(((requested) & (feature_bit)) && \
4604 !((allowed) & (feature_bit))))
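/* The macro above evaluates to false only when a feature bit is set in
 * "requested" but not in "allowed"; every other combination (not requested,
 * or requested and allowed) is permitted.
 */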
4605
4606 /**
4607 * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
4608 * @adapter: board private structure
4609 * @requested_features: stack requested NETDEV features
4610 **/
4611 static netdev_features_t
4612 iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
4613 netdev_features_t requested_features)
4614 {
4615 netdev_features_t allowed_features;
4616
4617 allowed_features = iavf_get_netdev_vlan_hw_features(adapter) |
4618 iavf_get_netdev_vlan_features(adapter);
4619
4620 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4621 allowed_features,
4622 NETIF_F_HW_VLAN_CTAG_TX))
4623 requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
4624
4625 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4626 allowed_features,
4627 NETIF_F_HW_VLAN_CTAG_RX))
4628 requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
4629
4630 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4631 allowed_features,
4632 NETIF_F_HW_VLAN_STAG_TX))
4633 requested_features &= ~NETIF_F_HW_VLAN_STAG_TX;
4634 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4635 allowed_features,
4636 NETIF_F_HW_VLAN_STAG_RX))
4637 requested_features &= ~NETIF_F_HW_VLAN_STAG_RX;
4638
4639 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4640 allowed_features,
4641 NETIF_F_HW_VLAN_CTAG_FILTER))
4642 requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4643
4644 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4645 allowed_features,
4646 NETIF_F_HW_VLAN_STAG_FILTER))
4647 requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
4648
4649 if ((requested_features &
4650 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
4651 (requested_features &
4652 (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) &&
4653 adapter->vlan_v2_caps.offloads.ethertype_match ==
4654 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) {
4655 netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
4656 requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX |
4657 NETIF_F_HW_VLAN_STAG_TX);
4658 }
4659
4660 return requested_features;
4661 }
4662
4663 /**
4664 * iavf_fix_features - fix up the netdev feature bits
4665 * @netdev: our net device
4666 * @features: desired feature bits
4667 *
4668 * Returns fixed-up features bits
4669 **/
4670 static netdev_features_t iavf_fix_features(struct net_device *netdev,
4671 netdev_features_t features)
4672 {
4673 struct iavf_adapter *adapter = netdev_priv(netdev);
4674
4675 return iavf_fix_netdev_vlan_features(adapter, features);
4676 }
4677
4678 static const struct net_device_ops iavf_netdev_ops = {
4679 .ndo_open = iavf_open,
4680 .ndo_stop = iavf_close,
4681 .ndo_start_xmit = iavf_xmit_frame,
4682 .ndo_set_rx_mode = iavf_set_rx_mode,
4683 .ndo_validate_addr = eth_validate_addr,
4684 .ndo_set_mac_address = iavf_set_mac,
4685 .ndo_change_mtu = iavf_change_mtu,
4686 .ndo_tx_timeout = iavf_tx_timeout,
4687 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid,
4688 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid,
4689 .ndo_features_check = iavf_features_check,
4690 .ndo_fix_features = iavf_fix_features,
4691 .ndo_set_features = iavf_set_features,
4692 .ndo_setup_tc = iavf_setup_tc,
4693 };
4694
4695 /**
4696 * iavf_check_reset_complete - check that VF reset is complete
4697 * @hw: pointer to hw struct
4698 *
4699 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
4700 **/
4701 static int iavf_check_reset_complete(struct iavf_hw *hw)
4702 {
4703 u32 rstat;
4704 int i;
4705
4706 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
4707 rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
4708 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
4709 if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
4710 (rstat == VIRTCHNL_VFR_COMPLETED))
4711 return 0;
4712 usleep_range(10, 20);
4713 }
4714 return -EBUSY;
4715 }
4716
4717 /**
4718 * iavf_process_config - Process the config information we got from the PF
4719 * @adapter: board private structure
4720 *
4721 * Verify that we have a valid config struct, and set up our netdev features
4722 * and our VSI struct.
4723 **/
4724 int iavf_process_config(struct iavf_adapter *adapter)
4725 {
4726 struct virtchnl_vf_resource *vfres = adapter->vf_res;
4727 netdev_features_t hw_vlan_features, vlan_features;
4728 struct net_device *netdev = adapter->netdev;
4729 netdev_features_t hw_enc_features;
4730 netdev_features_t hw_features;
4731
4732 hw_enc_features = NETIF_F_SG |
4733 NETIF_F_IP_CSUM |
4734 NETIF_F_IPV6_CSUM |
4735 NETIF_F_HIGHDMA |
4736 NETIF_F_SOFT_FEATURES |
4737 NETIF_F_TSO |
4738 NETIF_F_TSO_ECN |
4739 NETIF_F_TSO6 |
4740 NETIF_F_SCTP_CRC |
4741 NETIF_F_RXHASH |
4742 NETIF_F_RXCSUM |
4743 0;
4744
4745 /* advertise to stack only if offloads for encapsulated packets are
4746 * supported
4747 */
4748 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
4749 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
4750 NETIF_F_GSO_GRE |
4751 NETIF_F_GSO_GRE_CSUM |
4752 NETIF_F_GSO_IPXIP4 |
4753 NETIF_F_GSO_IPXIP6 |
4754 NETIF_F_GSO_UDP_TUNNEL_CSUM |
4755 NETIF_F_GSO_PARTIAL |
4756 0;
4757
4758 if (!(vfres->vf_cap_flags &
4759 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
4760 netdev->gso_partial_features |=
4761 NETIF_F_GSO_UDP_TUNNEL_CSUM;
4762
4763 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
4764 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
4765 netdev->hw_enc_features |= hw_enc_features;
4766 }
4767 /* record features VLANs can make use of */
4768 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
4769
4770 /* Write features and hw_features separately to avoid polluting
4771 * with, or dropping, features that are set when we registered.
4772 */
4773 hw_features = hw_enc_features;
4774
4775 /* get HW VLAN features that can be toggled */
4776 hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
4777
4778 /* Enable cloud filter if ADQ is supported */
4779 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
4780 hw_features |= NETIF_F_HW_TC;
4781 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
4782 hw_features |= NETIF_F_GSO_UDP_L4;
4783
4784 netdev->hw_features |= hw_features | hw_vlan_features;
4785 vlan_features = iavf_get_netdev_vlan_features(adapter);
4786
4787 netdev->features |= hw_features | vlan_features;
4788
4789 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
4790 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4791
4792 netdev->priv_flags |= IFF_UNICAST_FLT;
4793
4794 /* Do not turn on offloads when they are requested to be turned off.
4795 * TSO needs minimum 576 bytes to work correctly.
4796 */
4797 if (netdev->wanted_features) {
4798 if (!(netdev->wanted_features & NETIF_F_TSO) ||
4799 netdev->mtu < 576)
4800 netdev->features &= ~NETIF_F_TSO;
4801 if (!(netdev->wanted_features & NETIF_F_TSO6) ||
4802 netdev->mtu < 576)
4803 netdev->features &= ~NETIF_F_TSO6;
4804 if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
4805 netdev->features &= ~NETIF_F_TSO_ECN;
4806 if (!(netdev->wanted_features & NETIF_F_GRO))
4807 netdev->features &= ~NETIF_F_GRO;
4808 if (!(netdev->wanted_features & NETIF_F_GSO))
4809 netdev->features &= ~NETIF_F_GSO;
4810 }
4811
4812 return 0;
4813 }
4814
4815 /**
4816 * iavf_shutdown - Shutdown the device in preparation for a reboot
4817 * @pdev: pci device structure
4818 **/
4819 static void iavf_shutdown(struct pci_dev *pdev)
4820 {
4821 struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
4822 struct net_device *netdev = adapter->netdev;
4823
4824 netif_device_detach(netdev);
4825
4826 if (netif_running(netdev))
4827 iavf_close(netdev);
4828
4829 if (iavf_lock_timeout(&adapter->crit_lock, 5000))
4830 dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __func__);
4831 /* Prevent the watchdog from running. */
4832 iavf_change_state(adapter, __IAVF_REMOVE);
4833 adapter->aq_required = 0;
4834 mutex_unlock(&adapter->crit_lock);
4835
4836 #ifdef CONFIG_PM
4837 pci_save_state(pdev);
4838
4839 #endif
4840 pci_disable_device(pdev);
4841 }
4842
4843 /**
4844 * iavf_probe - Device Initialization Routine
4845 * @pdev: PCI device information struct
4846 * @ent: entry in iavf_pci_tbl
4847 *
4848 * Returns 0 on success, negative on failure
4849 *
4850 * iavf_probe initializes an adapter identified by a pci_dev structure.
4851 * The OS initialization, configuring of the adapter private structure,
4852 * and a hardware reset occur.
4853 **/
4854 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4855 {
4856 struct net_device *netdev;
4857 struct iavf_adapter *adapter = NULL;
4858 struct iavf_hw *hw = NULL;
4859 int err;
4860
4861 err = pci_enable_device(pdev);
4862 if (err)
4863 return err;
4864
4865 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4866 if (err) {
4867 dev_err(&pdev->dev,
4868 "DMA configuration failed: 0x%x\n", err);
4869 goto err_dma;
4870 }
4871
4872 err = pci_request_regions(pdev, iavf_driver_name);
4873 if (err) {
4874 dev_err(&pdev->dev,
4875 "pci_request_regions failed 0x%x\n", err);
4876 goto err_pci_reg;
4877 }
4878
4879 pci_enable_pcie_error_reporting(pdev);
4880
4881 pci_set_master(pdev);
4882
4883 netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
4884 IAVF_MAX_REQ_QUEUES);
4885 if (!netdev) {
4886 err = -ENOMEM;
4887 goto err_alloc_etherdev;
4888 }
4889
4890 SET_NETDEV_DEV(netdev, &pdev->dev);
4891
4892 pci_set_drvdata(pdev, netdev);
4893 adapter = netdev_priv(netdev);
4894
4895 adapter->netdev = netdev;
4896 adapter->pdev = pdev;
4897
4898 hw = &adapter->hw;
4899 hw->back = adapter;
4900
4901 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
4902 iavf_change_state(adapter, __IAVF_STARTUP);
4903
4904 /* Call save state here because it relies on the adapter struct. */
4905 pci_save_state(pdev);
4906
4907 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4908 pci_resource_len(pdev, 0));
4909 if (!hw->hw_addr) {
4910 err = -EIO;
4911 goto err_ioremap;
4912 }
4913 hw->vendor_id = pdev->vendor;
4914 hw->device_id = pdev->device;
4915 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4916 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4917 hw->subsystem_device_id = pdev->subsystem_device;
4918 hw->bus.device = PCI_SLOT(pdev->devfn);
4919 hw->bus.func = PCI_FUNC(pdev->devfn);
4920 hw->bus.bus_id = pdev->bus->number;
4921
4922 /* set up the locks for the AQ, do this only once in probe
4923 * and destroy them only once in remove
4924 */
4925 mutex_init(&adapter->crit_lock);
4926 mutex_init(&adapter->client_lock);
4927 mutex_init(&hw->aq.asq_mutex);
4928 mutex_init(&hw->aq.arq_mutex);
4929
4930 spin_lock_init(&adapter->mac_vlan_list_lock);
4931 spin_lock_init(&adapter->cloud_filter_list_lock);
4932 spin_lock_init(&adapter->fdir_fltr_lock);
4933 spin_lock_init(&adapter->adv_rss_lock);
4934
4935 INIT_LIST_HEAD(&adapter->mac_filter_list);
4936 INIT_LIST_HEAD(&adapter->vlan_filter_list);
4937 INIT_LIST_HEAD(&adapter->cloud_filter_list);
4938 INIT_LIST_HEAD(&adapter->fdir_list_head);
4939 INIT_LIST_HEAD(&adapter->adv_rss_list_head);
4940
4941 INIT_WORK(&adapter->reset_task, iavf_reset_task);
4942 INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
4943 INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
4944 INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
4945 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
4946 msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
4947
4948 /* Setup the wait queue for indicating transition to down status */
4949 init_waitqueue_head(&adapter->down_waitqueue);
4950
4951 /* Setup the wait queue for indicating virtchannel events */
4952 init_waitqueue_head(&adapter->vc_waitqueue);
4953
4954 return 0;
4955
4956 err_ioremap:
4957 free_netdev(netdev);
4958 err_alloc_etherdev:
4959 pci_disable_pcie_error_reporting(pdev);
4960 pci_release_regions(pdev);
4961 err_pci_reg:
4962 err_dma:
4963 pci_disable_device(pdev);
4964 return err;
4965 }
4966
4967 /**
4968 * iavf_suspend - Power management suspend routine
4969 * @dev_d: device info pointer
4970 *
4971 * Called when the system (VM) is entering sleep/suspend.
4972 **/
4973 static int __maybe_unused iavf_suspend(struct device *dev_d)
4974 {
4975 struct net_device *netdev = dev_get_drvdata(dev_d);
4976 struct iavf_adapter *adapter = netdev_priv(netdev);
4977
4978 netif_device_detach(netdev);
4979
4980 while (!mutex_trylock(&adapter->crit_lock))
4981 usleep_range(500, 1000);
4982
4983 if (netif_running(netdev)) {
4984 rtnl_lock();
4985 iavf_down(adapter);
4986 rtnl_unlock();
4987 }
4988 iavf_free_misc_irq(adapter);
4989 iavf_reset_interrupt_capability(adapter);
4990
4991 mutex_unlock(&adapter->crit_lock);
4992
4993 return 0;
4994 }
4995
4996 /**
4997 * iavf_resume - Power management resume routine
4998 * @dev_d: device info pointer
4999 *
5000 * Called when the system (VM) is resumed from sleep/suspend.
5001 **/
5002 static int __maybe_unused iavf_resume(struct device *dev_d)
5003 {
5004 struct pci_dev *pdev = to_pci_dev(dev_d);
5005 struct iavf_adapter *adapter;
5006 u32 err;
5007
5008 adapter = iavf_pdev_to_adapter(pdev);
5009
5010 pci_set_master(pdev);
5011
5012 rtnl_lock();
5013 err = iavf_set_interrupt_capability(adapter);
5014 if (err) {
5015 rtnl_unlock();
5016 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
5017 return err;
5018 }
5019 err = iavf_request_misc_irq(adapter);
5020 rtnl_unlock();
5021 if (err) {
5022 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
5023 return err;
5024 }
5025
5026 queue_work(iavf_wq, &adapter->reset_task);
5027
5028 netif_device_attach(adapter->netdev);
5029
5030 return err;
5031 }
5032
5033 /**
5034 * iavf_remove - Device Removal Routine
5035 * @pdev: PCI device information struct
5036 *
5037 * iavf_remove is called by the PCI subsystem to alert the driver
5038 * that it should release a PCI device. This could be caused by a
5039 * Hot-Plug event, or because the driver is going to be removed from
5040 * memory.
5041 **/
5042 static void iavf_remove(struct pci_dev *pdev)
5043 {
5044 struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
5045 struct iavf_fdir_fltr *fdir, *fdirtmp;
5046 struct iavf_vlan_filter *vlf, *vlftmp;
5047 struct iavf_cloud_filter *cf, *cftmp;
5048 struct iavf_adv_rss *rss, *rsstmp;
5049 struct iavf_mac_filter *f, *ftmp;
5050 struct net_device *netdev;
5051 struct iavf_hw *hw;
5052 int err;
5053
5054 netdev = adapter->netdev;
5055 hw = &adapter->hw;
5056
5057 if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
5058 return;
5059
5060 /* Wait until port initialization is complete.
5061 * There are flows where register/unregister netdev may race.
5062 */
5063 while (1) {
5064 mutex_lock(&adapter->crit_lock);
5065 if (adapter->state == __IAVF_RUNNING ||
5066 adapter->state == __IAVF_DOWN ||
5067 adapter->state == __IAVF_INIT_FAILED) {
5068 mutex_unlock(&adapter->crit_lock);
5069 break;
5070 }
5071
5072 mutex_unlock(&adapter->crit_lock);
5073 usleep_range(500, 1000);
5074 }
5075 cancel_delayed_work_sync(&adapter->watchdog_task);
5076
5077 if (adapter->netdev_registered) {
5078 rtnl_lock();
5079 unregister_netdevice(netdev);
5080 adapter->netdev_registered = false;
5081 rtnl_unlock();
5082 }
5083 if (CLIENT_ALLOWED(adapter)) {
5084 err = iavf_lan_del_device(adapter);
5085 if (err)
5086 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
5087 err);
5088 }
5089
5090 mutex_lock(&adapter->crit_lock);
5091 dev_info(&adapter->pdev->dev, "Remove device\n");
5092 iavf_change_state(adapter, __IAVF_REMOVE);
5093
5094 iavf_request_reset(adapter);
5095 msleep(50);
5096 /* If the FW isn't responding, kick it once, but only once. */
5097 if (!iavf_asq_done(hw)) {
5098 iavf_request_reset(adapter);
5099 msleep(50);
5100 }
5101
5102 iavf_misc_irq_disable(adapter);
5103 /* Shut down all the garbage mashers on the detention level */
5104 cancel_work_sync(&adapter->reset_task);
5105 cancel_delayed_work_sync(&adapter->watchdog_task);
5106 cancel_work_sync(&adapter->adminq_task);
5107 cancel_delayed_work_sync(&adapter->client_task);
5108
5109 adapter->aq_required = 0;
5110 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
5111
5112 iavf_free_all_tx_resources(adapter);
5113 iavf_free_all_rx_resources(adapter);
5114 iavf_free_misc_irq(adapter);
5115
5116 iavf_reset_interrupt_capability(adapter);
5117 iavf_free_q_vectors(adapter);
5118
5119 iavf_free_rss(adapter);
5120
5121 if (hw->aq.asq.count)
5122 iavf_shutdown_adminq(hw);
5123
5124 /* destroy the locks only once, here */
5125 mutex_destroy(&hw->aq.arq_mutex);
5126 mutex_destroy(&hw->aq.asq_mutex);
5127 mutex_destroy(&adapter->client_lock);
5128 mutex_unlock(&adapter->crit_lock);
5129 mutex_destroy(&adapter->crit_lock);
5130
5131 iounmap(hw->hw_addr);
5132 pci_release_regions(pdev);
5133 iavf_free_queues(adapter);
5134 kfree(adapter->vf_res);
5135 spin_lock_bh(&adapter->mac_vlan_list_lock);
5136 /* If we got removed before an up/down sequence, we've got a filter
5137 * hanging out there that we need to get rid of.
5138 */
5139 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
5140 list_del(&f->list);
5141 kfree(f);
5142 }
5143 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
5144 list) {
5145 list_del(&vlf->list);
5146 kfree(vlf);
5147 }
5148
5149 spin_unlock_bh(&adapter->mac_vlan_list_lock);
5150
5151 spin_lock_bh(&adapter->cloud_filter_list_lock);
5152 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
5153 list_del(&cf->list);
5154 kfree(cf);
5155 }
5156 spin_unlock_bh(&adapter->cloud_filter_list_lock);
5157
5158 spin_lock_bh(&adapter->fdir_fltr_lock);
5159 list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
5160 list_del(&fdir->list);
5161 kfree(fdir);
5162 }
5163 spin_unlock_bh(&adapter->fdir_fltr_lock);
5164
5165 spin_lock_bh(&adapter->adv_rss_lock);
5166 list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
5167 list) {
5168 list_del(&rss->list);
5169 kfree(rss);
5170 }
5171 spin_unlock_bh(&adapter->adv_rss_lock);
5172
5173 free_netdev(netdev);
5174
5175 pci_disable_pcie_error_reporting(pdev);
5176
5177 pci_disable_device(pdev);
5178 }
5179
5180 static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
5181
5182 static struct pci_driver iavf_driver = {
5183 .name = iavf_driver_name,
5184 .id_table = iavf_pci_tbl,
5185 .probe = iavf_probe,
5186 .remove = iavf_remove,
5187 .driver.pm = &iavf_pm_ops,
5188 .shutdown = iavf_shutdown,
5189 };
5190
5191 /**
5192 * iavf_init_module - Driver Registration Routine
5193 *
5194 * iavf_init_module is the first routine called when the driver is
5195 * loaded. All it does is register with the PCI subsystem.
5196 **/
5197 static int __init iavf_init_module(void)
5198 {
5199 int ret;
5200
5201 pr_info("iavf: %s\n", iavf_driver_string);
5202
5203 pr_info("%s\n", iavf_copyright);
5204
5205 iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
5206 iavf_driver_name);
5207 if (!iavf_wq) {
5208 pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
5209 return -ENOMEM;
5210 }
5211
5212 ret = pci_register_driver(&iavf_driver);
5213 if (ret)
5214 destroy_workqueue(iavf_wq);
5215
5216 return ret;
5217 }
5218
5219 module_init(iavf_init_module);
5220
5221 /**
5222 * iavf_exit_module - Driver Exit Cleanup Routine
5223 *
5224 * iavf_exit_module is called just before the driver is removed
5225 * from memory.
5226 **/
5227 static void __exit iavf_exit_module(void)
5228 {
5229 pci_unregister_driver(&iavf_driver);
5230 destroy_workqueue(iavf_wq);
5231 }
5232
5233 module_exit(iavf_exit_module);
5234
5235 /* iavf_main.c */
5236