1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4 #include "iavf.h"
5 #include "iavf_prototype.h"
6 #include "iavf_client.h"
7 /* All iavf tracepoints are defined by the include below, which must
8 * be included exactly once across the whole kernel with
9 * CREATE_TRACE_POINTS defined
10 */
11 #define CREATE_TRACE_POINTS
12 #include "iavf_trace.h"
13
14 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
15 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
16 static int iavf_close(struct net_device *netdev);
17 static void iavf_init_get_resources(struct iavf_adapter *adapter);
18 static int iavf_check_reset_complete(struct iavf_hw *hw);
19
20 char iavf_driver_name[] = "iavf";
21 static const char iavf_driver_string[] =
22 "Intel(R) Ethernet Adaptive Virtual Function Network Driver";
23
24 static const char iavf_copyright[] =
25 "Copyright (c) 2013 - 2018 Intel Corporation.";
26
27 /* iavf_pci_tbl - PCI Device ID Table
28 *
29 * Wildcard entries (PCI_ANY_ID) should come last
30 * Last entry must be all 0s
31 *
32 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
33 * Class, Class Mask, private data (not used) }
34 */
35 static const struct pci_device_id iavf_pci_tbl[] = {
36 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
37 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
38 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
39 {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
40 /* required last entry */
41 {0, }
42 };
43
44 MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
45
46 MODULE_ALIAS("i40evf");
47 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
48 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
49 MODULE_LICENSE("GPL v2");
50
51 static const struct net_device_ops iavf_netdev_ops;
52
53 int iavf_status_to_errno(enum iavf_status status)
54 {
55 switch (status) {
56 case IAVF_SUCCESS:
57 return 0;
58 case IAVF_ERR_PARAM:
59 case IAVF_ERR_MAC_TYPE:
60 case IAVF_ERR_INVALID_MAC_ADDR:
61 case IAVF_ERR_INVALID_LINK_SETTINGS:
62 case IAVF_ERR_INVALID_PD_ID:
63 case IAVF_ERR_INVALID_QP_ID:
64 case IAVF_ERR_INVALID_CQ_ID:
65 case IAVF_ERR_INVALID_CEQ_ID:
66 case IAVF_ERR_INVALID_AEQ_ID:
67 case IAVF_ERR_INVALID_SIZE:
68 case IAVF_ERR_INVALID_ARP_INDEX:
69 case IAVF_ERR_INVALID_FPM_FUNC_ID:
70 case IAVF_ERR_QP_INVALID_MSG_SIZE:
71 case IAVF_ERR_INVALID_FRAG_COUNT:
72 case IAVF_ERR_INVALID_ALIGNMENT:
73 case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
74 case IAVF_ERR_INVALID_IMM_DATA_SIZE:
75 case IAVF_ERR_INVALID_VF_ID:
76 case IAVF_ERR_INVALID_HMCFN_ID:
77 case IAVF_ERR_INVALID_PBLE_INDEX:
78 case IAVF_ERR_INVALID_SD_INDEX:
79 case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
80 case IAVF_ERR_INVALID_SD_TYPE:
81 case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
82 case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
83 case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
84 return -EINVAL;
85 case IAVF_ERR_NVM:
86 case IAVF_ERR_NVM_CHECKSUM:
87 case IAVF_ERR_PHY:
88 case IAVF_ERR_CONFIG:
89 case IAVF_ERR_UNKNOWN_PHY:
90 case IAVF_ERR_LINK_SETUP:
91 case IAVF_ERR_ADAPTER_STOPPED:
92 case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
93 case IAVF_ERR_AUTONEG_NOT_COMPLETE:
94 case IAVF_ERR_RESET_FAILED:
95 case IAVF_ERR_BAD_PTR:
96 case IAVF_ERR_SWFW_SYNC:
97 case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
98 case IAVF_ERR_QUEUE_EMPTY:
99 case IAVF_ERR_FLUSHED_QUEUE:
100 case IAVF_ERR_OPCODE_MISMATCH:
101 case IAVF_ERR_CQP_COMPL_ERROR:
102 case IAVF_ERR_BACKING_PAGE_ERROR:
103 case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
104 case IAVF_ERR_MEMCPY_FAILED:
105 case IAVF_ERR_SRQ_ENABLED:
106 case IAVF_ERR_ADMIN_QUEUE_ERROR:
107 case IAVF_ERR_ADMIN_QUEUE_FULL:
108 case IAVF_ERR_BAD_RDMA_CQE:
109 case IAVF_ERR_NVM_BLANK_MODE:
110 case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
111 case IAVF_ERR_DIAG_TEST_FAILED:
112 case IAVF_ERR_FIRMWARE_API_VERSION:
113 case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
114 return -EIO;
115 case IAVF_ERR_DEVICE_NOT_SUPPORTED:
116 return -ENODEV;
117 case IAVF_ERR_NO_AVAILABLE_VSI:
118 case IAVF_ERR_RING_FULL:
119 return -ENOSPC;
120 case IAVF_ERR_NO_MEMORY:
121 return -ENOMEM;
122 case IAVF_ERR_TIMEOUT:
123 case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
124 return -ETIMEDOUT;
125 case IAVF_ERR_NOT_IMPLEMENTED:
126 case IAVF_NOT_SUPPORTED:
127 return -EOPNOTSUPP;
128 case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
129 return -EALREADY;
130 case IAVF_ERR_NOT_READY:
131 return -EBUSY;
132 case IAVF_ERR_BUF_TOO_SHORT:
133 return -EMSGSIZE;
134 }
135
136 return -EIO;
137 }
138
139 int virtchnl_status_to_errno(enum virtchnl_status_code v_status)
140 {
141 switch (v_status) {
142 case VIRTCHNL_STATUS_SUCCESS:
143 return 0;
144 case VIRTCHNL_STATUS_ERR_PARAM:
145 case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
146 return -EINVAL;
147 case VIRTCHNL_STATUS_ERR_NO_MEMORY:
148 return -ENOMEM;
149 case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
150 case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
151 case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
152 return -EIO;
153 case VIRTCHNL_STATUS_ERR_NOT_SUPPORTED:
154 return -EOPNOTSUPP;
155 }
156
157 return -EIO;
158 }
159
160 /**
161 * iavf_pdev_to_adapter - go from pci_dev to adapter
162 * @pdev: pci_dev pointer
163 */
164 static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
165 {
166 return netdev_priv(pci_get_drvdata(pdev));
167 }
168
169 /**
170 * iavf_is_reset_in_progress - Check if a reset is in progress
171 * @adapter: board private structure
172 */
173 static bool iavf_is_reset_in_progress(struct iavf_adapter *adapter)
174 {
175 if (adapter->state == __IAVF_RESETTING ||
176 adapter->flags & (IAVF_FLAG_RESET_PENDING |
177 IAVF_FLAG_RESET_NEEDED))
178 return true;
179
180 return false;
181 }
182
183 /**
184 * iavf_wait_for_reset - Wait for reset to finish.
185 * @adapter: board private structure
186 *
187 * Returns 0 if reset finished successfully, negative on timeout or interrupt.
188 */
189 int iavf_wait_for_reset(struct iavf_adapter *adapter)
190 {
191 int ret = wait_event_interruptible_timeout(adapter->reset_waitqueue,
192 !iavf_is_reset_in_progress(adapter),
193 msecs_to_jiffies(5000));
194
195 /* If ret < 0, the wait was interrupted.
196 * If ret == 0, we timed out while waiting for the reset to finish.
197 * If ret > 0, the reset has finished.
199 */
200 if (ret > 0)
201 return 0;
202 else if (ret < 0)
203 return -EINTR;
204 else
205 return -EBUSY;
206 }
207
208 /**
209 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
210 * @hw: pointer to the HW structure
211 * @mem: ptr to mem struct to fill out
212 * @size: size of memory requested
213 * @alignment: what to align the allocation to
214 **/
215 enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
216 struct iavf_dma_mem *mem,
217 u64 size, u32 alignment)
218 {
219 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
220
221 if (!mem)
222 return IAVF_ERR_PARAM;
223
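/* Round the request up to the requested alignment and allocate coherent
 * DMA memory; the bus address is handed back to the caller through mem->pa.
 */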
224 mem->size = ALIGN(size, alignment);
225 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
226 (dma_addr_t *)&mem->pa, GFP_KERNEL);
227 if (mem->va)
228 return 0;
229 else
230 return IAVF_ERR_NO_MEMORY;
231 }
232
233 /**
234 * iavf_free_dma_mem - wrapper for DMA memory freeing
235 * @hw: pointer to the HW structure
236 * @mem: ptr to mem struct to free
237 **/
238 enum iavf_status iavf_free_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem)
239 {
240 struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;
241
242 if (!mem || !mem->va)
243 return IAVF_ERR_PARAM;
244 dma_free_coherent(&adapter->pdev->dev, mem->size,
245 mem->va, (dma_addr_t)mem->pa);
246 return 0;
247 }
248
249 /**
250 * iavf_allocate_virt_mem - virt memory alloc wrapper
251 * @hw: pointer to the HW structure
252 * @mem: ptr to mem struct to fill out
253 * @size: size of memory requested
254 **/
255 enum iavf_status iavf_allocate_virt_mem(struct iavf_hw *hw,
256 struct iavf_virt_mem *mem, u32 size)
257 {
258 if (!mem)
259 return IAVF_ERR_PARAM;
260
261 mem->size = size;
262 mem->va = kzalloc(size, GFP_KERNEL);
263
264 if (mem->va)
265 return 0;
266 else
267 return IAVF_ERR_NO_MEMORY;
268 }
269
270 /**
271 * iavf_free_virt_mem - virt memory free wrapper
272 * @hw: pointer to the HW structure
273 * @mem: ptr to mem struct to free
274 **/
275 void iavf_free_virt_mem(struct iavf_hw *hw, struct iavf_virt_mem *mem)
276 {
277 kfree(mem->va);
278 }
279
280 /**
281 * iavf_lock_timeout - try to lock mutex but give up after timeout
282 * @lock: mutex that should be locked
283 * @msecs: timeout in msecs
284 *
285 * Returns 0 on success, negative on failure
286 **/
287 static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
288 {
289 unsigned int wait, delay = 10;
290
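/* Poll mutex_trylock() roughly every 10 ms until either the lock is
 * acquired or the requested timeout expires.
 */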
291 for (wait = 0; wait < msecs; wait += delay) {
292 if (mutex_trylock(lock))
293 return 0;
294
295 msleep(delay);
296 }
297
298 return -1;
299 }
300
301 /**
302 * iavf_schedule_reset - Set the flags and schedule a reset event
303 * @adapter: board private structure
304 * @flags: IAVF_FLAG_RESET_PENDING or IAVF_FLAG_RESET_NEEDED
305 **/
306 void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags)
307 {
308 if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) &&
309 !(adapter->flags &
310 (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
311 adapter->flags |= flags;
312 queue_work(adapter->wq, &adapter->reset_task);
313 }
314 }
315
316 /**
317 * iavf_schedule_aq_request - Set the flags and schedule aq request
318 * @adapter: board private structure
319 * @flags: requested aq flags
320 **/
321 void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags)
322 {
323 adapter->aq_required |= flags;
324 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
325 }
326
327 /**
328 * iavf_tx_timeout - Respond to a Tx Hang
329 * @netdev: network interface device structure
330 * @txqueue: queue number that is timing out
331 **/
332 static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
333 {
334 struct iavf_adapter *adapter = netdev_priv(netdev);
335
336 adapter->tx_timeout_count++;
337 iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
338 }
339
340 /**
341 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
342 * @adapter: board private structure
343 **/
344 static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
345 {
346 struct iavf_hw *hw = &adapter->hw;
347
348 if (!adapter->msix_entries)
349 return;
350
351 wr32(hw, IAVF_VFINT_DYN_CTL01, 0);
352
353 iavf_flush(hw);
354
355 synchronize_irq(adapter->msix_entries[0].vector);
356 }
357
358 /**
359 * iavf_misc_irq_enable - Enable default interrupt generation settings
360 * @adapter: board private structure
361 **/
362 static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
363 {
364 struct iavf_hw *hw = &adapter->hw;
365
366 wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
367 IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
368 wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);
369
370 iavf_flush(hw);
371 }
372
373 /**
374 * iavf_irq_disable - Mask off interrupt generation on the NIC
375 * @adapter: board private structure
376 **/
377 static void iavf_irq_disable(struct iavf_adapter *adapter)
378 {
379 int i;
380 struct iavf_hw *hw = &adapter->hw;
381
382 if (!adapter->msix_entries)
383 return;
384
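/* Vector 0 is reserved for the admin queue; only the traffic vectors
 * (1 .. num_msix_vectors - 1) are masked here.
 */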
385 for (i = 1; i < adapter->num_msix_vectors; i++) {
386 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
387 synchronize_irq(adapter->msix_entries[i].vector);
388 }
389 iavf_flush(hw);
390 }
391
392 /**
393 * iavf_irq_enable_queues - Enable interrupt for all queues
394 * @adapter: board private structure
395 **/
396 static void iavf_irq_enable_queues(struct iavf_adapter *adapter)
397 {
398 struct iavf_hw *hw = &adapter->hw;
399 int i;
400
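/* The DYN_CTLN1 registers are indexed from 0 for the first traffic
 * vector, hence "i - 1" while skipping the misc vector at index 0.
 */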
401 for (i = 1; i < adapter->num_msix_vectors; i++) {
402 wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
403 IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
404 IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
405 }
406 }
407
408 /**
409 * iavf_irq_enable - Enable default interrupt generation settings
410 * @adapter: board private structure
411 * @flush: boolean value whether to run rd32()
412 **/
413 void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
414 {
415 struct iavf_hw *hw = &adapter->hw;
416
417 iavf_misc_irq_enable(adapter);
418 iavf_irq_enable_queues(adapter);
419
420 if (flush)
421 iavf_flush(hw);
422 }
423
424 /**
425 * iavf_msix_aq - Interrupt handler for vector 0
426 * @irq: interrupt number
427 * @data: pointer to netdev
428 **/
429 static irqreturn_t iavf_msix_aq(int irq, void *data)
430 {
431 struct net_device *netdev = data;
432 struct iavf_adapter *adapter = netdev_priv(netdev);
433 struct iavf_hw *hw = &adapter->hw;
434
435 /* handle non-queue interrupts, these reads clear the registers */
436 rd32(hw, IAVF_VFINT_ICR01);
437 rd32(hw, IAVF_VFINT_ICR0_ENA1);
438
439 if (adapter->state != __IAVF_REMOVE)
440 /* schedule work on the private workqueue */
441 queue_work(adapter->wq, &adapter->adminq_task);
442
443 return IRQ_HANDLED;
444 }
445
446 /**
447 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
448 * @irq: interrupt number
449 * @data: pointer to a q_vector
450 **/
451 static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
452 {
453 struct iavf_q_vector *q_vector = data;
454
455 if (!q_vector->tx.ring && !q_vector->rx.ring)
456 return IRQ_HANDLED;
457
458 napi_schedule_irqoff(&q_vector->napi);
459
460 return IRQ_HANDLED;
461 }
462
463 /**
464 * iavf_map_vector_to_rxq - associate irqs with rx queues
465 * @adapter: board private structure
466 * @v_idx: interrupt number
467 * @r_idx: queue number
468 **/
469 static void
470 iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
471 {
472 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
473 struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
474 struct iavf_hw *hw = &adapter->hw;
475
476 rx_ring->q_vector = q_vector;
477 rx_ring->next = q_vector->rx.ring;
478 rx_ring->vsi = &adapter->vsi;
479 q_vector->rx.ring = rx_ring;
480 q_vector->rx.count++;
481 q_vector->rx.next_update = jiffies + 1;
482 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
483 q_vector->ring_mask |= BIT(r_idx);
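/* The ITR registers are programmed in units of 2 usec, hence the shift */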
484 wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
485 q_vector->rx.current_itr >> 1);
486 q_vector->rx.current_itr = q_vector->rx.target_itr;
487 }
488
489 /**
490 * iavf_map_vector_to_txq - associate irqs with tx queues
491 * @adapter: board private structure
492 * @v_idx: interrupt number
493 * @t_idx: queue number
494 **/
495 static void
496 iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
497 {
498 struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
499 struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
500 struct iavf_hw *hw = &adapter->hw;
501
502 tx_ring->q_vector = q_vector;
503 tx_ring->next = q_vector->tx.ring;
504 tx_ring->vsi = &adapter->vsi;
505 q_vector->tx.ring = tx_ring;
506 q_vector->tx.count++;
507 q_vector->tx.next_update = jiffies + 1;
508 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
509 q_vector->num_ringpairs++;
510 wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
511 q_vector->tx.target_itr >> 1);
512 q_vector->tx.current_itr = q_vector->tx.target_itr;
513 }
514
515 /**
516 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
517 * @adapter: board private structure to initialize
518 *
519 * This function maps descriptor rings to the queue-specific vectors
520 * we were allotted through the MSI-X enabling code. Ideally, we'd have
521 * one vector per ring/queue, but on a constrained vector budget, we
522 * group the rings as "efficiently" as possible. You would add new
523 * mapping configurations in here.
524 **/
525 static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
526 {
527 int rings_remaining = adapter->num_active_queues;
528 int ridx = 0, vidx = 0;
529 int q_vectors;
530
531 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
532
533 for (; ridx < rings_remaining; ridx++) {
534 iavf_map_vector_to_rxq(adapter, vidx, ridx);
535 iavf_map_vector_to_txq(adapter, vidx, ridx);
536
537 /* In the case where we have more queues than vectors, continue
538 * round-robin on vectors until all queues are mapped.
539 */
540 if (++vidx >= q_vectors)
541 vidx = 0;
542 }
543
544 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
545 }
546
547 /**
548 * iavf_irq_affinity_notify - Callback for affinity changes
549 * @notify: context as to what irq was changed
550 * @mask: the new affinity mask
551 *
552 * This is a callback function used by the irq_set_affinity_notifier function
553 * so that we may register to receive changes to the irq affinity masks.
554 **/
555 static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
556 const cpumask_t *mask)
557 {
558 struct iavf_q_vector *q_vector =
559 container_of(notify, struct iavf_q_vector, affinity_notify);
560
561 cpumask_copy(&q_vector->affinity_mask, mask);
562 }
563
564 /**
565 * iavf_irq_affinity_release - Callback for affinity notifier release
566 * @ref: internal core kernel usage
567 *
568 * This is a callback function used by the irq_set_affinity_notifier function
569 * to inform the current notification subscriber that they will no longer
570 * receive notifications.
571 **/
572 static void iavf_irq_affinity_release(struct kref *ref) {}
573
574 /**
575 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
576 * @adapter: board private structure
577 * @basename: device basename
578 *
579 * Allocates MSI-X vectors for tx and rx handling, and requests
580 * interrupts from the kernel.
581 **/
582 static int
583 iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
584 {
585 unsigned int vector, q_vectors;
586 unsigned int rx_int_idx = 0, tx_int_idx = 0;
587 int irq_num, err;
588 int cpu;
589
590 iavf_irq_disable(adapter);
591 /* Decrement for the non-queue (admin queue/other) vector */
592 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
593
594 for (vector = 0; vector < q_vectors; vector++) {
595 struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];
596
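/* msix_entries[0] is the misc/admin queue vector, so the traffic
 * vectors start at offset NONQ_VECS.
 */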
597 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
598
599 if (q_vector->tx.ring && q_vector->rx.ring) {
600 snprintf(q_vector->name, sizeof(q_vector->name),
601 "iavf-%s-TxRx-%u", basename, rx_int_idx++);
602 tx_int_idx++;
603 } else if (q_vector->rx.ring) {
604 snprintf(q_vector->name, sizeof(q_vector->name),
605 "iavf-%s-rx-%u", basename, rx_int_idx++);
606 } else if (q_vector->tx.ring) {
607 snprintf(q_vector->name, sizeof(q_vector->name),
608 "iavf-%s-tx-%u", basename, tx_int_idx++);
609 } else {
610 /* skip this unused q_vector */
611 continue;
612 }
613 err = request_irq(irq_num,
614 iavf_msix_clean_rings,
615 0,
616 q_vector->name,
617 q_vector);
618 if (err) {
619 dev_info(&adapter->pdev->dev,
620 "Request_irq failed, error: %d\n", err);
621 goto free_queue_irqs;
622 }
623 /* register for affinity change notifications */
624 q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
625 q_vector->affinity_notify.release =
626 iavf_irq_affinity_release;
627 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
628 /* Spread the IRQ affinity hints across online CPUs. Note that
629 * get_cpu_mask returns a mask with a permanent lifetime so
630 * it's safe to use as a hint for irq_update_affinity_hint.
631 */
632 cpu = cpumask_local_spread(q_vector->v_idx, -1);
633 irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
634 }
635
636 return 0;
637
638 free_queue_irqs:
639 while (vector) {
640 vector--;
641 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
642 irq_set_affinity_notifier(irq_num, NULL);
643 irq_update_affinity_hint(irq_num, NULL);
644 free_irq(irq_num, &adapter->q_vectors[vector]);
645 }
646 return err;
647 }
648
649 /**
650 * iavf_request_misc_irq - Initialize MSI-X interrupts
651 * @adapter: board private structure
652 *
653 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
654 * vector is only for the admin queue, and stays active even when the netdev
655 * is closed.
656 **/
657 static int iavf_request_misc_irq(struct iavf_adapter *adapter)
658 {
659 struct net_device *netdev = adapter->netdev;
660 int err;
661
662 snprintf(adapter->misc_vector_name,
663 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
664 dev_name(&adapter->pdev->dev));
665 err = request_irq(adapter->msix_entries[0].vector,
666 &iavf_msix_aq, 0,
667 adapter->misc_vector_name, netdev);
668 if (err) {
669 dev_err(&adapter->pdev->dev,
670 "request_irq for %s failed: %d\n",
671 adapter->misc_vector_name, err);
672 free_irq(adapter->msix_entries[0].vector, netdev);
673 }
674 return err;
675 }
676
677 /**
678 * iavf_free_traffic_irqs - Free MSI-X interrupts
679 * @adapter: board private structure
680 *
681 * Frees all MSI-X vectors other than 0.
682 **/
683 static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
684 {
685 int vector, irq_num, q_vectors;
686
687 if (!adapter->msix_entries)
688 return;
689
690 q_vectors = adapter->num_msix_vectors - NONQ_VECS;
691
692 for (vector = 0; vector < q_vectors; vector++) {
693 irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
694 irq_set_affinity_notifier(irq_num, NULL);
695 irq_update_affinity_hint(irq_num, NULL);
696 free_irq(irq_num, &adapter->q_vectors[vector]);
697 }
698 }
699
700 /**
701 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
702 * @adapter: board private structure
703 *
704 * Frees MSI-X vector 0.
705 **/
706 static void iavf_free_misc_irq(struct iavf_adapter *adapter)
707 {
708 struct net_device *netdev = adapter->netdev;
709
710 if (!adapter->msix_entries)
711 return;
712
713 free_irq(adapter->msix_entries[0].vector, netdev);
714 }
715
716 /**
717 * iavf_configure_tx - Configure Transmit Unit after Reset
718 * @adapter: board private structure
719 *
720 * Configure the Tx unit of the MAC after a reset.
721 **/
722 static void iavf_configure_tx(struct iavf_adapter *adapter)
723 {
724 struct iavf_hw *hw = &adapter->hw;
725 int i;
726
727 for (i = 0; i < adapter->num_active_queues; i++)
728 adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
729 }
730
731 /**
732 * iavf_configure_rx - Configure Receive Unit after Reset
733 * @adapter: board private structure
734 *
735 * Configure the Rx unit of the MAC after a reset.
736 **/
737 static void iavf_configure_rx(struct iavf_adapter *adapter)
738 {
739 unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
740 struct iavf_hw *hw = &adapter->hw;
741 int i;
742
743 /* Legacy Rx will always default to a 2048 buffer size. */
744 #if (PAGE_SIZE < 8192)
745 if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
746 struct net_device *netdev = adapter->netdev;
747
748 /* For jumbo frames on systems with 4K pages we have to use
749 * an order 1 page, so we might as well increase the size
750 * of our Rx buffer to make better use of the available space
751 */
752 rx_buf_len = IAVF_RXBUFFER_3072;
753
754 /* We use a 1536 buffer size for configurations with
755 * standard Ethernet mtu. On x86 this gives us enough room
756 * for shared info and 192 bytes of padding.
757 */
758 if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
759 (netdev->mtu <= ETH_DATA_LEN))
760 rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
761 }
762 #endif
763
764 for (i = 0; i < adapter->num_active_queues; i++) {
765 adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
766 adapter->rx_rings[i].rx_buf_len = rx_buf_len;
767
768 if (adapter->flags & IAVF_FLAG_LEGACY_RX)
769 clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
770 else
771 set_ring_build_skb_enabled(&adapter->rx_rings[i]);
772 }
773 }
774
775 /**
776 * iavf_find_vlan - Search filter list for specific vlan filter
777 * @adapter: board private structure
778 * @vlan: vlan tag
779 *
780 * Returns ptr to the filter object or NULL. Must be called while holding the
781 * mac_vlan_list_lock.
782 **/
783 static struct
784 iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter,
785 struct iavf_vlan vlan)
786 {
787 struct iavf_vlan_filter *f;
788
789 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
790 if (f->vlan.vid == vlan.vid &&
791 f->vlan.tpid == vlan.tpid)
792 return f;
793 }
794
795 return NULL;
796 }
797
798 /**
799 * iavf_add_vlan - Add a vlan filter to the list
800 * @adapter: board private structure
801 * @vlan: VLAN tag
802 *
803 * Returns ptr to the filter object or NULL when no memory available.
804 **/
805 static struct
806 iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
807 struct iavf_vlan vlan)
808 {
809 struct iavf_vlan_filter *f = NULL;
810
811 spin_lock_bh(&adapter->mac_vlan_list_lock);
812
813 f = iavf_find_vlan(adapter, vlan);
814 if (!f) {
815 f = kzalloc(sizeof(*f), GFP_ATOMIC);
816 if (!f)
817 goto clearout;
818
819 f->vlan = vlan;
820
821 list_add_tail(&f->list, &adapter->vlan_filter_list);
822 f->state = IAVF_VLAN_ADD;
823 adapter->num_vlan_filters++;
824 iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
825 }
826
827 clearout:
828 spin_unlock_bh(&adapter->mac_vlan_list_lock);
829 return f;
830 }
831
832 /**
833 * iavf_del_vlan - Remove a vlan filter from the list
834 * @adapter: board private structure
835 * @vlan: VLAN tag
836 **/
837 static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
838 {
839 struct iavf_vlan_filter *f;
840
841 spin_lock_bh(&adapter->mac_vlan_list_lock);
842
843 f = iavf_find_vlan(adapter, vlan);
844 if (f) {
845 f->state = IAVF_VLAN_REMOVE;
846 iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
847 }
848
849 spin_unlock_bh(&adapter->mac_vlan_list_lock);
850 }
851
852 /**
853 * iavf_restore_filters
854 * @adapter: board private structure
855 *
856 * Restore existing non-MAC filters when the VF netdev comes back up
857 **/
858 static void iavf_restore_filters(struct iavf_adapter *adapter)
859 {
860 struct iavf_vlan_filter *f;
861
862 /* re-add all VLAN filters */
863 spin_lock_bh(&adapter->mac_vlan_list_lock);
864
865 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
866 if (f->state == IAVF_VLAN_INACTIVE)
867 f->state = IAVF_VLAN_ADD;
868 }
869
870 spin_unlock_bh(&adapter->mac_vlan_list_lock);
871 adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
872 }
873
874 /**
875 * iavf_get_num_vlans_added - get number of VLANs added
876 * @adapter: board private structure
877 */
878 u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter)
879 {
880 return adapter->num_vlan_filters;
881 }
882
883 /**
884 * iavf_get_max_vlans_allowed - get maximum VLANs allowed for this VF
885 * @adapter: board private structure
886 *
887 * This depends on the negotiated VLAN capability. For VIRTCHNL_VF_OFFLOAD_VLAN,
888 * no limit is imposed, which preserves existing behavior; for
889 * VIRTCHNL_VF_OFFLOAD_VLAN_V2, the maximum reported by the PF is used.
890 **/
891 static u16 iavf_get_max_vlans_allowed(struct iavf_adapter *adapter)
892 {
893 /* don't impose any limit for VIRTCHNL_VF_OFFLOAD_VLAN since there has
894 * never been a limit on the VF driver side
895 */
896 if (VLAN_ALLOWED(adapter))
897 return VLAN_N_VID;
898 else if (VLAN_V2_ALLOWED(adapter))
899 return adapter->vlan_v2_caps.filtering.max_filters;
900
901 return 0;
902 }
903
904 /**
905 * iavf_max_vlans_added - check if maximum VLANs allowed already exist
906 * @adapter: board private structure
907 **/
908 static bool iavf_max_vlans_added(struct iavf_adapter *adapter)
909 {
910 if (iavf_get_num_vlans_added(adapter) <
911 iavf_get_max_vlans_allowed(adapter))
912 return false;
913
914 return true;
915 }
916
917 /**
918 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
919 * @netdev: network device struct
920 * @proto: unused protocol data
921 * @vid: VLAN tag
922 **/
923 static int iavf_vlan_rx_add_vid(struct net_device *netdev,
924 __always_unused __be16 proto, u16 vid)
925 {
926 struct iavf_adapter *adapter = netdev_priv(netdev);
927
928 /* Do not track VLAN 0 filter, always added by the PF on VF init */
929 if (!vid)
930 return 0;
931
932 if (!VLAN_FILTERING_ALLOWED(adapter))
933 return -EIO;
934
935 if (iavf_max_vlans_added(adapter)) {
936 netdev_err(netdev, "Max allowed VLAN filters %u. Remove existing VLANs or disable filtering via Ethtool if supported.\n",
937 iavf_get_max_vlans_allowed(adapter));
938 return -EIO;
939 }
940
941 if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto))))
942 return -ENOMEM;
943
944 return 0;
945 }
946
947 /**
948 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
949 * @netdev: network device struct
950 * @proto: unused protocol data
951 * @vid: VLAN tag
952 **/
953 static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
954 __always_unused __be16 proto, u16 vid)
955 {
956 struct iavf_adapter *adapter = netdev_priv(netdev);
957
958 /* We do not track VLAN 0 filter */
959 if (!vid)
960 return 0;
961
962 iavf_del_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)));
963 return 0;
964 }
965
966 /**
967 * iavf_find_filter - Search filter list for specific mac filter
968 * @adapter: board private structure
969 * @macaddr: the MAC address
970 *
971 * Returns ptr to the filter object or NULL. Must be called while holding the
972 * mac_vlan_list_lock.
973 **/
974 static struct
975 iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
976 const u8 *macaddr)
977 {
978 struct iavf_mac_filter *f;
979
980 if (!macaddr)
981 return NULL;
982
983 list_for_each_entry(f, &adapter->mac_filter_list, list) {
984 if (ether_addr_equal(macaddr, f->macaddr))
985 return f;
986 }
987 return NULL;
988 }
989
990 /**
991 * iavf_add_filter - Add a mac filter to the filter list
992 * @adapter: board private structure
993 * @macaddr: the MAC address
994 *
995 * Returns ptr to the filter object or NULL when no memory available.
996 **/
997 struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
998 const u8 *macaddr)
999 {
1000 struct iavf_mac_filter *f;
1001
1002 if (!macaddr)
1003 return NULL;
1004
1005 f = iavf_find_filter(adapter, macaddr);
1006 if (!f) {
1007 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1008 if (!f)
1009 return f;
1010
1011 ether_addr_copy(f->macaddr, macaddr);
1012
1013 list_add_tail(&f->list, &adapter->mac_filter_list);
1014 f->add = true;
1015 f->add_handled = false;
1016 f->is_new_mac = true;
1017 f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr);
1018 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
1019 } else {
1020 f->remove = false;
1021 }
1022
1023 return f;
1024 }
1025
1026 /**
1027 * iavf_replace_primary_mac - Replace current primary address
1028 * @adapter: board private structure
1029 * @new_mac: new MAC address to be applied
1030 *
1031 * Replace current dev_addr and send request to PF for removal of previous
1032 * primary MAC address filter and addition of new primary MAC filter.
1033 * Return 0 for success, -ENOMEM for failure.
1034 *
1035 * Do not call this with mac_vlan_list_lock!
1036 **/
1037 static int iavf_replace_primary_mac(struct iavf_adapter *adapter,
1038 const u8 *new_mac)
1039 {
1040 struct iavf_hw *hw = &adapter->hw;
1041 struct iavf_mac_filter *new_f;
1042 struct iavf_mac_filter *old_f;
1043
1044 spin_lock_bh(&adapter->mac_vlan_list_lock);
1045
1046 new_f = iavf_add_filter(adapter, new_mac);
1047 if (!new_f) {
1048 spin_unlock_bh(&adapter->mac_vlan_list_lock);
1049 return -ENOMEM;
1050 }
1051
1052 old_f = iavf_find_filter(adapter, hw->mac.addr);
1053 if (old_f) {
1054 old_f->is_primary = false;
1055 old_f->remove = true;
1056 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1057 }
1058 /* Always send the request to add if changing primary MAC,
1059 * even if filter is already present on the list
1060 */
1061 new_f->is_primary = true;
1062 new_f->add = true;
1063 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
1064 ether_addr_copy(hw->mac.addr, new_mac);
1065
1066 spin_unlock_bh(&adapter->mac_vlan_list_lock);
1067
1068 /* schedule the watchdog task to immediately process the request */
1069 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1070 return 0;
1071 }
1072
1073 /**
1074 * iavf_is_mac_set_handled - wait for a response to set MAC from PF
1075 * @netdev: network interface device structure
1076 * @macaddr: MAC address to set
1077 *
1078 * Returns true on success, false on failure
1079 */
1080 static bool iavf_is_mac_set_handled(struct net_device *netdev,
1081 const u8 *macaddr)
1082 {
1083 struct iavf_adapter *adapter = netdev_priv(netdev);
1084 struct iavf_mac_filter *f;
1085 bool ret = false;
1086
1087 spin_lock_bh(&adapter->mac_vlan_list_lock);
1088
1089 f = iavf_find_filter(adapter, macaddr);
1090
1091 if (!f || (!f->add && f->add_handled))
1092 ret = true;
1093
1094 spin_unlock_bh(&adapter->mac_vlan_list_lock);
1095
1096 return ret;
1097 }
1098
1099 /**
1100 * iavf_set_mac - NDO callback to set port MAC address
1101 * @netdev: network interface device structure
1102 * @p: pointer to an address structure
1103 *
1104 * Returns 0 on success, negative on failure
1105 */
1106 static int iavf_set_mac(struct net_device *netdev, void *p)
1107 {
1108 struct iavf_adapter *adapter = netdev_priv(netdev);
1109 struct sockaddr *addr = p;
1110 int ret;
1111
1112 if (!is_valid_ether_addr(addr->sa_data))
1113 return -EADDRNOTAVAIL;
1114
1115 ret = iavf_replace_primary_mac(adapter, addr->sa_data);
1116
1117 if (ret)
1118 return ret;
1119
1120 ret = wait_event_interruptible_timeout(adapter->vc_waitqueue,
1121 iavf_is_mac_set_handled(netdev, addr->sa_data),
1122 msecs_to_jiffies(2500));
1123
1124 /* If ret < 0, the wait was interrupted.
1125 * If ret == 0, we timed out waiting for a response from the PF.
1126 * Otherwise the PF responded to the set MAC request; check whether the
1127 * netdev MAC was updated to the requested address. If it was, setting
1128 * the MAC succeeded; if not, return -EACCES.
1129 */
1130 if (ret < 0)
1131 return ret;
1132
1133 if (!ret)
1134 return -EAGAIN;
1135
1136 if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
1137 return -EACCES;
1138
1139 return 0;
1140 }
1141
1142 /**
1143 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
1144 * @netdev: the netdevice
1145 * @addr: address to add
1146 *
1147 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
1148 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1149 */
1150 static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
1151 {
1152 struct iavf_adapter *adapter = netdev_priv(netdev);
1153
1154 if (iavf_add_filter(adapter, addr))
1155 return 0;
1156 else
1157 return -ENOMEM;
1158 }
1159
1160 /**
1161 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
1162 * @netdev: the netdevice
1163 * @addr: address to add
1164 *
1165 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
1166 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
1167 */
1168 static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
1169 {
1170 struct iavf_adapter *adapter = netdev_priv(netdev);
1171 struct iavf_mac_filter *f;
1172
1173 /* Under some circumstances, we might receive a request to delete
1174 * our own device address from our uc list. Because we store the
1175 * device address in the VSI's MAC/VLAN filter list, we need to ignore
1176 * such requests and not delete our device address from this list.
1177 */
1178 if (ether_addr_equal(addr, netdev->dev_addr))
1179 return 0;
1180
1181 f = iavf_find_filter(adapter, addr);
1182 if (f) {
1183 f->remove = true;
1184 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1185 }
1186 return 0;
1187 }
1188
1189 /**
1190 * iavf_set_rx_mode - NDO callback to set the netdev filters
1191 * @netdev: network interface device structure
1192 **/
1193 static void iavf_set_rx_mode(struct net_device *netdev)
1194 {
1195 struct iavf_adapter *adapter = netdev_priv(netdev);
1196
1197 spin_lock_bh(&adapter->mac_vlan_list_lock);
1198 __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
1199 __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
1200 spin_unlock_bh(&adapter->mac_vlan_list_lock);
1201
1202 if (netdev->flags & IFF_PROMISC &&
1203 !(adapter->flags & IAVF_FLAG_PROMISC_ON))
1204 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
1205 else if (!(netdev->flags & IFF_PROMISC) &&
1206 adapter->flags & IAVF_FLAG_PROMISC_ON)
1207 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
1208
1209 if (netdev->flags & IFF_ALLMULTI &&
1210 !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
1211 adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
1212 else if (!(netdev->flags & IFF_ALLMULTI) &&
1213 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
1214 adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
1215 }
1216
1217 /**
1218 * iavf_napi_enable_all - enable NAPI on all queue vectors
1219 * @adapter: board private structure
1220 **/
1221 static void iavf_napi_enable_all(struct iavf_adapter *adapter)
1222 {
1223 int q_idx;
1224 struct iavf_q_vector *q_vector;
1225 int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1226
1227 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1228 struct napi_struct *napi;
1229
1230 q_vector = &adapter->q_vectors[q_idx];
1231 napi = &q_vector->napi;
1232 napi_enable(napi);
1233 }
1234 }
1235
1236 /**
1237 * iavf_napi_disable_all - disable NAPI on all queue vectors
1238 * @adapter: board private structure
1239 **/
1240 static void iavf_napi_disable_all(struct iavf_adapter *adapter)
1241 {
1242 int q_idx;
1243 struct iavf_q_vector *q_vector;
1244 int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1245
1246 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1247 q_vector = &adapter->q_vectors[q_idx];
1248 napi_disable(&q_vector->napi);
1249 }
1250 }
1251
1252 /**
1253 * iavf_configure - set up transmit and receive data structures
1254 * @adapter: board private structure
1255 **/
1256 static void iavf_configure(struct iavf_adapter *adapter)
1257 {
1258 struct net_device *netdev = adapter->netdev;
1259 int i;
1260
1261 iavf_set_rx_mode(netdev);
1262
1263 iavf_configure_tx(adapter);
1264 iavf_configure_rx(adapter);
1265 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;
1266
1267 for (i = 0; i < adapter->num_active_queues; i++) {
1268 struct iavf_ring *ring = &adapter->rx_rings[i];
1269
1270 iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
1271 }
1272 }
1273
1274 /**
1275 * iavf_up_complete - Finish the last steps of bringing up a connection
1276 * @adapter: board private structure
1277 *
1278 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
1279 **/
1280 static void iavf_up_complete(struct iavf_adapter *adapter)
1281 {
1282 iavf_change_state(adapter, __IAVF_RUNNING);
1283 clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1284
1285 iavf_napi_enable_all(adapter);
1286
1287 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
1288 if (CLIENT_ENABLED(adapter))
1289 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
1290 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1291 }
1292
1293 /**
1294 * iavf_clear_mac_vlan_filters - Remove MAC and VLAN filters not yet sent to
1295 * the PF and mark the others for removal.
1296 * @adapter: board private structure
1297 **/
1298 static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
1299 {
1300 struct iavf_vlan_filter *vlf, *vlftmp;
1301 struct iavf_mac_filter *f, *ftmp;
1302
1303 spin_lock_bh(&adapter->mac_vlan_list_lock);
1304 /* clear the sync flag on all filters */
1305 __dev_uc_unsync(adapter->netdev, NULL);
1306 __dev_mc_unsync(adapter->netdev, NULL);
1307
1308 /* remove all MAC filters */
1309 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
1310 list) {
1311 if (f->add) {
1312 list_del(&f->list);
1313 kfree(f);
1314 } else {
1315 f->remove = true;
1316 }
1317 }
1318
1319 /* disable all VLAN filters */
1320 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
1321 list)
1322 vlf->state = IAVF_VLAN_DISABLE;
1323
1324 spin_unlock_bh(&adapter->mac_vlan_list_lock);
1325 }
1326
1327 /**
1328 * iavf_clear_cloud_filters - Remove cloud filters not yet sent to the PF and
1329 * mark the others for removal.
1330 * @adapter: board private structure
1331 **/
1332 static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
1333 {
1334 struct iavf_cloud_filter *cf, *cftmp;
1335
1336 /* remove all cloud filters */
1337 spin_lock_bh(&adapter->cloud_filter_list_lock);
1338 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
1339 list) {
1340 if (cf->add) {
1341 list_del(&cf->list);
1342 kfree(cf);
1343 adapter->num_cloud_filters--;
1344 } else {
1345 cf->del = true;
1346 }
1347 }
1348 spin_unlock_bh(&adapter->cloud_filter_list_lock);
1349 }
1350
1351 /**
1352 * iavf_clear_fdir_filters - Remove fdir filters not yet sent to the PF and
1353 * mark the others for removal.
1354 * @adapter: board private structure
1355 **/
1356 static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
1357 {
1358 struct iavf_fdir_fltr *fdir, *fdirtmp;
1359
1360 /* remove all Flow Director filters */
1361 spin_lock_bh(&adapter->fdir_fltr_lock);
1362 list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
1363 list) {
1364 if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
1365 list_del(&fdir->list);
1366 kfree(fdir);
1367 adapter->fdir_active_fltr--;
1368 } else {
1369 fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
1370 }
1371 }
1372 spin_unlock_bh(&adapter->fdir_fltr_lock);
1373 }
1374
1375 /**
1376 * iavf_clear_adv_rss_conf - Remove advanced RSS configuration not yet sent to
1377 * the PF and mark the rest for removal.
1378 * @adapter: board private structure
1379 **/
1380 static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
1381 {
1382 struct iavf_adv_rss *rss, *rsstmp;
1383
1384 /* remove all advanced RSS configuration */
1385 spin_lock_bh(&adapter->adv_rss_lock);
1386 list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
1387 list) {
1388 if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
1389 list_del(&rss->list);
1390 kfree(rss);
1391 } else {
1392 rss->state = IAVF_ADV_RSS_DEL_REQUEST;
1393 }
1394 }
1395 spin_unlock_bh(&adapter->adv_rss_lock);
1396 }
1397
1398 /**
1399 * iavf_down - Shutdown the connection processing
1400 * @adapter: board private structure
1401 *
1402 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
1403 **/
1404 void iavf_down(struct iavf_adapter *adapter)
1405 {
1406 struct net_device *netdev = adapter->netdev;
1407
1408 if (adapter->state <= __IAVF_DOWN_PENDING)
1409 return;
1410
1411 netif_carrier_off(netdev);
1412 netif_tx_disable(netdev);
1413 adapter->link_up = false;
1414 iavf_napi_disable_all(adapter);
1415 iavf_irq_disable(adapter);
1416
1417 iavf_clear_mac_vlan_filters(adapter);
1418 iavf_clear_cloud_filters(adapter);
1419 iavf_clear_fdir_filters(adapter);
1420 iavf_clear_adv_rss_conf(adapter);
1421
1422 if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
1423 !(test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))) {
1424 /* cancel any current operation */
1425 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1426 /* Schedule operations to close down the HW. Don't wait
1427 * here for this to complete. The watchdog is still running
1428 * and it will take care of this.
1429 */
1430 if (!list_empty(&adapter->mac_filter_list))
1431 adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
1432 if (!list_empty(&adapter->vlan_filter_list))
1433 adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
1434 if (!list_empty(&adapter->cloud_filter_list))
1435 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
1436 if (!list_empty(&adapter->fdir_list_head))
1437 adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
1438 if (!list_empty(&adapter->adv_rss_list_head))
1439 adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
1440 }
1441
1442 adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
1443 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
1444 }
1445
1446 /**
1447 * iavf_acquire_msix_vectors - Setup the MSIX capability
1448 * @adapter: board private structure
1449 * @vectors: number of vectors to request
1450 *
1451 * Work with the OS to set up the MSIX vectors needed.
1452 *
1453 * Returns 0 on success, negative on failure
1454 **/
1455 static int
1456 iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
1457 {
1458 int err, vector_threshold;
1459
1460 /* We'll want at least 3 (vector_threshold):
1461 * 0) Other (Admin Queue and link, mostly)
1462 * 1) TxQ[0] Cleanup
1463 * 2) RxQ[0] Cleanup
1464 */
1465 vector_threshold = MIN_MSIX_COUNT;
1466
1467 /* The more we get, the more we will assign to Tx/Rx Cleanup
1468 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1469 * Right now, we simply care about how many we'll get; we'll
1470 * set them up later while requesting irq's.
1471 */
1472 err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1473 vector_threshold, vectors);
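/* On success, pci_enable_msix_range() returns the number of vectors
 * actually granted, somewhere between vector_threshold and vectors.
 */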
1474 if (err < 0) {
1475 dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
1476 kfree(adapter->msix_entries);
1477 adapter->msix_entries = NULL;
1478 return err;
1479 }
1480
1481 /* Adjust for only the vectors we'll use, which is minimum
1482 * of max_msix_q_vectors + NONQ_VECS, or the number of
1483 * vectors we were allocated.
1484 */
1485 adapter->num_msix_vectors = err;
1486 return 0;
1487 }
1488
1489 /**
1490 * iavf_free_queues - Free memory for all rings
1491 * @adapter: board private structure to initialize
1492 *
1493 * Free all of the memory associated with queue pairs.
1494 **/
1495 static void iavf_free_queues(struct iavf_adapter *adapter)
1496 {
1497 if (!adapter->vsi_res)
1498 return;
1499 adapter->num_active_queues = 0;
1500 kfree(adapter->tx_rings);
1501 adapter->tx_rings = NULL;
1502 kfree(adapter->rx_rings);
1503 adapter->rx_rings = NULL;
1504 }
1505
1506 /**
1507 * iavf_set_queue_vlan_tag_loc - set location for VLAN tag offload
1508 * @adapter: board private structure
1509 *
1510 * Based on negotiated capabilities, the VLAN tag needs to be inserted and/or
1511 * stripped in certain descriptor fields. Instead of checking the offload
1512 * capability bits in the hot path, cache the location in the ring-specific
1513 * flags.
1514 */
1515 void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter)
1516 {
1517 int i;
1518
1519 for (i = 0; i < adapter->num_active_queues; i++) {
1520 struct iavf_ring *tx_ring = &adapter->tx_rings[i];
1521 struct iavf_ring *rx_ring = &adapter->rx_rings[i];
1522
1523 /* prevent multiple L2TAG bits being set after VFR */
1524 tx_ring->flags &=
1525 ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
1526 IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2);
1527 rx_ring->flags &=
1528 ~(IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 |
1529 IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2);
1530
1531 if (VLAN_ALLOWED(adapter)) {
1532 tx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1533 rx_ring->flags |= IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1534 } else if (VLAN_V2_ALLOWED(adapter)) {
1535 struct virtchnl_vlan_supported_caps *stripping_support;
1536 struct virtchnl_vlan_supported_caps *insertion_support;
1537
1538 stripping_support =
1539 &adapter->vlan_v2_caps.offloads.stripping_support;
1540 insertion_support =
1541 &adapter->vlan_v2_caps.offloads.insertion_support;
1542
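/* When the PF reports both outer and inner tag support, the outer
 * location takes precedence below.
 */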
1543 if (stripping_support->outer) {
1544 if (stripping_support->outer &
1545 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1546 rx_ring->flags |=
1547 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1548 else if (stripping_support->outer &
1549 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
1550 rx_ring->flags |=
1551 IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
1552 } else if (stripping_support->inner) {
1553 if (stripping_support->inner &
1554 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1555 rx_ring->flags |=
1556 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1557 else if (stripping_support->inner &
1558 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2_2)
1559 rx_ring->flags |=
1560 IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2;
1561 }
1562
1563 if (insertion_support->outer) {
1564 if (insertion_support->outer &
1565 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1566 tx_ring->flags |=
1567 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1568 else if (insertion_support->outer &
1569 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
1570 tx_ring->flags |=
1571 IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
1572 } else if (insertion_support->inner) {
1573 if (insertion_support->inner &
1574 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG1)
1575 tx_ring->flags |=
1576 IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1;
1577 else if (insertion_support->inner &
1578 VIRTCHNL_VLAN_TAG_LOCATION_L2TAG2)
1579 tx_ring->flags |=
1580 IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2;
1581 }
1582 }
1583 }
1584 }
1585
1586 /**
1587 * iavf_alloc_queues - Allocate memory for all rings
1588 * @adapter: board private structure to initialize
1589 *
1590 * We allocate one ring per queue at run-time since we don't know the
1591 * number of queues at compile-time. The polling_netdev array is
1592 * intended for Multiqueue, but should work fine with a single queue.
1593 **/
1594 static int iavf_alloc_queues(struct iavf_adapter *adapter)
1595 {
1596 int i, num_active_queues;
1597
1598 /* If we're reallocating queues during a reset, we don't yet know for
1599 * certain that the PF gave us the number of queues we asked for, but
1600 * we'll assume it did. Once the basic reset is finished, we'll confirm
1601 * this when we start negotiating the config with the PF.
1602 */
1603 if (adapter->num_req_queues)
1604 num_active_queues = adapter->num_req_queues;
1605 else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1606 adapter->num_tc)
1607 num_active_queues = adapter->ch_config.total_qps;
1608 else
1609 num_active_queues = min_t(int,
1610 adapter->vsi_res->num_queue_pairs,
1611 (int)(num_online_cpus()));
1612
1613
1614 adapter->tx_rings = kcalloc(num_active_queues,
1615 sizeof(struct iavf_ring), GFP_KERNEL);
1616 if (!adapter->tx_rings)
1617 goto err_out;
1618 adapter->rx_rings = kcalloc(num_active_queues,
1619 sizeof(struct iavf_ring), GFP_KERNEL);
1620 if (!adapter->rx_rings)
1621 goto err_out;
1622
1623 for (i = 0; i < num_active_queues; i++) {
1624 struct iavf_ring *tx_ring;
1625 struct iavf_ring *rx_ring;
1626
1627 tx_ring = &adapter->tx_rings[i];
1628
1629 tx_ring->queue_index = i;
1630 tx_ring->netdev = adapter->netdev;
1631 tx_ring->dev = &adapter->pdev->dev;
1632 tx_ring->count = adapter->tx_desc_count;
1633 tx_ring->itr_setting = IAVF_ITR_TX_DEF;
1634 if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
1635 tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;
1636
1637 rx_ring = &adapter->rx_rings[i];
1638 rx_ring->queue_index = i;
1639 rx_ring->netdev = adapter->netdev;
1640 rx_ring->dev = &adapter->pdev->dev;
1641 rx_ring->count = adapter->rx_desc_count;
1642 rx_ring->itr_setting = IAVF_ITR_RX_DEF;
1643 }
1644
1645 adapter->num_active_queues = num_active_queues;
1646
1647 iavf_set_queue_vlan_tag_loc(adapter);
1648
1649 return 0;
1650
1651 err_out:
1652 iavf_free_queues(adapter);
1653 return -ENOMEM;
1654 }
1655
1656 /**
1657 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
1658 * @adapter: board private structure to initialize
1659 *
1660 * Attempt to configure the interrupts using the best available
1661 * capabilities of the hardware and the kernel.
1662 **/
1663 static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
1664 {
1665 int vector, v_budget;
1666 int pairs = 0;
1667 int err = 0;
1668
1669 if (!adapter->vsi_res) {
1670 err = -EIO;
1671 goto out;
1672 }
1673 pairs = adapter->num_active_queues;
1674
1675 /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
1676 * us much good if we have more vectors than CPUs. However, we already
1677 * limit the total number of queues by the number of CPUs so we do not
1678 * need any further limiting here.
1679 */
1680 v_budget = min_t(int, pairs + NONQ_VECS,
1681 (int)adapter->vf_res->max_vectors);
1682
1683 adapter->msix_entries = kcalloc(v_budget,
1684 sizeof(struct msix_entry), GFP_KERNEL);
1685 if (!adapter->msix_entries) {
1686 err = -ENOMEM;
1687 goto out;
1688 }
1689
1690 for (vector = 0; vector < v_budget; vector++)
1691 adapter->msix_entries[vector].entry = vector;
1692
1693 err = iavf_acquire_msix_vectors(adapter, v_budget);
1694 if (!err)
1695 iavf_schedule_finish_config(adapter);
1696
1697 out:
1698 return err;
1699 }
1700
1701 /**
1702 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
1703 * @adapter: board private structure
1704 *
1705 * Return 0 on success, negative on failure
1706 **/
1707 static int iavf_config_rss_aq(struct iavf_adapter *adapter)
1708 {
1709 struct iavf_aqc_get_set_rss_key_data *rss_key =
1710 (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
1711 struct iavf_hw *hw = &adapter->hw;
1712 enum iavf_status status;
1713
1714 if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
1715 /* bail because we already have a command pending */
1716 dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
1717 adapter->current_op);
1718 return -EBUSY;
1719 }
1720
1721 status = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
1722 if (status) {
1723 dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
1724 iavf_stat_str(hw, status),
1725 iavf_aq_str(hw, hw->aq.asq_last_status));
1726 return iavf_status_to_errno(status);
1727
1728 }
1729
1730 status = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
1731 adapter->rss_lut, adapter->rss_lut_size);
1732 if (status) {
1733 dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
1734 iavf_stat_str(hw, status),
1735 iavf_aq_str(hw, hw->aq.asq_last_status));
1736 return iavf_status_to_errno(status);
1737 }
1738
1739 return 0;
1740
1741 }
1742
1743 /**
1744 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
1745 * @adapter: board private structure
1746 *
1747 * Returns 0 on success, negative on failure
1748 **/
1749 static int iavf_config_rss_reg(struct iavf_adapter *adapter)
1750 {
1751 struct iavf_hw *hw = &adapter->hw;
1752 u32 *dw;
1753 u16 i;
1754
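	/* Write the hash key and then the lookup table into the VF's own
	 * RSS registers, one 32-bit word at a time.
	 */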
1755 dw = (u32 *)adapter->rss_key;
1756 for (i = 0; i <= adapter->rss_key_size / 4; i++)
1757 wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);
1758
1759 dw = (u32 *)adapter->rss_lut;
1760 for (i = 0; i <= adapter->rss_lut_size / 4; i++)
1761 wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);
1762
1763 iavf_flush(hw);
1764
1765 return 0;
1766 }
1767
1768 /**
1769 * iavf_config_rss - Configure RSS keys and lut
1770 * @adapter: board private structure
1771 *
1772 * Returns 0 on success, negative on failure
1773 **/
1774 int iavf_config_rss(struct iavf_adapter *adapter)
1775 {
1776
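	/* Pick the configuration path based on negotiated capabilities: let
	 * the PF program RSS if it owns it, otherwise use admin queue
	 * commands if available, and fall back to direct register writes.
	 */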
1777 if (RSS_PF(adapter)) {
1778 adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
1779 IAVF_FLAG_AQ_SET_RSS_KEY;
1780 return 0;
1781 } else if (RSS_AQ(adapter)) {
1782 return iavf_config_rss_aq(adapter);
1783 } else {
1784 return iavf_config_rss_reg(adapter);
1785 }
1786 }
1787
1788 /**
1789 * iavf_fill_rss_lut - Fill the lut with default values
1790 * @adapter: board private structure
1791 **/
1792 static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
1793 {
1794 u16 i;
1795
1796 for (i = 0; i < adapter->rss_lut_size; i++)
1797 adapter->rss_lut[i] = i % adapter->num_active_queues;
1798 }
1799
1800 /**
1801 * iavf_init_rss - Prepare for RSS
1802 * @adapter: board private structure
1803 *
1804 * Return 0 on success, negative on failure
1805 **/
1806 static int iavf_init_rss(struct iavf_adapter *adapter)
1807 {
1808 struct iavf_hw *hw = &adapter->hw;
1809
1810 if (!RSS_PF(adapter)) {
1811 /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
1812 if (adapter->vf_res->vf_cap_flags &
1813 VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
1814 adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
1815 else
1816 adapter->hena = IAVF_DEFAULT_RSS_HENA;
1817
1818 wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
1819 wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
1820 }
1821
1822 iavf_fill_rss_lut(adapter);
1823 netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
1824
1825 return iavf_config_rss(adapter);
1826 }
1827
1828 /**
1829 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
1830 * @adapter: board private structure to initialize
1831 *
1832 * We allocate one q_vector per queue interrupt. If allocation fails we
1833 * return -ENOMEM.
1834 **/
1835 static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
1836 {
1837 int q_idx = 0, num_q_vectors;
1838 struct iavf_q_vector *q_vector;
1839
1840 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1841 adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
1842 GFP_KERNEL);
1843 if (!adapter->q_vectors)
1844 return -ENOMEM;
1845
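	/* Set up one q_vector (and one NAPI context) per queue interrupt;
	 * v_idx and reg_idx are kept identical for this 1:1 mapping.
	 */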
1846 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1847 q_vector = &adapter->q_vectors[q_idx];
1848 q_vector->adapter = adapter;
1849 q_vector->vsi = &adapter->vsi;
1850 q_vector->v_idx = q_idx;
1851 q_vector->reg_idx = q_idx;
1852 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
1853 netif_napi_add(adapter->netdev, &q_vector->napi,
1854 iavf_napi_poll);
1855 }
1856
1857 return 0;
1858 }
1859
1860 /**
1861 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
1862 * @adapter: board private structure to initialize
1863 *
1864 * This function frees the memory allocated to the q_vectors. In addition if
1865 * NAPI is enabled it will delete any references to the NAPI struct prior
1866 * to freeing the q_vector.
1867 **/
1868 static void iavf_free_q_vectors(struct iavf_adapter *adapter)
1869 {
1870 int q_idx, num_q_vectors;
1871
1872 if (!adapter->q_vectors)
1873 return;
1874
1875 num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
1876
1877 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1878 struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];
1879
1880 netif_napi_del(&q_vector->napi);
1881 }
1882 kfree(adapter->q_vectors);
1883 adapter->q_vectors = NULL;
1884 }
1885
1886 /**
1887 * iavf_reset_interrupt_capability - Reset MSIX setup
1888 * @adapter: board private structure
1889 *
1890 **/
1891 static void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
1892 {
1893 if (!adapter->msix_entries)
1894 return;
1895
1896 pci_disable_msix(adapter->pdev);
1897 kfree(adapter->msix_entries);
1898 adapter->msix_entries = NULL;
1899 }
1900
1901 /**
1902 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
1903 * @adapter: board private structure to initialize
1904 *
1905 **/
1906 static int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
1907 {
1908 int err;
1909
1910 err = iavf_alloc_queues(adapter);
1911 if (err) {
1912 dev_err(&adapter->pdev->dev,
1913 "Unable to allocate memory for queues\n");
1914 goto err_alloc_queues;
1915 }
1916
1917 err = iavf_set_interrupt_capability(adapter);
1918 if (err) {
1919 dev_err(&adapter->pdev->dev,
1920 "Unable to setup interrupt capabilities\n");
1921 goto err_set_interrupt;
1922 }
1923
1924 err = iavf_alloc_q_vectors(adapter);
1925 if (err) {
1926 dev_err(&adapter->pdev->dev,
1927 "Unable to allocate memory for queue vectors\n");
1928 goto err_alloc_q_vectors;
1929 }
1930
1931 	/* If we've made it this far with the ADq flag set, then we haven't
1932 	 * bailed out anywhere along the way. ADq is not merely enabled; its
1933 	 * resources have actually been allocated in the reset path.
1934 	 * Now we can truly claim that ADq is enabled.
1935 	 */
1936 if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
1937 adapter->num_tc)
1938 dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
1939 adapter->num_tc);
1940
1941 dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
1942 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
1943 adapter->num_active_queues);
1944
1945 return 0;
1946 err_alloc_q_vectors:
1947 iavf_reset_interrupt_capability(adapter);
1948 err_set_interrupt:
1949 iavf_free_queues(adapter);
1950 err_alloc_queues:
1951 return err;
1952 }
1953
1954 /**
1955 * iavf_free_rss - Free memory used by RSS structs
1956 * @adapter: board private structure
1957 **/
1958 static void iavf_free_rss(struct iavf_adapter *adapter)
1959 {
1960 kfree(adapter->rss_key);
1961 adapter->rss_key = NULL;
1962
1963 kfree(adapter->rss_lut);
1964 adapter->rss_lut = NULL;
1965 }
1966
1967 /**
1968 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
1969 * @adapter: board private structure
1970 * @running: true if adapter->state == __IAVF_RUNNING
1971 *
1972 * Returns 0 on success, negative on failure
1973 **/
1974 static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter, bool running)
1975 {
1976 struct net_device *netdev = adapter->netdev;
1977 int err;
1978
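	/* Tear down the existing IRQs, vectors and queues first, then
	 * rebuild the interrupt scheme with the current queue count.
	 */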
1979 if (running)
1980 iavf_free_traffic_irqs(adapter);
1981 iavf_free_misc_irq(adapter);
1982 iavf_reset_interrupt_capability(adapter);
1983 iavf_free_q_vectors(adapter);
1984 iavf_free_queues(adapter);
1985
1986 err = iavf_init_interrupt_scheme(adapter);
1987 if (err)
1988 goto err;
1989
1990 netif_tx_stop_all_queues(netdev);
1991
1992 err = iavf_request_misc_irq(adapter);
1993 if (err)
1994 goto err;
1995
1996 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1997
1998 iavf_map_rings_to_vectors(adapter);
1999 err:
2000 return err;
2001 }
2002
2003 /**
2004 * iavf_finish_config - do all netdev work that needs RTNL
2005 * @work: our work_struct
2006 *
2007 * Do work that needs both RTNL and crit_lock.
2008 **/
2009 static void iavf_finish_config(struct work_struct *work)
2010 {
2011 struct iavf_adapter *adapter;
2012 int pairs, err;
2013
2014 adapter = container_of(work, struct iavf_adapter, finish_config);
2015
2016 /* Always take RTNL first to prevent circular lock dependency */
2017 rtnl_lock();
2018 mutex_lock(&adapter->crit_lock);
2019
2020 if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
2021 adapter->netdev_registered &&
2022 !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
2023 netdev_update_features(adapter->netdev);
2024 adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
2025 }
2026
2027 switch (adapter->state) {
2028 case __IAVF_DOWN:
2029 if (!adapter->netdev_registered) {
2030 err = register_netdevice(adapter->netdev);
2031 if (err) {
2032 dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
2033 err);
2034
2035 /* go back and try again.*/
2036 iavf_free_rss(adapter);
2037 iavf_free_misc_irq(adapter);
2038 iavf_reset_interrupt_capability(adapter);
2039 iavf_change_state(adapter,
2040 __IAVF_INIT_CONFIG_ADAPTER);
2041 goto out;
2042 }
2043 adapter->netdev_registered = true;
2044 }
2045
2046 /* Set the real number of queues when reset occurs while
2047 * state == __IAVF_DOWN
2048 */
2049 fallthrough;
2050 case __IAVF_RUNNING:
2051 pairs = adapter->num_active_queues;
2052 netif_set_real_num_rx_queues(adapter->netdev, pairs);
2053 netif_set_real_num_tx_queues(adapter->netdev, pairs);
2054 break;
2055
2056 default:
2057 break;
2058 }
2059
2060 out:
2061 mutex_unlock(&adapter->crit_lock);
2062 rtnl_unlock();
2063 }
2064
2065 /**
2066 * iavf_schedule_finish_config - Schedule the finish_config work task
2067 * @adapter: board private structure
2068 **/
2069 void iavf_schedule_finish_config(struct iavf_adapter *adapter)
2070 {
2071 if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
2072 queue_work(adapter->wq, &adapter->finish_config);
2073 }
2074
2075 /**
2076 * iavf_process_aq_command - process aq_required flags
2077 * and sends aq command
2078 * @adapter: pointer to iavf adapter structure
2079 *
2080 * Returns 0 on success
2081 * Returns an error code if no command was sent
2082 * or if the command that was sent failed.
2083 **/
2084 static int iavf_process_aq_command(struct iavf_adapter *adapter)
2085 {
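	/* Only one virtchnl command is issued per call: each branch below
	 * returns immediately, and the next aq_required flag is serviced on
	 * a later watchdog pass once the pending operation has completed.
	 */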
2086 if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
2087 return iavf_send_vf_config_msg(adapter);
2088 if (adapter->aq_required & IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS)
2089 return iavf_send_vf_offload_vlan_v2_msg(adapter);
2090 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
2091 iavf_disable_queues(adapter);
2092 return 0;
2093 }
2094
2095 if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
2096 iavf_map_queues(adapter);
2097 return 0;
2098 }
2099
2100 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
2101 iavf_add_ether_addrs(adapter);
2102 return 0;
2103 }
2104
2105 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
2106 iavf_add_vlans(adapter);
2107 return 0;
2108 }
2109
2110 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
2111 iavf_del_ether_addrs(adapter);
2112 return 0;
2113 }
2114
2115 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
2116 iavf_del_vlans(adapter);
2117 return 0;
2118 }
2119
2120 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
2121 iavf_enable_vlan_stripping(adapter);
2122 return 0;
2123 }
2124
2125 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
2126 iavf_disable_vlan_stripping(adapter);
2127 return 0;
2128 }
2129
2130 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
2131 iavf_configure_queues(adapter);
2132 return 0;
2133 }
2134
2135 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
2136 iavf_enable_queues(adapter);
2137 return 0;
2138 }
2139
2140 if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
2141 /* This message goes straight to the firmware, not the
2142 * PF, so we don't have to set current_op as we will
2143 * not get a response through the ARQ.
2144 */
iavf_init_rss(adapter);
2145 adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
2146 return 0;
2147 }
2148 if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
2149 iavf_get_hena(adapter);
2150 return 0;
2151 }
2152 if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
2153 iavf_set_hena(adapter);
2154 return 0;
2155 }
2156 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
2157 iavf_set_rss_key(adapter);
2158 return 0;
2159 }
2160 if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
2161 iavf_set_rss_lut(adapter);
2162 return 0;
2163 }
2164
2165 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
2166 iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
2167 FLAG_VF_MULTICAST_PROMISC);
2168 return 0;
2169 }
2170
2171 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
2172 iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
2173 return 0;
2174 }
2175 if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
2176 (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
2177 iavf_set_promiscuous(adapter, 0);
2178 return 0;
2179 }
2180
2181 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
2182 iavf_enable_channels(adapter);
2183 return 0;
2184 }
2185
2186 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
2187 iavf_disable_channels(adapter);
2188 return 0;
2189 }
2190 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
2191 iavf_add_cloud_filter(adapter);
2192 return 0;
2193 }
2194
2195 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
2196 iavf_del_cloud_filter(adapter);
2197 return 0;
2198 }
2207 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
2208 iavf_add_fdir_filter(adapter);
2209 return IAVF_SUCCESS;
2210 }
2211 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
2212 iavf_del_fdir_filter(adapter);
2213 return IAVF_SUCCESS;
2214 }
2215 if (adapter->aq_required & IAVF_FLAG_AQ_ADD_ADV_RSS_CFG) {
2216 iavf_add_adv_rss_cfg(adapter);
2217 return 0;
2218 }
2219 if (adapter->aq_required & IAVF_FLAG_AQ_DEL_ADV_RSS_CFG) {
2220 iavf_del_adv_rss_cfg(adapter);
2221 return 0;
2222 }
2223 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING) {
2224 iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2225 return 0;
2226 }
2227 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING) {
2228 iavf_disable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2229 return 0;
2230 }
2231 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING) {
2232 iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021Q);
2233 return 0;
2234 }
2235 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING) {
2236 iavf_enable_vlan_stripping_v2(adapter, ETH_P_8021AD);
2237 return 0;
2238 }
2239 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION) {
2240 iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2241 return 0;
2242 }
2243 if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION) {
2244 iavf_disable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2245 return 0;
2246 }
2247 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION) {
2248 iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021Q);
2249 return 0;
2250 }
2251 if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION) {
2252 iavf_enable_vlan_insertion_v2(adapter, ETH_P_8021AD);
2253 return 0;
2254 }
2255
2256 if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
2257 iavf_request_stats(adapter);
2258 return 0;
2259 }
2260
2261 return -EAGAIN;
2262 }
2263
2264 /**
2265 * iavf_set_vlan_offload_features - set VLAN offload configuration
2266 * @adapter: board private structure
2267 * @prev_features: previous features used for comparison
2268 * @features: updated features used for configuration
2269 *
2270 * Set the aq_required bit(s) based on the requested features passed in to
2271 * configure VLAN stripping and/or VLAN insertion if supported. Also, schedule
2272 * the watchdog if any changes are requested to expedite the request via
2273 * virtchnl.
2274 **/
2275 static void
2276 iavf_set_vlan_offload_features(struct iavf_adapter *adapter,
2277 netdev_features_t prev_features,
2278 netdev_features_t features)
2279 {
2280 bool enable_stripping = true, enable_insertion = true;
2281 u16 vlan_ethertype = 0;
2282 u64 aq_required = 0;
2283
2284 /* keep cases separate because one ethertype for offloads can be
2285 * disabled at the same time as another is disabled, so check for an
2286 * enabled ethertype first, then check for disabled. Default to
2287 * ETH_P_8021Q so an ethertype is specified if disabling insertion and
2288 * stripping.
2289 */
2290 if (features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2291 vlan_ethertype = ETH_P_8021AD;
2292 else if (features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2293 vlan_ethertype = ETH_P_8021Q;
2294 else if (prev_features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX))
2295 vlan_ethertype = ETH_P_8021AD;
2296 else if (prev_features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
2297 vlan_ethertype = ETH_P_8021Q;
2298 else
2299 vlan_ethertype = ETH_P_8021Q;
2300
2301 if (!(features & (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_CTAG_RX)))
2302 enable_stripping = false;
2303 if (!(features & (NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_CTAG_TX)))
2304 enable_insertion = false;
2305
2306 if (VLAN_ALLOWED(adapter)) {
2307 /* VIRTCHNL_VF_OFFLOAD_VLAN only has support for toggling VLAN
2308 * stripping via virtchnl. VLAN insertion can be toggled on the
2309 * netdev, but it doesn't require a virtchnl message
2310 */
2311 if (enable_stripping)
2312 aq_required |= IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
2313 else
2314 aq_required |= IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
2315
2316 } else if (VLAN_V2_ALLOWED(adapter)) {
2317 switch (vlan_ethertype) {
2318 case ETH_P_8021Q:
2319 if (enable_stripping)
2320 aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_STRIPPING;
2321 else
2322 aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_STRIPPING;
2323
2324 if (enable_insertion)
2325 aq_required |= IAVF_FLAG_AQ_ENABLE_CTAG_VLAN_INSERTION;
2326 else
2327 aq_required |= IAVF_FLAG_AQ_DISABLE_CTAG_VLAN_INSERTION;
2328 break;
2329 case ETH_P_8021AD:
2330 if (enable_stripping)
2331 aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_STRIPPING;
2332 else
2333 aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_STRIPPING;
2334
2335 if (enable_insertion)
2336 aq_required |= IAVF_FLAG_AQ_ENABLE_STAG_VLAN_INSERTION;
2337 else
2338 aq_required |= IAVF_FLAG_AQ_DISABLE_STAG_VLAN_INSERTION;
2339 break;
2340 }
2341 }
2342
2343 if (aq_required) {
2344 adapter->aq_required |= aq_required;
2345 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
2346 }
2347 }
2348
2349 /**
2350 * iavf_startup - first step of driver startup
2351 * @adapter: board private structure
2352 *
2353 * Function processes the __IAVF_STARTUP driver state.
2354 * On success the state is changed to __IAVF_INIT_VERSION_CHECK;
2355 * on failure the state is changed to __IAVF_INIT_FAILED.
2356 **/
2357 static void iavf_startup(struct iavf_adapter *adapter)
2358 {
2359 struct pci_dev *pdev = adapter->pdev;
2360 struct iavf_hw *hw = &adapter->hw;
2361 enum iavf_status status;
2362 int ret;
2363
2364 WARN_ON(adapter->state != __IAVF_STARTUP);
2365
2366 /* driver loaded, probe complete */
2367 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2368 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2369 status = iavf_set_mac_type(hw);
2370 if (status) {
2371 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status);
2372 goto err;
2373 }
2374
2375 ret = iavf_check_reset_complete(hw);
2376 if (ret) {
2377 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
2378 ret);
2379 goto err;
2380 }
2381 hw->aq.num_arq_entries = IAVF_AQ_LEN;
2382 hw->aq.num_asq_entries = IAVF_AQ_LEN;
2383 hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2384 hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
2385
2386 status = iavf_init_adminq(hw);
2387 if (status) {
2388 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
2389 status);
2390 goto err;
2391 }
2392 ret = iavf_send_api_ver(adapter);
2393 if (ret) {
2394 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", ret);
2395 iavf_shutdown_adminq(hw);
2396 goto err;
2397 }
2398 iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
2399 return;
2400 err:
2401 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2402 }
2403
2404 /**
2405 * iavf_init_version_check - second step of driver startup
2406 * @adapter: board private structure
2407 *
2408 * Function processes the __IAVF_INIT_VERSION_CHECK driver state.
2409 * On success the state is changed to __IAVF_INIT_GET_RESOURCES;
2410 * on failure the state is changed to __IAVF_INIT_FAILED.
2411 **/
2412 static void iavf_init_version_check(struct iavf_adapter *adapter)
2413 {
2414 struct pci_dev *pdev = adapter->pdev;
2415 struct iavf_hw *hw = &adapter->hw;
2416 int err = -EAGAIN;
2417
2418 WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);
2419
2420 if (!iavf_asq_done(hw)) {
2421 dev_err(&pdev->dev, "Admin queue command never completed\n");
2422 iavf_shutdown_adminq(hw);
2423 iavf_change_state(adapter, __IAVF_STARTUP);
2424 goto err;
2425 }
2426
2427 /* aq msg sent, awaiting reply */
2428 err = iavf_verify_api_ver(adapter);
2429 if (err) {
2430 if (err == -EALREADY)
2431 err = iavf_send_api_ver(adapter);
2432 else
2433 dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
2434 adapter->pf_version.major,
2435 adapter->pf_version.minor,
2436 VIRTCHNL_VERSION_MAJOR,
2437 VIRTCHNL_VERSION_MINOR);
2438 goto err;
2439 }
2440 err = iavf_send_vf_config_msg(adapter);
2441 if (err) {
2442 dev_err(&pdev->dev, "Unable to send config request (%d)\n",
2443 err);
2444 goto err;
2445 }
2446 iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
2447 return;
2448 err:
2449 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2450 }
2451
2452 /**
2453 * iavf_parse_vf_resource_msg - parse response from VIRTCHNL_OP_GET_VF_RESOURCES
2454 * @adapter: board private structure
2455 */
2456 int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
2457 {
2458 int i, num_req_queues = adapter->num_req_queues;
2459 struct iavf_vsi *vsi = &adapter->vsi;
2460
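	/* Locate the LAN (SR-IOV) VSI among the VSIs reported by the PF */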
2461 for (i = 0; i < adapter->vf_res->num_vsis; i++) {
2462 if (adapter->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
2463 adapter->vsi_res = &adapter->vf_res->vsi_res[i];
2464 }
2465 if (!adapter->vsi_res) {
2466 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
2467 return -ENODEV;
2468 }
2469
2470 if (num_req_queues &&
2471 num_req_queues > adapter->vsi_res->num_queue_pairs) {
2472 /* Problem. The PF gave us fewer queues than what we had
2473 * negotiated in our request. Need a reset to see if we can't
2474 * get back to a working state.
2475 */
2476 dev_err(&adapter->pdev->dev,
2477 "Requested %d queues, but PF only gave us %d.\n",
2478 num_req_queues,
2479 adapter->vsi_res->num_queue_pairs);
2480 adapter->flags |= IAVF_FLAG_REINIT_MSIX_NEEDED;
2481 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
2482 iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
2483
2484 return -EAGAIN;
2485 }
2486 adapter->num_req_queues = 0;
2487 adapter->vsi.id = adapter->vsi_res->vsi_id;
2488
2489 adapter->vsi.back = adapter;
2490 adapter->vsi.base_vector = 1;
2491 vsi->netdev = adapter->netdev;
2492 vsi->qs_handle = adapter->vsi_res->qset_handle;
2493 if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2494 adapter->rss_key_size = adapter->vf_res->rss_key_size;
2495 adapter->rss_lut_size = adapter->vf_res->rss_lut_size;
2496 } else {
2497 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
2498 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
2499 }
2500
2501 return 0;
2502 }
2503
2504 /**
2505 * iavf_init_get_resources - third step of driver startup
2506 * @adapter: board private structure
2507 *
2508 * Function processes the __IAVF_INIT_GET_RESOURCES driver state and
2509 * finishes the driver initialization procedure.
2510 * On success the state is changed to __IAVF_DOWN;
2511 * on failure the state is changed to __IAVF_INIT_FAILED.
2512 **/
2513 static void iavf_init_get_resources(struct iavf_adapter *adapter)
2514 {
2515 struct pci_dev *pdev = adapter->pdev;
2516 struct iavf_hw *hw = &adapter->hw;
2517 int err;
2518
2519 WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
2520 /* aq msg sent, awaiting reply */
2521 if (!adapter->vf_res) {
2522 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
2523 GFP_KERNEL);
2524 if (!adapter->vf_res) {
2525 err = -ENOMEM;
2526 goto err;
2527 }
2528 }
2529 err = iavf_get_vf_config(adapter);
2530 if (err == -EALREADY) {
2531 err = iavf_send_vf_config_msg(adapter);
2532 goto err;
2533 } else if (err == -EINVAL) {
2534 /* We only get -EINVAL if the device is in a very bad
2535 * state or if we've been disabled for previous bad
2536 * behavior. Either way, we're done now.
2537 */
2538 iavf_shutdown_adminq(hw);
2539 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
2540 return;
2541 }
2542 if (err) {
2543 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
2544 goto err_alloc;
2545 }
2546
2547 err = iavf_parse_vf_resource_msg(adapter);
2548 if (err) {
2549 dev_err(&pdev->dev, "Failed to parse VF resource message from PF (%d)\n",
2550 err);
2551 goto err_alloc;
2552 }
2553 /* Some features require additional messages to negotiate extended
2554 * capabilities. These are processed in sequence by the
2555 * __IAVF_INIT_EXTENDED_CAPS driver state.
2556 */
2557 adapter->extended_caps = IAVF_EXTENDED_CAPS;
2558
2559 iavf_change_state(adapter, __IAVF_INIT_EXTENDED_CAPS);
2560 return;
2561
2562 err_alloc:
2563 kfree(adapter->vf_res);
2564 adapter->vf_res = NULL;
2565 err:
2566 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2567 }
2568
2569 /**
2570 * iavf_init_send_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2571 * @adapter: board private structure
2572 *
2573 * Function processes send of the extended VLAN V2 capability message to the
2574 * PF. Must clear IAVF_EXTENDED_CAP_RECV_VLAN_V2 if the message is not sent,
2575 * e.g. due to PF not negotiating VIRTCHNL_VF_OFFLOAD_VLAN_V2.
2576 */
2577 static void iavf_init_send_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2578 {
2579 int ret;
2580
2581 WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2));
2582
2583 ret = iavf_send_vf_offload_vlan_v2_msg(adapter);
2584 if (ret && ret == -EOPNOTSUPP) {
2585 /* PF does not support VIRTCHNL_VF_OFFLOAD_VLAN_V2. In this case,
2586 * we did not send the capability exchange message and do not
2587 * expect a response.
2588 */
2589 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2590 }
2591
2592 /* We sent the message, so move on to the next step */
2593 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2594 }
2595
2596 /**
2597 * iavf_init_recv_offload_vlan_v2_caps - part of initializing VLAN V2 caps
2598 * @adapter: board private structure
2599 *
2600 * Function processes receipt of the extended VLAN V2 capability message from
2601 * the PF.
2602 **/
2603 static void iavf_init_recv_offload_vlan_v2_caps(struct iavf_adapter *adapter)
2604 {
2605 int ret;
2606
2607 WARN_ON(!(adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2));
2608
2609 memset(&adapter->vlan_v2_caps, 0, sizeof(adapter->vlan_v2_caps));
2610
2611 ret = iavf_get_vf_vlan_v2_caps(adapter);
2612 if (ret)
2613 goto err;
2614
2615 /* We've processed receipt of the VLAN V2 caps message */
2616 adapter->extended_caps &= ~IAVF_EXTENDED_CAP_RECV_VLAN_V2;
2617 return;
2618 err:
2619 /* We didn't receive a reply. Make sure we try sending again when
2620 * __IAVF_INIT_FAILED attempts to recover.
2621 */
2622 adapter->extended_caps |= IAVF_EXTENDED_CAP_SEND_VLAN_V2;
2623 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2624 }
2625
2626 /**
2627 * iavf_init_process_extended_caps - Part of driver startup
2628 * @adapter: board private structure
2629 *
2630 * Function processes __IAVF_INIT_EXTENDED_CAPS driver state. This state
2631 * handles negotiating capabilities for features which require an additional
2632 * message.
2633 *
2634 * Once all extended capabilities exchanges are finished, the driver will
2635 * transition into __IAVF_INIT_CONFIG_ADAPTER.
2636 */
2637 static void iavf_init_process_extended_caps(struct iavf_adapter *adapter)
2638 {
2639 WARN_ON(adapter->state != __IAVF_INIT_EXTENDED_CAPS);
2640
2641 /* Process capability exchange for VLAN V2 */
2642 if (adapter->extended_caps & IAVF_EXTENDED_CAP_SEND_VLAN_V2) {
2643 iavf_init_send_offload_vlan_v2_caps(adapter);
2644 return;
2645 } else if (adapter->extended_caps & IAVF_EXTENDED_CAP_RECV_VLAN_V2) {
2646 iavf_init_recv_offload_vlan_v2_caps(adapter);
2647 return;
2648 }
2649
2650 /* When we reach here, no further extended capabilities exchanges are
2651 * necessary, so we finally transition into __IAVF_INIT_CONFIG_ADAPTER
2652 */
2653 iavf_change_state(adapter, __IAVF_INIT_CONFIG_ADAPTER);
2654 }
2655
2656 /**
2657 * iavf_init_config_adapter - last part of driver startup
2658 * @adapter: board private structure
2659 *
2660 * After all the supported capabilities are negotiated, then the
2661 * __IAVF_INIT_CONFIG_ADAPTER state will finish driver initialization.
2662 */
2663 static void iavf_init_config_adapter(struct iavf_adapter *adapter)
2664 {
2665 struct net_device *netdev = adapter->netdev;
2666 struct pci_dev *pdev = adapter->pdev;
2667 int err;
2668
2669 WARN_ON(adapter->state != __IAVF_INIT_CONFIG_ADAPTER);
2670
2671 if (iavf_process_config(adapter))
2672 goto err;
2673
2674 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2675
2676 adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
2677
2678 netdev->netdev_ops = &iavf_netdev_ops;
2679 iavf_set_ethtool_ops(netdev);
2680 netdev->watchdog_timeo = 5 * HZ;
2681
2682 /* MTU range: 68 - 9710 */
2683 netdev->min_mtu = ETH_MIN_MTU;
2684 netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
2685
2686 if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
2687 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
2688 adapter->hw.mac.addr);
2689 eth_hw_addr_random(netdev);
2690 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
2691 } else {
2692 eth_hw_addr_set(netdev, adapter->hw.mac.addr);
2693 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
2694 }
2695
2696 adapter->tx_desc_count = IAVF_DEFAULT_TXD;
2697 adapter->rx_desc_count = IAVF_DEFAULT_RXD;
2698 err = iavf_init_interrupt_scheme(adapter);
2699 if (err)
2700 goto err_sw_init;
2701 iavf_map_rings_to_vectors(adapter);
2702 if (adapter->vf_res->vf_cap_flags &
2703 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2704 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
2705
2706 err = iavf_request_misc_irq(adapter);
2707 if (err)
2708 goto err_sw_init;
2709
2710 netif_carrier_off(netdev);
2711 adapter->link_up = false;
2712 netif_tx_stop_all_queues(netdev);
2713
2714 if (CLIENT_ALLOWED(adapter)) {
2715 err = iavf_lan_add_device(adapter);
2716 if (err)
2717 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
2718 err);
2719 }
2720 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
2721 if (netdev->features & NETIF_F_GRO)
2722 dev_info(&pdev->dev, "GRO is enabled\n");
2723
2724 iavf_change_state(adapter, __IAVF_DOWN);
2725 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2726
2727 iavf_misc_irq_enable(adapter);
2728 wake_up(&adapter->down_waitqueue);
2729
2730 adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
2731 adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
2732 if (!adapter->rss_key || !adapter->rss_lut) {
2733 err = -ENOMEM;
2734 goto err_mem;
2735 }
2736 if (RSS_AQ(adapter))
2737 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2738 else
2739 iavf_init_rss(adapter);
2740
2741 if (VLAN_V2_ALLOWED(adapter))
2742 /* request initial VLAN offload settings */
2743 iavf_set_vlan_offload_features(adapter, 0, netdev->features);
2744
2745 iavf_schedule_finish_config(adapter);
2746 return;
2747
2748 err_mem:
2749 iavf_free_rss(adapter);
2750 iavf_free_misc_irq(adapter);
2751 err_sw_init:
2752 iavf_reset_interrupt_capability(adapter);
2753 err:
2754 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2755 }
2756
2757 /**
2758 * iavf_watchdog_task - Periodic call-back task
2759 * @work: pointer to work_struct
2760 **/
2761 static void iavf_watchdog_task(struct work_struct *work)
2762 {
2763 struct iavf_adapter *adapter = container_of(work,
2764 struct iavf_adapter,
2765 watchdog_task.work);
2766 struct iavf_hw *hw = &adapter->hw;
2767 u32 reg_val;
2768
2769 if (!mutex_trylock(&adapter->crit_lock)) {
2770 if (adapter->state == __IAVF_REMOVE)
2771 return;
2772
2773 goto restart_watchdog;
2774 }
2775
2776 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2777 iavf_change_state(adapter, __IAVF_COMM_FAILED);
2778
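	/* Drive the driver state machine: init states are handled one step
	 * per pass and the watchdog is requeued to advance to the next one.
	 */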
2779 switch (adapter->state) {
2780 case __IAVF_STARTUP:
2781 iavf_startup(adapter);
2782 mutex_unlock(&adapter->crit_lock);
2783 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2784 msecs_to_jiffies(30));
2785 return;
2786 case __IAVF_INIT_VERSION_CHECK:
2787 iavf_init_version_check(adapter);
2788 mutex_unlock(&adapter->crit_lock);
2789 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2790 msecs_to_jiffies(30));
2791 return;
2792 case __IAVF_INIT_GET_RESOURCES:
2793 iavf_init_get_resources(adapter);
2794 mutex_unlock(&adapter->crit_lock);
2795 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2796 msecs_to_jiffies(1));
2797 return;
2798 case __IAVF_INIT_EXTENDED_CAPS:
2799 iavf_init_process_extended_caps(adapter);
2800 mutex_unlock(&adapter->crit_lock);
2801 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2802 msecs_to_jiffies(1));
2803 return;
2804 case __IAVF_INIT_CONFIG_ADAPTER:
2805 iavf_init_config_adapter(adapter);
2806 mutex_unlock(&adapter->crit_lock);
2807 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2808 msecs_to_jiffies(1));
2809 return;
2810 case __IAVF_INIT_FAILED:
2811 if (test_bit(__IAVF_IN_REMOVE_TASK,
2812 &adapter->crit_section)) {
2813 /* Do not update the state and do not reschedule
2814 * watchdog task, iavf_remove should handle this state
2815 * as it can loop forever
2816 */
2817 mutex_unlock(&adapter->crit_lock);
2818 return;
2819 }
2820 if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
2821 dev_err(&adapter->pdev->dev,
2822 "Failed to communicate with PF; waiting before retry\n");
2823 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2824 iavf_shutdown_adminq(hw);
2825 mutex_unlock(&adapter->crit_lock);
2826 queue_delayed_work(adapter->wq,
2827 &adapter->watchdog_task, (5 * HZ));
2828 return;
2829 }
2830 /* Try again from failed step*/
2831 iavf_change_state(adapter, adapter->last_state);
2832 mutex_unlock(&adapter->crit_lock);
2833 queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ);
2834 return;
2835 case __IAVF_COMM_FAILED:
2836 if (test_bit(__IAVF_IN_REMOVE_TASK,
2837 &adapter->crit_section)) {
2838 /* Set state to __IAVF_INIT_FAILED and perform remove
2839 * steps. Remove IAVF_FLAG_PF_COMMS_FAILED so the task
2840 * doesn't bring the state back to __IAVF_COMM_FAILED.
2841 */
2842 iavf_change_state(adapter, __IAVF_INIT_FAILED);
2843 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2844 mutex_unlock(&adapter->crit_lock);
2845 return;
2846 }
2847 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2848 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2849 if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
2850 reg_val == VIRTCHNL_VFR_COMPLETED) {
2851 /* A chance for redemption! */
2852 dev_err(&adapter->pdev->dev,
2853 "Hardware came out of reset. Attempting reinit.\n");
2854 /* When init task contacts the PF and
2855 * gets everything set up again, it'll restart the
2856 * watchdog for us. Down, boy. Sit. Stay. Woof.
2857 */
2858 iavf_change_state(adapter, __IAVF_STARTUP);
2859 adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
2860 }
2861 adapter->aq_required = 0;
2862 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2863 mutex_unlock(&adapter->crit_lock);
2864 queue_delayed_work(adapter->wq,
2865 &adapter->watchdog_task,
2866 msecs_to_jiffies(10));
2867 return;
2868 case __IAVF_RESETTING:
2869 mutex_unlock(&adapter->crit_lock);
2870 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2871 HZ * 2);
2872 return;
2873 case __IAVF_DOWN:
2874 case __IAVF_DOWN_PENDING:
2875 case __IAVF_TESTING:
2876 case __IAVF_RUNNING:
2877 if (adapter->current_op) {
2878 if (!iavf_asq_done(hw)) {
2879 dev_dbg(&adapter->pdev->dev,
2880 "Admin queue timeout\n");
2881 iavf_send_api_ver(adapter);
2882 }
2883 } else {
2884 int ret = iavf_process_aq_command(adapter);
2885
2886 /* An error will be returned if no commands were
2887 * processed; use this opportunity to update stats
2888 * if the error isn't -EOPNOTSUPP
2889 */
2890 if (ret && ret != -EOPNOTSUPP &&
2891 adapter->state == __IAVF_RUNNING)
2892 iavf_request_stats(adapter);
2893 }
2894 if (adapter->state == __IAVF_RUNNING)
2895 iavf_detect_recover_hung(&adapter->vsi);
2896 break;
2897 case __IAVF_REMOVE:
2898 default:
2899 mutex_unlock(&adapter->crit_lock);
2900 return;
2901 }
2902
2903 /* check for hw reset */
2904 reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2905 if (!reg_val) {
2906 adapter->aq_required = 0;
2907 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2908 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
2909 iavf_schedule_reset(adapter, IAVF_FLAG_RESET_PENDING);
2910 mutex_unlock(&adapter->crit_lock);
2911 queue_delayed_work(adapter->wq,
2912 &adapter->watchdog_task, HZ * 2);
2913 return;
2914 }
2915
2916 schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
2917 mutex_unlock(&adapter->crit_lock);
2918 restart_watchdog:
2919 if (adapter->state >= __IAVF_DOWN)
2920 queue_work(adapter->wq, &adapter->adminq_task);
2921 if (adapter->aq_required)
2922 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2923 msecs_to_jiffies(20));
2924 else
2925 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
2926 HZ * 2);
2927 }
2928
2929 /**
2930 * iavf_disable_vf - disable VF
2931 * @adapter: board private structure
2932 *
2933 * Set communication failed flag and free all resources.
2934 * NOTE: This function is expected to be called with crit_lock being held.
2935 **/
2936 static void iavf_disable_vf(struct iavf_adapter *adapter)
2937 {
2938 struct iavf_mac_filter *f, *ftmp;
2939 struct iavf_vlan_filter *fv, *fvtmp;
2940 struct iavf_cloud_filter *cf, *cftmp;
2941
2942 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
2943
2944 /* We don't use netif_running() because it may be true prior to
2945 * ndo_open() returning, so we can't assume it means all our open
2946 * tasks have finished, since we're not holding the rtnl_lock here.
2947 */
2948 if (adapter->state == __IAVF_RUNNING) {
2949 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2950 netif_carrier_off(adapter->netdev);
2951 netif_tx_disable(adapter->netdev);
2952 adapter->link_up = false;
2953 iavf_napi_disable_all(adapter);
2954 iavf_irq_disable(adapter);
2955 iavf_free_traffic_irqs(adapter);
2956 iavf_free_all_tx_resources(adapter);
2957 iavf_free_all_rx_resources(adapter);
2958 }
2959
2960 spin_lock_bh(&adapter->mac_vlan_list_lock);
2961
2962 /* Delete all of the filters */
2963 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2964 list_del(&f->list);
2965 kfree(f);
2966 }
2967
2968 list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
2969 list_del(&fv->list);
2970 kfree(fv);
2971 }
2972 adapter->num_vlan_filters = 0;
2973
2974 spin_unlock_bh(&adapter->mac_vlan_list_lock);
2975
2976 spin_lock_bh(&adapter->cloud_filter_list_lock);
2977 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
2978 list_del(&cf->list);
2979 kfree(cf);
2980 adapter->num_cloud_filters--;
2981 }
2982 spin_unlock_bh(&adapter->cloud_filter_list_lock);
2983
2984 iavf_free_misc_irq(adapter);
2985 iavf_reset_interrupt_capability(adapter);
2986 iavf_free_q_vectors(adapter);
2987 iavf_free_queues(adapter);
2988 memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
2989 iavf_shutdown_adminq(&adapter->hw);
2990 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2991 iavf_change_state(adapter, __IAVF_DOWN);
2992 wake_up(&adapter->down_waitqueue);
2993 dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
2994 }
2995
2996 /**
2997 * iavf_reset_task - Call-back task to handle hardware reset
2998 * @work: pointer to work_struct
2999 *
3000 * During reset we need to shut down and reinitialize the admin queue
3001 * before we can use it to communicate with the PF again. We also clear
3002 * and reinit the rings because that context is lost as well.
3003 **/
3004 static void iavf_reset_task(struct work_struct *work)
3005 {
3006 struct iavf_adapter *adapter = container_of(work,
3007 struct iavf_adapter,
3008 reset_task);
3009 struct virtchnl_vf_resource *vfres = adapter->vf_res;
3010 struct net_device *netdev = adapter->netdev;
3011 struct iavf_hw *hw = &adapter->hw;
3012 struct iavf_mac_filter *f, *ftmp;
3013 struct iavf_cloud_filter *cf;
3014 enum iavf_status status;
3015 u32 reg_val;
3016 int i = 0, err;
3017 bool running;
3018
3019 /* When device is being removed it doesn't make sense to run the reset
3020 * task, just return in such a case.
3021 */
3022 if (!mutex_trylock(&adapter->crit_lock)) {
3023 if (adapter->state != __IAVF_REMOVE)
3024 queue_work(adapter->wq, &adapter->reset_task);
3025
3026 return;
3027 }
3028
3029 while (!mutex_trylock(&adapter->client_lock))
3030 usleep_range(500, 1000);
3031 if (CLIENT_ENABLED(adapter)) {
3032 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
3033 IAVF_FLAG_CLIENT_NEEDS_CLOSE |
3034 IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
3035 IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
3036 cancel_delayed_work_sync(&adapter->client_task);
3037 iavf_notify_client_close(&adapter->vsi, true);
3038 }
3039 iavf_misc_irq_disable(adapter);
3040 if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
3041 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
3042 /* Restart the AQ here. If we have been reset but didn't
3043 * detect it, or if the PF had to reinit, our AQ will be hosed.
3044 */
3045 iavf_shutdown_adminq(hw);
3046 iavf_init_adminq(hw);
3047 iavf_request_reset(adapter);
3048 }
3049 adapter->flags |= IAVF_FLAG_RESET_PENDING;
3050
3051 /* poll until we see the reset actually happen */
3052 for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
3053 reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
3054 IAVF_VF_ARQLEN1_ARQENABLE_MASK;
3055 if (!reg_val)
3056 break;
3057 usleep_range(5000, 10000);
3058 }
3059 if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
3060 dev_info(&adapter->pdev->dev, "Never saw reset\n");
3061 goto continue_reset; /* act like the reset happened */
3062 }
3063
3064 /* wait until the reset is complete and the PF is responding to us */
3065 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
3066 /* sleep first to make sure a minimum wait time is met */
3067 msleep(IAVF_RESET_WAIT_MS);
3068
3069 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
3070 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
3071 if (reg_val == VIRTCHNL_VFR_VFACTIVE)
3072 break;
3073 }
3074
3075 pci_set_master(adapter->pdev);
3076 pci_restore_msi_state(adapter->pdev);
3077
3078 if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
3079 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
3080 reg_val);
3081 iavf_disable_vf(adapter);
3082 mutex_unlock(&adapter->client_lock);
3083 mutex_unlock(&adapter->crit_lock);
3084 return; /* Do not attempt to reinit. It's dead, Jim. */
3085 }
3086
3087 continue_reset:
3088 /* We don't use netif_running() because it may be true prior to
3089 * ndo_open() returning, so we can't assume it means all our open
3090 * tasks have finished, since we're not holding the rtnl_lock here.
3091 */
3092 running = adapter->state == __IAVF_RUNNING;
3093
3094 if (running) {
3095 netif_carrier_off(netdev);
3096 netif_tx_stop_all_queues(netdev);
3097 adapter->link_up = false;
3098 iavf_napi_disable_all(adapter);
3099 }
3100 iavf_irq_disable(adapter);
3101
3102 iavf_change_state(adapter, __IAVF_RESETTING);
3103 adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
3104
3105 /* free the Tx/Rx rings and descriptors, might be better to just
3106 * re-use them sometime in the future
3107 */
3108 iavf_free_all_rx_resources(adapter);
3109 iavf_free_all_tx_resources(adapter);
3110
3111 adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
3112 /* kill and reinit the admin queue */
3113 iavf_shutdown_adminq(hw);
3114 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
3115 status = iavf_init_adminq(hw);
3116 if (status) {
3117 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
3118 status);
3119 goto reset_err;
3120 }
3121 adapter->aq_required = 0;
3122
3123 if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3124 (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3125 err = iavf_reinit_interrupt_scheme(adapter, running);
3126 if (err)
3127 goto reset_err;
3128 }
3129
3130 if (RSS_AQ(adapter)) {
3131 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
3132 } else {
3133 err = iavf_init_rss(adapter);
3134 if (err)
3135 goto reset_err;
3136 }
3137
3138 adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
3139 /* always set since VIRTCHNL_OP_GET_VF_RESOURCES has not been
3140 * sent/received yet, so VLAN_V2_ALLOWED() is not reliable here;
3141 * however the VIRTCHNL_OP_GET_OFFLOAD_VLAN_V2_CAPS won't be sent until
3142 * VIRTCHNL_OP_GET_VF_RESOURCES and VIRTCHNL_VF_OFFLOAD_VLAN_V2 have
3143 * been successfully sent and negotiated
3144 */
3145 adapter->aq_required |= IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS;
3146 adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
3147
3148 spin_lock_bh(&adapter->mac_vlan_list_lock);
3149
3150 /* Delete filter for the current MAC address, it could have
3151 * been changed by the PF via administratively set MAC.
3152 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
3153 */
3154 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3155 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
3156 list_del(&f->list);
3157 kfree(f);
3158 }
3159 }
3160 /* re-add all MAC filters */
3161 list_for_each_entry(f, &adapter->mac_filter_list, list) {
3162 f->add = true;
3163 }
3164 spin_unlock_bh(&adapter->mac_vlan_list_lock);
3165
3166 /* check if TCs are running and re-add all cloud filters */
3167 spin_lock_bh(&adapter->cloud_filter_list_lock);
3168 if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
3169 adapter->num_tc) {
3170 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
3171 cf->add = true;
3172 }
3173 }
3174 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3175
3176 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
3177 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3178 iavf_misc_irq_enable(adapter);
3179
3180 mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2);
3181
3182 /* We were running when the reset started, so we need to restore some
3183 * state here.
3184 */
3185 if (running) {
3186 /* allocate transmit descriptors */
3187 err = iavf_setup_all_tx_resources(adapter);
3188 if (err)
3189 goto reset_err;
3190
3191 /* allocate receive descriptors */
3192 err = iavf_setup_all_rx_resources(adapter);
3193 if (err)
3194 goto reset_err;
3195
3196 if ((adapter->flags & IAVF_FLAG_REINIT_MSIX_NEEDED) ||
3197 (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED)) {
3198 err = iavf_request_traffic_irqs(adapter, netdev->name);
3199 if (err)
3200 goto reset_err;
3201
3202 adapter->flags &= ~IAVF_FLAG_REINIT_MSIX_NEEDED;
3203 }
3204
3205 iavf_configure(adapter);
3206
3207 /* iavf_up_complete() will switch device back
3208 * to __IAVF_RUNNING
3209 */
3210 iavf_up_complete(adapter);
3211
3212 iavf_irq_enable(adapter, true);
3213 } else {
3214 iavf_change_state(adapter, __IAVF_DOWN);
3215 wake_up(&adapter->down_waitqueue);
3216 }
3217
3218 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
3219
3220 wake_up(&adapter->reset_waitqueue);
3221 mutex_unlock(&adapter->client_lock);
3222 mutex_unlock(&adapter->crit_lock);
3223
3224 return;
3225 reset_err:
3226 if (running) {
3227 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3228 iavf_free_traffic_irqs(adapter);
3229 }
3230 iavf_disable_vf(adapter);
3231
3232 mutex_unlock(&adapter->client_lock);
3233 mutex_unlock(&adapter->crit_lock);
3234 dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
3235 }
3236
3237 /**
3238 * iavf_adminq_task - worker thread to clean the admin queue
3239 * @work: pointer to work_struct containing our data
3240 **/
3241 static void iavf_adminq_task(struct work_struct *work)
3242 {
3243 struct iavf_adapter *adapter =
3244 container_of(work, struct iavf_adapter, adminq_task);
3245 struct iavf_hw *hw = &adapter->hw;
3246 struct iavf_arq_event_info event;
3247 enum virtchnl_ops v_op;
3248 enum iavf_status ret, v_ret;
3249 u32 val, oldval;
3250 u16 pending;
3251
3252 if (!mutex_trylock(&adapter->crit_lock)) {
3253 if (adapter->state == __IAVF_REMOVE)
3254 return;
3255
3256 queue_work(adapter->wq, &adapter->adminq_task);
3257 goto out;
3258 }
3259
3260 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
3261 goto unlock;
3262
3263 event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
3264 event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
3265 if (!event.msg_buf)
3266 goto unlock;
3267
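	/* Drain the admin receive queue, dispatching each virtchnl message
	 * to its completion handler.
	 */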
3268 do {
3269 ret = iavf_clean_arq_element(hw, &event, &pending);
3270 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
3271 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
3272
3273 if (ret || !v_op)
3274 break; /* No event to process or error cleaning ARQ */
3275
3276 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
3277 event.msg_len);
3278 if (pending != 0)
3279 memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
3280 } while (pending);
3281
3282 if (iavf_is_reset_in_progress(adapter))
3283 goto freedom;
3284
3285 /* check for error indications */
3286 val = rd32(hw, hw->aq.arq.len);
3287 if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
3288 goto freedom;
3289 oldval = val;
3290 if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
3291 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
3292 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
3293 }
3294 if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
3295 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
3296 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
3297 }
3298 if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
3299 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
3300 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
3301 }
3302 if (oldval != val)
3303 wr32(hw, hw->aq.arq.len, val);
3304
3305 val = rd32(hw, hw->aq.asq.len);
3306 oldval = val;
3307 if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
3308 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
3309 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
3310 }
3311 if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
3312 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
3313 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
3314 }
3315 if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
3316 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
3317 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
3318 }
3319 if (oldval != val)
3320 wr32(hw, hw->aq.asq.len, val);
3321
3322 freedom:
3323 kfree(event.msg_buf);
3324 unlock:
3325 mutex_unlock(&adapter->crit_lock);
3326 out:
3327 /* re-enable Admin queue interrupt cause */
3328 iavf_misc_irq_enable(adapter);
3329 }
3330
3331 /**
3332 * iavf_client_task - worker thread to perform client work
3333 * @work: pointer to work_struct containing our data
3334 *
3335 * This task handles client interactions. Because client calls can be
3336 * reentrant, we can't handle them in the watchdog.
3337 **/
3338 static void iavf_client_task(struct work_struct *work)
3339 {
3340 struct iavf_adapter *adapter =
3341 container_of(work, struct iavf_adapter, client_task.work);
3342
3343 /* If we can't get the client bit, just give up. We'll be rescheduled
3344 * later.
3345 */
3346
3347 if (!mutex_trylock(&adapter->client_lock))
3348 return;
3349
3350 if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
3351 iavf_client_subtask(adapter);
3352 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
3353 goto out;
3354 }
3355 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
3356 iavf_notify_client_l2_params(&adapter->vsi);
3357 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
3358 goto out;
3359 }
3360 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
3361 iavf_notify_client_close(&adapter->vsi, false);
3362 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
3363 goto out;
3364 }
3365 if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
3366 iavf_notify_client_open(&adapter->vsi);
3367 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
3368 }
3369 out:
3370 mutex_unlock(&adapter->client_lock);
3371 }
3372
3373 /**
3374 * iavf_free_all_tx_resources - Free Tx Resources for All Queues
3375 * @adapter: board private structure
3376 *
3377 * Free all transmit software resources
3378 **/
3379 void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
3380 {
3381 int i;
3382
3383 if (!adapter->tx_rings)
3384 return;
3385
3386 for (i = 0; i < adapter->num_active_queues; i++)
3387 if (adapter->tx_rings[i].desc)
3388 iavf_free_tx_resources(&adapter->tx_rings[i]);
3389 }
3390
3391 /**
3392 * iavf_setup_all_tx_resources - allocate all queues Tx resources
3393 * @adapter: board private structure
3394 *
3395 * If this function returns with an error, then it's possible one or
3396 * more of the rings is populated (while the rest are not). It is the
3397 * caller's duty to clean those orphaned rings.
3398 *
3399 * Return 0 on success, negative on failure
3400 **/
3401 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
3402 {
3403 int i, err = 0;
3404
3405 for (i = 0; i < adapter->num_active_queues; i++) {
3406 adapter->tx_rings[i].count = adapter->tx_desc_count;
3407 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
3408 if (!err)
3409 continue;
3410 dev_err(&adapter->pdev->dev,
3411 "Allocation for Tx Queue %u failed\n", i);
3412 break;
3413 }
3414
3415 return err;
3416 }
3417
3418 /**
3419 * iavf_setup_all_rx_resources - allocate all queues Rx resources
3420 * @adapter: board private structure
3421 *
3422 * If this function returns with an error, then it's possible one or
3423 * more of the rings is populated (while the rest are not). It is the
3424 * caller's duty to clean those orphaned rings.
3425 *
3426 * Return 0 on success, negative on failure
3427 **/
3428 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
3429 {
3430 int i, err = 0;
3431
3432 for (i = 0; i < adapter->num_active_queues; i++) {
3433 adapter->rx_rings[i].count = adapter->rx_desc_count;
3434 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
3435 if (!err)
3436 continue;
3437 dev_err(&adapter->pdev->dev,
3438 "Allocation for Rx Queue %u failed\n", i);
3439 break;
3440 }
3441 return err;
3442 }
3443
3444 /**
3445 * iavf_free_all_rx_resources - Free Rx Resources for All Queues
3446 * @adapter: board private structure
3447 *
3448 * Free all receive software resources
3449 **/
3450 void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
3451 {
3452 int i;
3453
3454 if (!adapter->rx_rings)
3455 return;
3456
3457 for (i = 0; i < adapter->num_active_queues; i++)
3458 if (adapter->rx_rings[i].desc)
3459 iavf_free_rx_resources(&adapter->rx_rings[i]);
3460 }
3461
3462 /**
3463 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
3464 * @adapter: board private structure
3465 * @max_tx_rate: max Tx bw for a tc
3466 **/
3467 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
3468 u64 max_tx_rate)
3469 {
3470 int speed = 0, ret = 0;
3471
3472 if (ADV_LINK_SUPPORT(adapter)) {
3473 if (adapter->link_speed_mbps < U32_MAX) {
3474 speed = adapter->link_speed_mbps;
3475 goto validate_bw;
3476 } else {
3477 dev_err(&adapter->pdev->dev, "Unknown link speed\n");
3478 return -EINVAL;
3479 }
3480 }
3481
3482 switch (adapter->link_speed) {
3483 case VIRTCHNL_LINK_SPEED_40GB:
3484 speed = SPEED_40000;
3485 break;
3486 case VIRTCHNL_LINK_SPEED_25GB:
3487 speed = SPEED_25000;
3488 break;
3489 case VIRTCHNL_LINK_SPEED_20GB:
3490 speed = SPEED_20000;
3491 break;
3492 case VIRTCHNL_LINK_SPEED_10GB:
3493 speed = SPEED_10000;
3494 break;
3495 case VIRTCHNL_LINK_SPEED_5GB:
3496 speed = SPEED_5000;
3497 break;
3498 case VIRTCHNL_LINK_SPEED_2_5GB:
3499 speed = SPEED_2500;
3500 break;
3501 case VIRTCHNL_LINK_SPEED_1GB:
3502 speed = SPEED_1000;
3503 break;
3504 case VIRTCHNL_LINK_SPEED_100MB:
3505 speed = SPEED_100;
3506 break;
3507 default:
3508 break;
3509 }
3510
3511 validate_bw:
3512 if (max_tx_rate > speed) {
3513 dev_err(&adapter->pdev->dev,
3514 "Invalid tx rate specified\n");
3515 ret = -EINVAL;
3516 }
3517
3518 return ret;
3519 }
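
/* Example (hypothetical numbers, for illustration only): on an adapter that
 * negotiated ADV_LINK_SUPPORT and reports link_speed_mbps == 10000, a
 * requested aggregate max_tx_rate of 12000 Mbps fails the check above with
 * -EINVAL, while 8000 Mbps is accepted.
 */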
3520
3521 /**
3522 * iavf_validate_ch_config - validate queue mapping info
3523 * @adapter: board private structure
3524 * @mqprio_qopt: queue parameters
3525 *
3526  * This function validates the config provided by the user to
3527  * configure queue channels. Returns 0 on a valid
3528 * config.
3529 **/
3530 static int iavf_validate_ch_config(struct iavf_adapter *adapter,
3531 struct tc_mqprio_qopt_offload *mqprio_qopt)
3532 {
3533 u64 total_max_rate = 0;
3534 u32 tx_rate_rem = 0;
3535 int i, num_qps = 0;
3536 u64 tx_rate = 0;
3537 int ret = 0;
3538
3539 if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
3540 mqprio_qopt->qopt.num_tc < 1)
3541 return -EINVAL;
3542
3543 for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
3544 if (!mqprio_qopt->qopt.count[i] ||
3545 mqprio_qopt->qopt.offset[i] != num_qps)
3546 return -EINVAL;
3547 if (mqprio_qopt->min_rate[i]) {
3548 dev_err(&adapter->pdev->dev,
3549 "Invalid min tx rate (greater than 0) specified for TC%d\n",
3550 i);
3551 return -EINVAL;
3552 }
3553
3554 /* convert to Mbps */
3555 tx_rate = div_u64(mqprio_qopt->max_rate[i],
3556 IAVF_MBPS_DIVISOR);
3557
3558 if (mqprio_qopt->max_rate[i] &&
3559 tx_rate < IAVF_MBPS_QUANTA) {
3560 dev_err(&adapter->pdev->dev,
3561 "Invalid max tx rate for TC%d, minimum %dMbps\n",
3562 i, IAVF_MBPS_QUANTA);
3563 return -EINVAL;
3564 }
3565
3566 (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
3567
3568 if (tx_rate_rem != 0) {
3569 dev_err(&adapter->pdev->dev,
3570 "Invalid max tx rate for TC%d, not divisible by %d\n",
3571 i, IAVF_MBPS_QUANTA);
3572 return -EINVAL;
3573 }
3574
3575 total_max_rate += tx_rate;
3576 num_qps += mqprio_qopt->qopt.count[i];
3577 }
3578 if (num_qps > adapter->num_active_queues) {
3579 dev_err(&adapter->pdev->dev,
3580 "Cannot support requested number of queues\n");
3581 return -EINVAL;
3582 }
3583
3584 ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
3585 return ret;
3586 }
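
/* A hypothetical configuration that passes the validation above (assuming at
 * least 8 active queues): num_tc = 2, count = {4, 4}, offset = {0, 4},
 * min_rate = {0, 0}, and per-TC max_rate values that convert to a whole,
 * non-zero multiple of IAVF_MBPS_QUANTA megabits per second.
 */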
3587
3588 /**
3589 * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
3590 * @adapter: board private structure
3591 **/
3592 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
3593 {
3594 struct iavf_cloud_filter *cf, *cftmp;
3595
3596 spin_lock_bh(&adapter->cloud_filter_list_lock);
3597 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
3598 list) {
3599 list_del(&cf->list);
3600 kfree(cf);
3601 adapter->num_cloud_filters--;
3602 }
3603 spin_unlock_bh(&adapter->cloud_filter_list_lock);
3604 }
3605
3606 /**
3607 * __iavf_setup_tc - configure multiple traffic classes
3608 * @netdev: network interface device structure
3609 * @type_data: tc offload data
3610 *
3611 * This function processes the config information provided by the
3612 * user to configure traffic classes/queue channels and packages the
3613  * information to request the PF to set up traffic classes.
3614 *
3615 * Returns 0 on success.
3616 **/
3617 static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
3618 {
3619 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
3620 struct iavf_adapter *adapter = netdev_priv(netdev);
3621 struct virtchnl_vf_resource *vfres = adapter->vf_res;
3622 u8 num_tc = 0, total_qps = 0;
3623 int ret = 0, netdev_tc = 0;
3624 u64 max_tx_rate;
3625 u16 mode;
3626 int i;
3627
3628 num_tc = mqprio_qopt->qopt.num_tc;
3629 mode = mqprio_qopt->mode;
3630
3631 /* delete queue_channel */
3632 if (!mqprio_qopt->qopt.hw) {
3633 if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
3634 /* reset the tc configuration */
3635 netdev_reset_tc(netdev);
3636 adapter->num_tc = 0;
3637 netif_tx_stop_all_queues(netdev);
3638 netif_tx_disable(netdev);
3639 iavf_del_all_cloud_filters(adapter);
3640 adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
3641 total_qps = adapter->orig_num_active_queues;
3642 goto exit;
3643 } else {
3644 return -EINVAL;
3645 }
3646 }
3647
3648 /* add queue channel */
3649 if (mode == TC_MQPRIO_MODE_CHANNEL) {
3650 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
3651 dev_err(&adapter->pdev->dev, "ADq not supported\n");
3652 return -EOPNOTSUPP;
3653 }
3654 if (adapter->ch_config.state != __IAVF_TC_INVALID) {
3655 dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
3656 return -EINVAL;
3657 }
3658
3659 ret = iavf_validate_ch_config(adapter, mqprio_qopt);
3660 if (ret)
3661 return ret;
3662 /* Return if same TC config is requested */
3663 if (adapter->num_tc == num_tc)
3664 return 0;
3665 adapter->num_tc = num_tc;
3666
3667 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3668 if (i < num_tc) {
3669 adapter->ch_config.ch_info[i].count =
3670 mqprio_qopt->qopt.count[i];
3671 adapter->ch_config.ch_info[i].offset =
3672 mqprio_qopt->qopt.offset[i];
3673 total_qps += mqprio_qopt->qopt.count[i];
3674 max_tx_rate = mqprio_qopt->max_rate[i];
3675 /* convert to Mbps */
3676 max_tx_rate = div_u64(max_tx_rate,
3677 IAVF_MBPS_DIVISOR);
3678 adapter->ch_config.ch_info[i].max_tx_rate =
3679 max_tx_rate;
3680 } else {
3681 adapter->ch_config.ch_info[i].count = 1;
3682 adapter->ch_config.ch_info[i].offset = 0;
3683 }
3684 }
3685
3686 /* Take a snapshot of the original config, such as "num_active_queues".
3687 * It is used later when the delete ADQ flow is exercised, so that
3688 * once that flow completes, the VF goes back to its
3689 * original queue configuration
3690 */
3691
3692 adapter->orig_num_active_queues = adapter->num_active_queues;
3693
3694 /* Store queue info per TC so that the VF gets configured
3695 * with the correct number of queues when it completes the ADQ config
3696 * flow
3697 */
3698 adapter->ch_config.total_qps = total_qps;
3699
3700 netif_tx_stop_all_queues(netdev);
3701 netif_tx_disable(netdev);
3702 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
3703 netdev_reset_tc(netdev);
3704 /* Report the tc mapping up the stack */
3705 netdev_set_num_tc(adapter->netdev, num_tc);
3706 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
3707 u16 qcount = mqprio_qopt->qopt.count[i];
3708 u16 qoffset = mqprio_qopt->qopt.offset[i];
3709
3710 if (i < num_tc)
3711 netdev_set_tc_queue(netdev, netdev_tc++, qcount,
3712 qoffset);
3713 }
3714 }
3715 exit:
3716 if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
3717 return 0;
3718
3719 netif_set_real_num_rx_queues(netdev, total_qps);
3720 netif_set_real_num_tx_queues(netdev, total_qps);
3721
3722 return ret;
3723 }
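
/* From user space this path is typically exercised with the mqprio qdisc in
 * channel mode, e.g. (illustrative iproute2 invocation, adjust device name,
 * queue counts and rates to the actual setup):
 *
 *   tc qdisc add dev <vf-netdev> root mqprio num_tc 2 \
 *           map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel \
 *           shaper bw_rlimit max_rate 1Gbit 2Gbit
 *
 * Deleting the root qdisc takes the "delete queue_channel" branch above.
 */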
3724
3725 /**
3726 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
3727 * @adapter: board private structure
3728 * @f: pointer to struct flow_cls_offload
3729 * @filter: pointer to cloud filter structure
3730 */
3731 static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
3732 struct flow_cls_offload *f,
3733 struct iavf_cloud_filter *filter)
3734 {
3735 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3736 struct flow_dissector *dissector = rule->match.dissector;
3737 u16 n_proto_mask = 0;
3738 u16 n_proto_key = 0;
3739 u8 field_flags = 0;
3740 u16 addr_type = 0;
3741 u16 n_proto = 0;
3742 int i = 0;
3743 struct virtchnl_filter *vf = &filter->f;
3744
3745 if (dissector->used_keys &
3746 ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
3747 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
3748 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
3749 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
3750 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
3751 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
3752 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
3753 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
3754 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%llx\n",
3755 dissector->used_keys);
3756 return -EOPNOTSUPP;
3757 }
3758
3759 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
3760 struct flow_match_enc_keyid match;
3761
3762 flow_rule_match_enc_keyid(rule, &match);
3763 if (match.mask->keyid != 0)
3764 field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
3765 }
3766
3767 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
3768 struct flow_match_basic match;
3769
3770 flow_rule_match_basic(rule, &match);
3771 n_proto_key = ntohs(match.key->n_proto);
3772 n_proto_mask = ntohs(match.mask->n_proto);
3773
3774 if (n_proto_key == ETH_P_ALL) {
3775 n_proto_key = 0;
3776 n_proto_mask = 0;
3777 }
3778 n_proto = n_proto_key & n_proto_mask;
3779 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
3780 return -EINVAL;
3781 if (n_proto == ETH_P_IPV6) {
3782 /* specify flow type as TCP IPv6 */
3783 vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
3784 }
3785
3786 if (match.key->ip_proto != IPPROTO_TCP) {
3787 dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
3788 return -EINVAL;
3789 }
3790 }
3791
3792 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
3793 struct flow_match_eth_addrs match;
3794
3795 flow_rule_match_eth_addrs(rule, &match);
3796
3797 /* use is_broadcast and is_zero to check for all-ones (0xff) or all-zero */
3798 if (!is_zero_ether_addr(match.mask->dst)) {
3799 if (is_broadcast_ether_addr(match.mask->dst)) {
3800 field_flags |= IAVF_CLOUD_FIELD_OMAC;
3801 } else {
3802 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
3803 match.mask->dst);
3804 return -EINVAL;
3805 }
3806 }
3807
3808 if (!is_zero_ether_addr(match.mask->src)) {
3809 if (is_broadcast_ether_addr(match.mask->src)) {
3810 field_flags |= IAVF_CLOUD_FIELD_IMAC;
3811 } else {
3812 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
3813 match.mask->src);
3814 return -EINVAL;
3815 }
3816 }
3817
3818 if (!is_zero_ether_addr(match.key->dst))
3819 if (is_valid_ether_addr(match.key->dst) ||
3820 is_multicast_ether_addr(match.key->dst)) {
3821 /* set the mask if a valid dst_mac address */
3822 for (i = 0; i < ETH_ALEN; i++)
3823 vf->mask.tcp_spec.dst_mac[i] |= 0xff;
3824 ether_addr_copy(vf->data.tcp_spec.dst_mac,
3825 match.key->dst);
3826 }
3827
3828 if (!is_zero_ether_addr(match.key->src))
3829 if (is_valid_ether_addr(match.key->src) ||
3830 is_multicast_ether_addr(match.key->src)) {
3831 /* set the mask if a valid src_mac address */
3832 for (i = 0; i < ETH_ALEN; i++)
3833 vf->mask.tcp_spec.src_mac[i] |= 0xff;
3834 ether_addr_copy(vf->data.tcp_spec.src_mac,
3835 match.key->src);
3836 }
3837 }
3838
3839 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
3840 struct flow_match_vlan match;
3841
3842 flow_rule_match_vlan(rule, &match);
3843 if (match.mask->vlan_id) {
3844 if (match.mask->vlan_id == VLAN_VID_MASK) {
3845 field_flags |= IAVF_CLOUD_FIELD_IVLAN;
3846 } else {
3847 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
3848 match.mask->vlan_id);
3849 return -EINVAL;
3850 }
3851 }
3852 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
3853 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
3854 }
3855
3856 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
3857 struct flow_match_control match;
3858
3859 flow_rule_match_control(rule, &match);
3860 addr_type = match.key->addr_type;
3861 }
3862
3863 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
3864 struct flow_match_ipv4_addrs match;
3865
3866 flow_rule_match_ipv4_addrs(rule, &match);
3867 if (match.mask->dst) {
3868 if (match.mask->dst == cpu_to_be32(0xffffffff)) {
3869 field_flags |= IAVF_CLOUD_FIELD_IIP;
3870 } else {
3871 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
3872 be32_to_cpu(match.mask->dst));
3873 return -EINVAL;
3874 }
3875 }
3876
3877 if (match.mask->src) {
3878 if (match.mask->src == cpu_to_be32(0xffffffff)) {
3879 field_flags |= IAVF_CLOUD_FIELD_IIP;
3880 } else {
3881 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
3882 be32_to_cpu(match.mask->src));
3883 return -EINVAL;
3884 }
3885 }
3886
3887 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
3888 dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
3889 return -EINVAL;
3890 }
3891 if (match.key->dst) {
3892 vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
3893 vf->data.tcp_spec.dst_ip[0] = match.key->dst;
3894 }
3895 if (match.key->src) {
3896 vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
3897 vf->data.tcp_spec.src_ip[0] = match.key->src;
3898 }
3899 }
3900
3901 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
3902 struct flow_match_ipv6_addrs match;
3903
3904 flow_rule_match_ipv6_addrs(rule, &match);
3905
3906 /* validate mask, make sure it is not IPV6_ADDR_ANY */
3907 if (ipv6_addr_any(&match.mask->dst)) {
3908 dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
3909 IPV6_ADDR_ANY);
3910 return -EINVAL;
3911 }
3912
3913 /* src and dest IPv6 address should not be LOOPBACK
3914 * (0:0:0:0:0:0:0:1) which can be represented as ::1
3915 */
3916 if (ipv6_addr_loopback(&match.key->dst) ||
3917 ipv6_addr_loopback(&match.key->src)) {
3918 dev_err(&adapter->pdev->dev,
3919 "ipv6 addr should not be loopback\n");
3920 return -EINVAL;
3921 }
3922 if (!ipv6_addr_any(&match.mask->dst) ||
3923 !ipv6_addr_any(&match.mask->src))
3924 field_flags |= IAVF_CLOUD_FIELD_IIP;
3925
3926 for (i = 0; i < 4; i++)
3927 vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
3928 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
3929 sizeof(vf->data.tcp_spec.dst_ip));
3930 for (i = 0; i < 4; i++)
3931 vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
3932 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
3933 sizeof(vf->data.tcp_spec.src_ip));
3934 }
3935 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
3936 struct flow_match_ports match;
3937
3938 flow_rule_match_ports(rule, &match);
3939 if (match.mask->src) {
3940 if (match.mask->src == cpu_to_be16(0xffff)) {
3941 field_flags |= IAVF_CLOUD_FIELD_IIP;
3942 } else {
3943 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
3944 be16_to_cpu(match.mask->src));
3945 return -EINVAL;
3946 }
3947 }
3948
3949 if (match.mask->dst) {
3950 if (match.mask->dst == cpu_to_be16(0xffff)) {
3951 field_flags |= IAVF_CLOUD_FIELD_IIP;
3952 } else {
3953 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
3954 be16_to_cpu(match.mask->dst));
3955 return -EINVAL;
3956 }
3957 }
3958 if (match.key->dst) {
3959 vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
3960 vf->data.tcp_spec.dst_port = match.key->dst;
3961 }
3962
3963 if (match.key->src) {
3964 vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
3965 vf->data.tcp_spec.src_port = match.key->src;
3966 }
3967 }
3968 vf->field_flags = field_flags;
3969
3970 return 0;
3971 }
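
/* Illustrative tc flower rule that this parser can accept (hypothetical
 * addresses and ports; requires an ingress qdisc on the VF netdev):
 *
 *   tc filter add dev <vf-netdev> protocol ip parent ffff: prio 1 flower \
 *           dst_ip 192.168.1.10 ip_proto tcp dst_port 80 \
 *           skip_sw hw_tc 1
 *
 * Only TCP over IPv4/IPv6 is accepted, and the MAC, VLAN, IPv4 and port
 * fields must use exact-match (all-ones) masks.
 */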
3972
3973 /**
3974 * iavf_handle_tclass - Forward to a traffic class on the device
3975 * @adapter: board private structure
3976 * @tc: traffic class index on the device
3977 * @filter: pointer to cloud filter structure
3978 */
3979 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
3980 struct iavf_cloud_filter *filter)
3981 {
3982 if (tc == 0)
3983 return 0;
3984 if (tc < adapter->num_tc) {
3985 if (!filter->f.data.tcp_spec.dst_port) {
3986 dev_err(&adapter->pdev->dev,
3987 "Specify destination port to redirect to traffic class other than TC0\n");
3988 return -EINVAL;
3989 }
3990 }
3991 /* redirect to a traffic class on the same device */
3992 filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
3993 filter->f.action_meta = tc;
3994 return 0;
3995 }
3996
3997 /**
3998 * iavf_find_cf - Find the cloud filter in the list
3999 * @adapter: Board private structure
4000 * @cookie: filter specific cookie
4001 *
4002 * Returns ptr to the filter object or NULL. Must be called while holding the
4003 * cloud_filter_list_lock.
4004 */
4005 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
4006 unsigned long *cookie)
4007 {
4008 struct iavf_cloud_filter *filter = NULL;
4009
4010 if (!cookie)
4011 return NULL;
4012
4013 list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
4014 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
4015 return filter;
4016 }
4017 return NULL;
4018 }
4019
4020 /**
4021 * iavf_configure_clsflower - Add tc flower filters
4022 * @adapter: board private structure
4023 * @cls_flower: Pointer to struct flow_cls_offload
4024 */
4025 static int iavf_configure_clsflower(struct iavf_adapter *adapter,
4026 struct flow_cls_offload *cls_flower)
4027 {
4028 int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
4029 struct iavf_cloud_filter *filter = NULL;
4030 int err = -EINVAL, count = 50;
4031
4032 if (tc < 0) {
4033 dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
4034 return -EINVAL;
4035 }
4036
4037 filter = kzalloc(sizeof(*filter), GFP_KERNEL);
4038 if (!filter)
4039 return -ENOMEM;
4040
4041 while (!mutex_trylock(&adapter->crit_lock)) {
4042 if (--count == 0) {
4043 kfree(filter);
4044 return err;
4045 }
4046 udelay(1);
4047 }
4048
4049 filter->cookie = cls_flower->cookie;
4050
4051 /* bail out here if filter already exists */
4052 spin_lock_bh(&adapter->cloud_filter_list_lock);
4053 if (iavf_find_cf(adapter, &cls_flower->cookie)) {
4054 dev_err(&adapter->pdev->dev, "Failed to add TC Flower filter, it already exists\n");
4055 err = -EEXIST;
4056 goto spin_unlock;
4057 }
4058 spin_unlock_bh(&adapter->cloud_filter_list_lock);
4059
4060 /* set the mask to all zeroes to begin with */
4061 memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
4062 /* start out with flow type and eth type IPv4 */
4063 filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
4064 err = iavf_parse_cls_flower(adapter, cls_flower, filter);
4065 if (err)
4066 goto err;
4067
4068 err = iavf_handle_tclass(adapter, tc, filter);
4069 if (err)
4070 goto err;
4071
4072 /* add filter to the list */
4073 spin_lock_bh(&adapter->cloud_filter_list_lock);
4074 list_add_tail(&filter->list, &adapter->cloud_filter_list);
4075 adapter->num_cloud_filters++;
4076 filter->add = true;
4077 adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
4078 spin_unlock:
4079 spin_unlock_bh(&adapter->cloud_filter_list_lock);
4080 err:
4081 if (err)
4082 kfree(filter);
4083
4084 mutex_unlock(&adapter->crit_lock);
4085 return err;
4086 }
4087
4088 /**
4089 * iavf_delete_clsflower - Remove tc flower filters
4090 * @adapter: board private structure
4091 * @cls_flower: Pointer to struct flow_cls_offload
4092 */
4093 static int iavf_delete_clsflower(struct iavf_adapter *adapter,
4094 struct flow_cls_offload *cls_flower)
4095 {
4096 struct iavf_cloud_filter *filter = NULL;
4097 int err = 0;
4098
4099 spin_lock_bh(&adapter->cloud_filter_list_lock);
4100 filter = iavf_find_cf(adapter, &cls_flower->cookie);
4101 if (filter) {
4102 filter->del = true;
4103 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
4104 } else {
4105 err = -EINVAL;
4106 }
4107 spin_unlock_bh(&adapter->cloud_filter_list_lock);
4108
4109 return err;
4110 }
4111
4112 /**
4113 * iavf_setup_tc_cls_flower - flower classifier offloads
4114 * @adapter: board private structure
4115 * @cls_flower: pointer to flow_cls_offload struct with flow info
4116 */
4117 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
4118 struct flow_cls_offload *cls_flower)
4119 {
4120 switch (cls_flower->command) {
4121 case FLOW_CLS_REPLACE:
4122 return iavf_configure_clsflower(adapter, cls_flower);
4123 case FLOW_CLS_DESTROY:
4124 return iavf_delete_clsflower(adapter, cls_flower);
4125 case FLOW_CLS_STATS:
4126 return -EOPNOTSUPP;
4127 default:
4128 return -EOPNOTSUPP;
4129 }
4130 }
4131
4132 /**
4133 * iavf_setup_tc_block_cb - block callback for tc
4134 * @type: type of offload
4135 * @type_data: offload data
4136  * @cb_priv: driver private data passed to the block callback (the adapter)
4137 *
4138 * This function is the block callback for traffic classes
4139 **/
4140 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
4141 void *cb_priv)
4142 {
4143 struct iavf_adapter *adapter = cb_priv;
4144
4145 if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
4146 return -EOPNOTSUPP;
4147
4148 switch (type) {
4149 case TC_SETUP_CLSFLOWER:
4150 return iavf_setup_tc_cls_flower(cb_priv, type_data);
4151 default:
4152 return -EOPNOTSUPP;
4153 }
4154 }
4155
4156 static LIST_HEAD(iavf_block_cb_list);
4157
4158 /**
4159 * iavf_setup_tc - configure multiple traffic classes
4160 * @netdev: network interface device structure
4161 * @type: type of offload
4162 * @type_data: tc offload data
4163 *
4164 * This function is the callback to ndo_setup_tc in the
4165 * netdev_ops.
4166 *
4167 * Returns 0 on success
4168 **/
4169 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
4170 void *type_data)
4171 {
4172 struct iavf_adapter *adapter = netdev_priv(netdev);
4173
4174 switch (type) {
4175 case TC_SETUP_QDISC_MQPRIO:
4176 return __iavf_setup_tc(netdev, type_data);
4177 case TC_SETUP_BLOCK:
4178 return flow_block_cb_setup_simple(type_data,
4179 &iavf_block_cb_list,
4180 iavf_setup_tc_block_cb,
4181 adapter, adapter, true);
4182 default:
4183 return -EOPNOTSUPP;
4184 }
4185 }
4186
4187 /**
4188 * iavf_open - Called when a network interface is made active
4189 * @netdev: network interface device structure
4190 *
4191 * Returns 0 on success, negative value on failure
4192 *
4193 * The open entry point is called when a network interface is made
4194 * active by the system (IFF_UP). At this point all resources needed
4195 * for transmit and receive operations are allocated, the interrupt
4196 * handler is registered with the OS, the watchdog is started,
4197 * and the stack is notified that the interface is ready.
4198 **/
4199 static int iavf_open(struct net_device *netdev)
4200 {
4201 struct iavf_adapter *adapter = netdev_priv(netdev);
4202 int err;
4203
4204 if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
4205 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
4206 return -EIO;
4207 }
4208
4209 while (!mutex_trylock(&adapter->crit_lock)) {
4210 /* If we are in __IAVF_INIT_CONFIG_ADAPTER state the crit_lock
4211 * is already taken and iavf_open is called from an upper
4212 * device's notifier reacting on NETDEV_REGISTER event.
4213  * We have to leave here to avoid a deadlock.
4214 */
4215 if (adapter->state == __IAVF_INIT_CONFIG_ADAPTER)
4216 return -EBUSY;
4217
4218 usleep_range(500, 1000);
4219 }
4220
4221 if (adapter->state != __IAVF_DOWN) {
4222 err = -EBUSY;
4223 goto err_unlock;
4224 }
4225
4226 if (adapter->state == __IAVF_RUNNING &&
4227 !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
4228 dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
4229 err = 0;
4230 goto err_unlock;
4231 }
4232
4233 /* allocate transmit descriptors */
4234 err = iavf_setup_all_tx_resources(adapter);
4235 if (err)
4236 goto err_setup_tx;
4237
4238 /* allocate receive descriptors */
4239 err = iavf_setup_all_rx_resources(adapter);
4240 if (err)
4241 goto err_setup_rx;
4242
4243 /* clear any pending interrupts, may auto mask */
4244 err = iavf_request_traffic_irqs(adapter, netdev->name);
4245 if (err)
4246 goto err_req_irq;
4247
4248 spin_lock_bh(&adapter->mac_vlan_list_lock);
4249
4250 iavf_add_filter(adapter, adapter->hw.mac.addr);
4251
4252 spin_unlock_bh(&adapter->mac_vlan_list_lock);
4253
4254 /* Restore VLAN filters that were removed with IFF_DOWN */
4255 iavf_restore_filters(adapter);
4256
4257 iavf_configure(adapter);
4258
4259 iavf_up_complete(adapter);
4260
4261 iavf_irq_enable(adapter, true);
4262
4263 mutex_unlock(&adapter->crit_lock);
4264
4265 return 0;
4266
4267 err_req_irq:
4268 iavf_down(adapter);
4269 iavf_free_traffic_irqs(adapter);
4270 err_setup_rx:
4271 iavf_free_all_rx_resources(adapter);
4272 err_setup_tx:
4273 iavf_free_all_tx_resources(adapter);
4274 err_unlock:
4275 mutex_unlock(&adapter->crit_lock);
4276
4277 return err;
4278 }
4279
4280 /**
4281 * iavf_close - Disables a network interface
4282 * @netdev: network interface device structure
4283 *
4284 * Returns 0, this is not allowed to fail
4285 *
4286 * The close entry point is called when an interface is de-activated
4287 * by the OS. The hardware is still under the drivers control, but
4288 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
4289 * are freed, along with all transmit and receive resources.
4290 **/
4291 static int iavf_close(struct net_device *netdev)
4292 {
4293 struct iavf_adapter *adapter = netdev_priv(netdev);
4294 u64 aq_to_restore;
4295 int status;
4296
4297 mutex_lock(&adapter->crit_lock);
4298
4299 if (adapter->state <= __IAVF_DOWN_PENDING) {
4300 mutex_unlock(&adapter->crit_lock);
4301 return 0;
4302 }
4303
4304 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
4305 if (CLIENT_ENABLED(adapter))
4306 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
4307 /* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
4308 * IAVF_FLAG_AQ_DISABLE_QUEUES because in that case there is an rtnl
4309 * deadlock with adminq_task() until iavf_close times out. We must send
4310 * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES so that
4311 * disabling the queues is possible for the VF. Give only the necessary
4312 * flags to iavf_down and save the others to set them right before
4313 * iavf_close() returns, when IAVF_FLAG_AQ_DISABLE_QUEUES has already
4314 * been sent and iavf is in the DOWN state.
4315 */
4316 aq_to_restore = adapter->aq_required;
4317 adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;
4318
4319 /* Remove flags which we do not want to send after close or we want to
4320 * send before disable queues.
4321 */
4322 aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG |
4323 IAVF_FLAG_AQ_ENABLE_QUEUES |
4324 IAVF_FLAG_AQ_CONFIGURE_QUEUES |
4325 IAVF_FLAG_AQ_ADD_VLAN_FILTER |
4326 IAVF_FLAG_AQ_ADD_MAC_FILTER |
4327 IAVF_FLAG_AQ_ADD_CLOUD_FILTER |
4328 IAVF_FLAG_AQ_ADD_FDIR_FILTER |
4329 IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
4330
4331 iavf_down(adapter);
4332 iavf_change_state(adapter, __IAVF_DOWN_PENDING);
4333 iavf_free_traffic_irqs(adapter);
4334
4335 mutex_unlock(&adapter->crit_lock);
4336
4337 /* We explicitly don't free resources here because the hardware is
4338 * still active and can DMA into memory. Resources are cleared in
4339 * iavf_virtchnl_completion() after we get confirmation from the PF
4340 * driver that the rings have been stopped.
4341 *
4342 * Also, we wait for state to transition to __IAVF_DOWN before
4343 * returning. State change occurs in iavf_virtchnl_completion() after
4344 * VF resources are released (which occurs after PF driver processes and
4345 * responds to admin queue commands).
4346 */
4347
4348 status = wait_event_timeout(adapter->down_waitqueue,
4349 adapter->state == __IAVF_DOWN,
4350 msecs_to_jiffies(500));
4351 if (!status)
4352 netdev_warn(netdev, "Device resources not yet released\n");
4353
4354 mutex_lock(&adapter->crit_lock);
4355 adapter->aq_required |= aq_to_restore;
4356 mutex_unlock(&adapter->crit_lock);
4357 return 0;
4358 }
4359
4360 /**
4361 * iavf_change_mtu - Change the Maximum Transfer Unit
4362 * @netdev: network interface device structure
4363 * @new_mtu: new value for maximum frame size
4364 *
4365 * Returns 0 on success, negative on failure
4366 **/
4367 static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
4368 {
4369 struct iavf_adapter *adapter = netdev_priv(netdev);
4370 int ret = 0;
4371
4372 netdev_dbg(netdev, "changing MTU from %d to %d\n",
4373 netdev->mtu, new_mtu);
4374 netdev->mtu = new_mtu;
4375 if (CLIENT_ENABLED(adapter)) {
4376 iavf_notify_client_l2_params(&adapter->vsi);
4377 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
4378 }
4379
4380 if (netif_running(netdev)) {
4381 iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
4382 ret = iavf_wait_for_reset(adapter);
4383 if (ret < 0)
4384 netdev_warn(netdev, "MTU change interrupted waiting for reset\n");
4385 else if (ret)
4386 netdev_warn(netdev, "MTU change timed out waiting for reset\n");
4387 }
4388
4389 return ret;
4390 }
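
/* Note: when the interface is running, an MTU change (e.g. a hypothetical
 * "ip link set dev <vf-netdev> mtu 3000") schedules a full VF reset and
 * waits for it, so the rings are torn down and rebuilt for the new size.
 */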
4391
4392 #define NETIF_VLAN_OFFLOAD_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
4393 NETIF_F_HW_VLAN_CTAG_TX | \
4394 NETIF_F_HW_VLAN_STAG_RX | \
4395 NETIF_F_HW_VLAN_STAG_TX)
4396
4397 /**
4398 * iavf_set_features - set the netdev feature flags
4399 * @netdev: ptr to the netdev being adjusted
4400 * @features: the feature set that the stack is suggesting
4401 * Note: expects to be called while under rtnl_lock()
4402 **/
4403 static int iavf_set_features(struct net_device *netdev,
4404 netdev_features_t features)
4405 {
4406 struct iavf_adapter *adapter = netdev_priv(netdev);
4407
4408 /* trigger update on any VLAN feature change */
4409 if ((netdev->features & NETIF_VLAN_OFFLOAD_FEATURES) ^
4410 (features & NETIF_VLAN_OFFLOAD_FEATURES))
4411 iavf_set_vlan_offload_features(adapter, netdev->features,
4412 features);
4413
4414 return 0;
4415 }
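
/* VLAN stripping/insertion toggles typically reach this callback through
 * ethtool, e.g. (illustrative): "ethtool -K <vf-netdev> rxvlan off" or
 * "ethtool -K <vf-netdev> txvlan on"; the requested bits are first filtered
 * through iavf_fix_features() below, based on what the PF negotiated.
 */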
4416
4417 /**
4418 * iavf_features_check - Validate encapsulated packet conforms to limits
4419 * @skb: skb buff
4420 * @dev: This physical port's netdev
4421 * @features: Offload features that the stack believes apply
4422 **/
4423 static netdev_features_t iavf_features_check(struct sk_buff *skb,
4424 struct net_device *dev,
4425 netdev_features_t features)
4426 {
4427 size_t len;
4428
4429 /* No point in doing any of this if neither checksum nor GSO are
4430 * being requested for this frame. We can rule out both by just
4431 * checking for CHECKSUM_PARTIAL
4432 */
4433 if (skb->ip_summed != CHECKSUM_PARTIAL)
4434 return features;
4435
4436 /* We cannot support GSO if the MSS is going to be less than
4437 * 64 bytes. If it is then we need to drop support for GSO.
4438 */
4439 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
4440 features &= ~NETIF_F_GSO_MASK;
4441
4442 /* MACLEN can support at most 63 words */
4443 len = skb_network_header(skb) - skb->data;
4444 if (len & ~(63 * 2))
4445 goto out_err;
4446
4447 /* IPLEN and EIPLEN can support at most 127 dwords */
4448 len = skb_transport_header(skb) - skb_network_header(skb);
4449 if (len & ~(127 * 4))
4450 goto out_err;
4451
4452 if (skb->encapsulation) {
4453 /* L4TUNLEN can support 127 words */
4454 len = skb_inner_network_header(skb) - skb_transport_header(skb);
4455 if (len & ~(127 * 2))
4456 goto out_err;
4457
4458 /* IPLEN can support at most 127 dwords */
4459 len = skb_inner_transport_header(skb) -
4460 skb_inner_network_header(skb);
4461 if (len & ~(127 * 4))
4462 goto out_err;
4463 }
4464
4465 /* No need to validate L4LEN as TCP is the only protocol with a
4466 * flexible value and we support all possible values supported
4467 * by TCP, which is at most 15 dwords
4468 */
4469
4470 return features;
4471 out_err:
4472 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
4473 }
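
/* Worked example of the limits above (hypothetical frame): a plain TCP/IPv4
 * packet with a 14-byte Ethernet header (7 words) and a 20-byte IP header
 * (5 dwords) keeps its offloads; header lengths that are not whole
 * words/dwords, or that exceed 126/508 bytes respectively, fall back to
 * software checksum and GSO.
 */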
4474
4475 /**
4476 * iavf_get_netdev_vlan_hw_features - get NETDEV VLAN features that can toggle on/off
4477 * @adapter: board private structure
4478 *
4479  * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4480 * were negotiated determine the VLAN features that can be toggled on and off.
4481 **/
4482 static netdev_features_t
4483 iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
4484 {
4485 netdev_features_t hw_features = 0;
4486
4487 if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4488 return hw_features;
4489
4490 /* Enable VLAN features if supported */
4491 if (VLAN_ALLOWED(adapter)) {
4492 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
4493 NETIF_F_HW_VLAN_CTAG_RX);
4494 } else if (VLAN_V2_ALLOWED(adapter)) {
4495 struct virtchnl_vlan_caps *vlan_v2_caps =
4496 &adapter->vlan_v2_caps;
4497 struct virtchnl_vlan_supported_caps *stripping_support =
4498 &vlan_v2_caps->offloads.stripping_support;
4499 struct virtchnl_vlan_supported_caps *insertion_support =
4500 &vlan_v2_caps->offloads.insertion_support;
4501
4502 if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4503 stripping_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4504 if (stripping_support->outer &
4505 VIRTCHNL_VLAN_ETHERTYPE_8100)
4506 hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4507 if (stripping_support->outer &
4508 VIRTCHNL_VLAN_ETHERTYPE_88A8)
4509 hw_features |= NETIF_F_HW_VLAN_STAG_RX;
4510 } else if (stripping_support->inner !=
4511 VIRTCHNL_VLAN_UNSUPPORTED &&
4512 stripping_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4513 if (stripping_support->inner &
4514 VIRTCHNL_VLAN_ETHERTYPE_8100)
4515 hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
4516 }
4517
4518 if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED &&
4519 insertion_support->outer & VIRTCHNL_VLAN_TOGGLE) {
4520 if (insertion_support->outer &
4521 VIRTCHNL_VLAN_ETHERTYPE_8100)
4522 hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4523 if (insertion_support->outer &
4524 VIRTCHNL_VLAN_ETHERTYPE_88A8)
4525 hw_features |= NETIF_F_HW_VLAN_STAG_TX;
4526 } else if (insertion_support->inner &&
4527 insertion_support->inner & VIRTCHNL_VLAN_TOGGLE) {
4528 if (insertion_support->inner &
4529 VIRTCHNL_VLAN_ETHERTYPE_8100)
4530 hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
4531 }
4532 }
4533
4534 return hw_features;
4535 }
4536
4537 /**
4538  * iavf_get_netdev_vlan_features - get the enabled NETDEV VLAN features
4539 * @adapter: board private structure
4540 *
4541  * Depending on whether VIRTCHNL_VF_OFFLOAD_VLAN or VIRTCHNL_VF_OFFLOAD_VLAN_V2
4542 * were negotiated determine the VLAN features that are enabled by default.
4543 **/
4544 static netdev_features_t
4545 iavf_get_netdev_vlan_features(struct iavf_adapter *adapter)
4546 {
4547 netdev_features_t features = 0;
4548
4549 if (!adapter->vf_res || !adapter->vf_res->vf_cap_flags)
4550 return features;
4551
4552 if (VLAN_ALLOWED(adapter)) {
4553 features |= NETIF_F_HW_VLAN_CTAG_FILTER |
4554 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX;
4555 } else if (VLAN_V2_ALLOWED(adapter)) {
4556 struct virtchnl_vlan_caps *vlan_v2_caps =
4557 &adapter->vlan_v2_caps;
4558 struct virtchnl_vlan_supported_caps *filtering_support =
4559 &vlan_v2_caps->filtering.filtering_support;
4560 struct virtchnl_vlan_supported_caps *stripping_support =
4561 &vlan_v2_caps->offloads.stripping_support;
4562 struct virtchnl_vlan_supported_caps *insertion_support =
4563 &vlan_v2_caps->offloads.insertion_support;
4564 u32 ethertype_init;
4565
4566 /* give priority to outer stripping and don't support both outer
4567 * and inner stripping
4568 */
4569 ethertype_init = vlan_v2_caps->offloads.ethertype_init;
4570 if (stripping_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4571 if (stripping_support->outer &
4572 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4573 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4574 features |= NETIF_F_HW_VLAN_CTAG_RX;
4575 else if (stripping_support->outer &
4576 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4577 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4578 features |= NETIF_F_HW_VLAN_STAG_RX;
4579 } else if (stripping_support->inner !=
4580 VIRTCHNL_VLAN_UNSUPPORTED) {
4581 if (stripping_support->inner &
4582 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4583 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4584 features |= NETIF_F_HW_VLAN_CTAG_RX;
4585 }
4586
4587 /* give priority to outer insertion and don't support both outer
4588 * and inner insertion
4589 */
4590 if (insertion_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4591 if (insertion_support->outer &
4592 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4593 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4594 features |= NETIF_F_HW_VLAN_CTAG_TX;
4595 else if (insertion_support->outer &
4596 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4597 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4598 features |= NETIF_F_HW_VLAN_STAG_TX;
4599 } else if (insertion_support->inner !=
4600 VIRTCHNL_VLAN_UNSUPPORTED) {
4601 if (insertion_support->inner &
4602 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4603 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4604 features |= NETIF_F_HW_VLAN_CTAG_TX;
4605 }
4606
4607 /* give priority to outer filtering and don't bother if both
4608 * outer and inner filtering are enabled
4609 */
4610 ethertype_init = vlan_v2_caps->filtering.ethertype_init;
4611 if (filtering_support->outer != VIRTCHNL_VLAN_UNSUPPORTED) {
4612 if (filtering_support->outer &
4613 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4614 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4615 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4616 if (filtering_support->outer &
4617 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4618 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4619 features |= NETIF_F_HW_VLAN_STAG_FILTER;
4620 } else if (filtering_support->inner !=
4621 VIRTCHNL_VLAN_UNSUPPORTED) {
4622 if (filtering_support->inner &
4623 VIRTCHNL_VLAN_ETHERTYPE_8100 &&
4624 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_8100)
4625 features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4626 if (filtering_support->inner &
4627 VIRTCHNL_VLAN_ETHERTYPE_88A8 &&
4628 ethertype_init & VIRTCHNL_VLAN_ETHERTYPE_88A8)
4629 features |= NETIF_F_HW_VLAN_STAG_FILTER;
4630 }
4631 }
4632
4633 return features;
4634 }
4635
4636 #define IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested, allowed, feature_bit) \
4637 (!(((requested) & (feature_bit)) && \
4638 !((allowed) & (feature_bit))))
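
/* In other words, the macro is false only when a feature bit is requested but
 * not present in the allowed set; e.g. with requested = CTAG_RX | CTAG_TX and
 * allowed = CTAG_RX, the CTAG_TX check fails and that bit is cleared below.
 */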
4639
4640 /**
4641 * iavf_fix_netdev_vlan_features - fix NETDEV VLAN features based on support
4642 * @adapter: board private structure
4643 * @requested_features: stack requested NETDEV features
4644 **/
4645 static netdev_features_t
4646 iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
4647 netdev_features_t requested_features)
4648 {
4649 netdev_features_t allowed_features;
4650
4651 allowed_features = iavf_get_netdev_vlan_hw_features(adapter) |
4652 iavf_get_netdev_vlan_features(adapter);
4653
4654 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4655 allowed_features,
4656 NETIF_F_HW_VLAN_CTAG_TX))
4657 requested_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
4658
4659 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4660 allowed_features,
4661 NETIF_F_HW_VLAN_CTAG_RX))
4662 requested_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
4663
4664 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4665 allowed_features,
4666 NETIF_F_HW_VLAN_STAG_TX))
4667 requested_features &= ~NETIF_F_HW_VLAN_STAG_TX;
4668 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4669 allowed_features,
4670 NETIF_F_HW_VLAN_STAG_RX))
4671 requested_features &= ~NETIF_F_HW_VLAN_STAG_RX;
4672
4673 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4674 allowed_features,
4675 NETIF_F_HW_VLAN_CTAG_FILTER))
4676 requested_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
4677
4678 if (!IAVF_NETDEV_VLAN_FEATURE_ALLOWED(requested_features,
4679 allowed_features,
4680 NETIF_F_HW_VLAN_STAG_FILTER))
4681 requested_features &= ~NETIF_F_HW_VLAN_STAG_FILTER;
4682
4683 if ((requested_features &
4684 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX)) &&
4685 (requested_features &
4686 (NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX)) &&
4687 adapter->vlan_v2_caps.offloads.ethertype_match ==
4688 VIRTCHNL_ETHERTYPE_STRIPPING_MATCHES_INSERTION) {
4689 netdev_warn(adapter->netdev, "cannot support CTAG and STAG VLAN stripping and/or insertion simultaneously since CTAG and STAG offloads are mutually exclusive, clearing STAG offload settings\n");
4690 requested_features &= ~(NETIF_F_HW_VLAN_STAG_RX |
4691 NETIF_F_HW_VLAN_STAG_TX);
4692 }
4693
4694 return requested_features;
4695 }
4696
4697 /**
4698 * iavf_fix_features - fix up the netdev feature bits
4699 * @netdev: our net device
4700 * @features: desired feature bits
4701 *
4702 * Returns fixed-up features bits
4703 **/
4704 static netdev_features_t iavf_fix_features(struct net_device *netdev,
4705 netdev_features_t features)
4706 {
4707 struct iavf_adapter *adapter = netdev_priv(netdev);
4708
4709 return iavf_fix_netdev_vlan_features(adapter, features);
4710 }
4711
4712 static const struct net_device_ops iavf_netdev_ops = {
4713 .ndo_open = iavf_open,
4714 .ndo_stop = iavf_close,
4715 .ndo_start_xmit = iavf_xmit_frame,
4716 .ndo_set_rx_mode = iavf_set_rx_mode,
4717 .ndo_validate_addr = eth_validate_addr,
4718 .ndo_set_mac_address = iavf_set_mac,
4719 .ndo_change_mtu = iavf_change_mtu,
4720 .ndo_tx_timeout = iavf_tx_timeout,
4721 .ndo_vlan_rx_add_vid = iavf_vlan_rx_add_vid,
4722 .ndo_vlan_rx_kill_vid = iavf_vlan_rx_kill_vid,
4723 .ndo_features_check = iavf_features_check,
4724 .ndo_fix_features = iavf_fix_features,
4725 .ndo_set_features = iavf_set_features,
4726 .ndo_setup_tc = iavf_setup_tc,
4727 };
4728
4729 /**
4730 * iavf_check_reset_complete - check that VF reset is complete
4731 * @hw: pointer to hw struct
4732 *
4733 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
4734 **/
4735 static int iavf_check_reset_complete(struct iavf_hw *hw)
4736 {
4737 u32 rstat;
4738 int i;
4739
4740 for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
4741 rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
4742 IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
4743 if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
4744 (rstat == VIRTCHNL_VFR_COMPLETED))
4745 return 0;
4746 usleep_range(10, 20);
4747 }
4748 return -EBUSY;
4749 }
4750
4751 /**
4752 * iavf_process_config - Process the config information we got from the PF
4753 * @adapter: board private structure
4754 *
4755 * Verify that we have a valid config struct, and set up our netdev features
4756 * and our VSI struct.
4757 **/
4758 int iavf_process_config(struct iavf_adapter *adapter)
4759 {
4760 struct virtchnl_vf_resource *vfres = adapter->vf_res;
4761 netdev_features_t hw_vlan_features, vlan_features;
4762 struct net_device *netdev = adapter->netdev;
4763 netdev_features_t hw_enc_features;
4764 netdev_features_t hw_features;
4765
4766 hw_enc_features = NETIF_F_SG |
4767 NETIF_F_IP_CSUM |
4768 NETIF_F_IPV6_CSUM |
4769 NETIF_F_HIGHDMA |
4770 NETIF_F_SOFT_FEATURES |
4771 NETIF_F_TSO |
4772 NETIF_F_TSO_ECN |
4773 NETIF_F_TSO6 |
4774 NETIF_F_SCTP_CRC |
4775 NETIF_F_RXHASH |
4776 NETIF_F_RXCSUM |
4777 0;
4778
4779 /* advertise to the stack only if offloads for encapsulated packets are
4780 * supported
4781 */
4782 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
4783 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
4784 NETIF_F_GSO_GRE |
4785 NETIF_F_GSO_GRE_CSUM |
4786 NETIF_F_GSO_IPXIP4 |
4787 NETIF_F_GSO_IPXIP6 |
4788 NETIF_F_GSO_UDP_TUNNEL_CSUM |
4789 NETIF_F_GSO_PARTIAL |
4790 0;
4791
4792 if (!(vfres->vf_cap_flags &
4793 VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
4794 netdev->gso_partial_features |=
4795 NETIF_F_GSO_UDP_TUNNEL_CSUM;
4796
4797 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
4798 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
4799 netdev->hw_enc_features |= hw_enc_features;
4800 }
4801 /* record features VLANs can make use of */
4802 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
4803
4804 /* Write features and hw_features separately to avoid polluting
4805 * with, or dropping, features that are set when we registered.
4806 */
4807 hw_features = hw_enc_features;
4808
4809 /* get HW VLAN features that can be toggled */
4810 hw_vlan_features = iavf_get_netdev_vlan_hw_features(adapter);
4811
4812 /* Enable cloud filter if ADQ is supported */
4813 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
4814 hw_features |= NETIF_F_HW_TC;
4815 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_USO)
4816 hw_features |= NETIF_F_GSO_UDP_L4;
4817
4818 netdev->hw_features |= hw_features | hw_vlan_features;
4819 vlan_features = iavf_get_netdev_vlan_features(adapter);
4820
4821 netdev->features |= hw_features | vlan_features;
4822
4823 if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
4824 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4825
4826 netdev->priv_flags |= IFF_UNICAST_FLT;
4827
4828 /* Do not turn on offloads when they are requested to be turned off.
4829 * TSO needs minimum 576 bytes to work correctly.
4830 */
4831 if (netdev->wanted_features) {
4832 if (!(netdev->wanted_features & NETIF_F_TSO) ||
4833 netdev->mtu < 576)
4834 netdev->features &= ~NETIF_F_TSO;
4835 if (!(netdev->wanted_features & NETIF_F_TSO6) ||
4836 netdev->mtu < 576)
4837 netdev->features &= ~NETIF_F_TSO6;
4838 if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
4839 netdev->features &= ~NETIF_F_TSO_ECN;
4840 if (!(netdev->wanted_features & NETIF_F_GRO))
4841 netdev->features &= ~NETIF_F_GRO;
4842 if (!(netdev->wanted_features & NETIF_F_GSO))
4843 netdev->features &= ~NETIF_F_GSO;
4844 }
4845
4846 return 0;
4847 }
4848
4849 /**
4850 * iavf_shutdown - Shutdown the device in preparation for a reboot
4851 * @pdev: pci device structure
4852 **/
4853 static void iavf_shutdown(struct pci_dev *pdev)
4854 {
4855 struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
4856 struct net_device *netdev = adapter->netdev;
4857
4858 netif_device_detach(netdev);
4859
4860 if (netif_running(netdev))
4861 iavf_close(netdev);
4862
4863 if (iavf_lock_timeout(&adapter->crit_lock, 5000))
4864 dev_warn(&adapter->pdev->dev, "%s: failed to acquire crit_lock\n", __func__);
4865 /* Prevent the watchdog from running. */
4866 iavf_change_state(adapter, __IAVF_REMOVE);
4867 adapter->aq_required = 0;
4868 mutex_unlock(&adapter->crit_lock);
4869
4870 #ifdef CONFIG_PM
4871 pci_save_state(pdev);
4872
4873 #endif
4874 pci_disable_device(pdev);
4875 }
4876
4877 /**
4878 * iavf_probe - Device Initialization Routine
4879 * @pdev: PCI device information struct
4880 * @ent: entry in iavf_pci_tbl
4881 *
4882 * Returns 0 on success, negative on failure
4883 *
4884 * iavf_probe initializes an adapter identified by a pci_dev structure.
4885 * The OS initialization, configuring of the adapter private structure,
4886 * and a hardware reset occur.
4887 **/
4888 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4889 {
4890 struct net_device *netdev;
4891 struct iavf_adapter *adapter = NULL;
4892 struct iavf_hw *hw = NULL;
4893 int err;
4894
4895 err = pci_enable_device(pdev);
4896 if (err)
4897 return err;
4898
4899 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4900 if (err) {
4901 dev_err(&pdev->dev,
4902 "DMA configuration failed: 0x%x\n", err);
4903 goto err_dma;
4904 }
4905
4906 err = pci_request_regions(pdev, iavf_driver_name);
4907 if (err) {
4908 dev_err(&pdev->dev,
4909 "pci_request_regions failed 0x%x\n", err);
4910 goto err_pci_reg;
4911 }
4912
4913 pci_set_master(pdev);
4914
4915 netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
4916 IAVF_MAX_REQ_QUEUES);
4917 if (!netdev) {
4918 err = -ENOMEM;
4919 goto err_alloc_etherdev;
4920 }
4921
4922 SET_NETDEV_DEV(netdev, &pdev->dev);
4923
4924 pci_set_drvdata(pdev, netdev);
4925 adapter = netdev_priv(netdev);
4926
4927 adapter->netdev = netdev;
4928 adapter->pdev = pdev;
4929
4930 hw = &adapter->hw;
4931 hw->back = adapter;
4932
4933 adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
4934 iavf_driver_name);
4935 if (!adapter->wq) {
4936 err = -ENOMEM;
4937 goto err_alloc_wq;
4938 }
4939
4940 adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
4941 iavf_change_state(adapter, __IAVF_STARTUP);
4942
4943 /* Call save state here because it relies on the adapter struct. */
4944 pci_save_state(pdev);
4945
4946 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
4947 pci_resource_len(pdev, 0));
4948 if (!hw->hw_addr) {
4949 err = -EIO;
4950 goto err_ioremap;
4951 }
4952 hw->vendor_id = pdev->vendor;
4953 hw->device_id = pdev->device;
4954 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
4955 hw->subsystem_vendor_id = pdev->subsystem_vendor;
4956 hw->subsystem_device_id = pdev->subsystem_device;
4957 hw->bus.device = PCI_SLOT(pdev->devfn);
4958 hw->bus.func = PCI_FUNC(pdev->devfn);
4959 hw->bus.bus_id = pdev->bus->number;
4960
4961 /* set up the locks for the AQ, do this only once in probe
4962 * and destroy them only once in remove
4963 */
4964 mutex_init(&adapter->crit_lock);
4965 mutex_init(&adapter->client_lock);
4966 mutex_init(&hw->aq.asq_mutex);
4967 mutex_init(&hw->aq.arq_mutex);
4968
4969 spin_lock_init(&adapter->mac_vlan_list_lock);
4970 spin_lock_init(&adapter->cloud_filter_list_lock);
4971 spin_lock_init(&adapter->fdir_fltr_lock);
4972 spin_lock_init(&adapter->adv_rss_lock);
4973
4974 INIT_LIST_HEAD(&adapter->mac_filter_list);
4975 INIT_LIST_HEAD(&adapter->vlan_filter_list);
4976 INIT_LIST_HEAD(&adapter->cloud_filter_list);
4977 INIT_LIST_HEAD(&adapter->fdir_list_head);
4978 INIT_LIST_HEAD(&adapter->adv_rss_list_head);
4979
4980 INIT_WORK(&adapter->reset_task, iavf_reset_task);
4981 INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
4982 INIT_WORK(&adapter->finish_config, iavf_finish_config);
4983 INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
4984 INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
4985
4986 /* Setup the wait queue for indicating transition to down status */
4987 init_waitqueue_head(&adapter->down_waitqueue);
4988
4989 /* Setup the wait queue for indicating transition to running state */
4990 init_waitqueue_head(&adapter->reset_waitqueue);
4991
4992 /* Setup the wait queue for indicating virtchannel events */
4993 init_waitqueue_head(&adapter->vc_waitqueue);
4994
4995 queue_delayed_work(adapter->wq, &adapter->watchdog_task,
4996 msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
4997 /* Initialization goes on in the work. Do not add more of it below. */
4998 return 0;
4999
5000 err_ioremap:
5001 destroy_workqueue(adapter->wq);
5002 err_alloc_wq:
5003 free_netdev(netdev);
5004 err_alloc_etherdev:
5005 pci_release_regions(pdev);
5006 err_pci_reg:
5007 err_dma:
5008 pci_disable_device(pdev);
5009 return err;
5010 }
5011
5012 /**
5013 * iavf_suspend - Power management suspend routine
5014 * @dev_d: device info pointer
5015 *
5016 * Called when the system (VM) is entering sleep/suspend.
5017 **/
5018 static int __maybe_unused iavf_suspend(struct device *dev_d)
5019 {
5020 struct net_device *netdev = dev_get_drvdata(dev_d);
5021 struct iavf_adapter *adapter = netdev_priv(netdev);
5022
5023 netif_device_detach(netdev);
5024
5025 while (!mutex_trylock(&adapter->crit_lock))
5026 usleep_range(500, 1000);
5027
5028 if (netif_running(netdev)) {
5029 rtnl_lock();
5030 iavf_down(adapter);
5031 rtnl_unlock();
5032 }
5033 iavf_free_misc_irq(adapter);
5034 iavf_reset_interrupt_capability(adapter);
5035
5036 mutex_unlock(&adapter->crit_lock);
5037
5038 return 0;
5039 }
5040
5041 /**
5042 * iavf_resume - Power management resume routine
5043 * @dev_d: device info pointer
5044 *
5045 * Called when the system (VM) is resumed from sleep/suspend.
5046 **/
5047 static int __maybe_unused iavf_resume(struct device *dev_d)
5048 {
5049 struct pci_dev *pdev = to_pci_dev(dev_d);
5050 struct iavf_adapter *adapter;
5051 u32 err;
5052
5053 adapter = iavf_pdev_to_adapter(pdev);
5054
5055 pci_set_master(pdev);
5056
5057 rtnl_lock();
5058 err = iavf_set_interrupt_capability(adapter);
5059 if (err) {
5060 rtnl_unlock();
5061 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
5062 return err;
5063 }
5064 err = iavf_request_misc_irq(adapter);
5065 rtnl_unlock();
5066 if (err) {
5067 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
5068 return err;
5069 }
5070
5071 queue_work(adapter->wq, &adapter->reset_task);
5072
5073 netif_device_attach(adapter->netdev);
5074
5075 return err;
5076 }
5077
5078 /**
5079 * iavf_remove - Device Removal Routine
5080 * @pdev: PCI device information struct
5081 *
5082 * iavf_remove is called by the PCI subsystem to alert the driver
5083  * that it should release a PCI device. This could be caused by a
5084 * Hot-Plug event, or because the driver is going to be removed from
5085 * memory.
5086 **/
5087 static void iavf_remove(struct pci_dev *pdev)
5088 {
5089 struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
5090 struct iavf_fdir_fltr *fdir, *fdirtmp;
5091 struct iavf_vlan_filter *vlf, *vlftmp;
5092 struct iavf_cloud_filter *cf, *cftmp;
5093 struct iavf_adv_rss *rss, *rsstmp;
5094 struct iavf_mac_filter *f, *ftmp;
5095 struct net_device *netdev;
5096 struct iavf_hw *hw;
5097 int err;
5098
5099 netdev = adapter->netdev;
5100 hw = &adapter->hw;
5101
5102 if (test_and_set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
5103 return;
5104
5105 /* Wait until port initialization is complete.
5106 * There are flows where register/unregister netdev may race.
5107 */
5108 while (1) {
5109 mutex_lock(&adapter->crit_lock);
5110 if (adapter->state == __IAVF_RUNNING ||
5111 adapter->state == __IAVF_DOWN ||
5112 adapter->state == __IAVF_INIT_FAILED) {
5113 mutex_unlock(&adapter->crit_lock);
5114 break;
5115 }
5116 /* Simply return if we already went through iavf_shutdown */
5117 if (adapter->state == __IAVF_REMOVE) {
5118 mutex_unlock(&adapter->crit_lock);
5119 return;
5120 }
5121
5122 mutex_unlock(&adapter->crit_lock);
5123 usleep_range(500, 1000);
5124 }
5125 cancel_delayed_work_sync(&adapter->watchdog_task);
5126 cancel_work_sync(&adapter->finish_config);
5127
5128 rtnl_lock();
5129 if (adapter->netdev_registered) {
5130 unregister_netdevice(netdev);
5131 adapter->netdev_registered = false;
5132 }
5133 rtnl_unlock();
5134
5135 if (CLIENT_ALLOWED(adapter)) {
5136 err = iavf_lan_del_device(adapter);
5137 if (err)
5138 dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
5139 err);
5140 }
5141
5142 mutex_lock(&adapter->crit_lock);
5143 dev_info(&adapter->pdev->dev, "Removing device\n");
5144 iavf_change_state(adapter, __IAVF_REMOVE);
5145
5146 iavf_request_reset(adapter);
5147 msleep(50);
5148 /* If the FW isn't responding, kick it once, but only once. */
5149 if (!iavf_asq_done(hw)) {
5150 iavf_request_reset(adapter);
5151 msleep(50);
5152 }
5153
5154 iavf_misc_irq_disable(adapter);
5155 /* Shut down all the garbage mashers on the detention level */
5156 cancel_work_sync(&adapter->reset_task);
5157 cancel_delayed_work_sync(&adapter->watchdog_task);
5158 cancel_work_sync(&adapter->adminq_task);
5159 cancel_delayed_work_sync(&adapter->client_task);
5160
5161 adapter->aq_required = 0;
5162 adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
5163
5164 iavf_free_all_tx_resources(adapter);
5165 iavf_free_all_rx_resources(adapter);
5166 iavf_free_misc_irq(adapter);
5167
5168 iavf_reset_interrupt_capability(adapter);
5169 iavf_free_q_vectors(adapter);
5170
5171 iavf_free_rss(adapter);
5172
5173 if (hw->aq.asq.count)
5174 iavf_shutdown_adminq(hw);
5175
5176 /* destroy the locks only once, here */
5177 mutex_destroy(&hw->aq.arq_mutex);
5178 mutex_destroy(&hw->aq.asq_mutex);
5179 mutex_destroy(&adapter->client_lock);
5180 mutex_unlock(&adapter->crit_lock);
5181 mutex_destroy(&adapter->crit_lock);
5182
5183 iounmap(hw->hw_addr);
5184 pci_release_regions(pdev);
5185 iavf_free_queues(adapter);
5186 kfree(adapter->vf_res);
5187 spin_lock_bh(&adapter->mac_vlan_list_lock);
5188 /* If we got removed before an up/down sequence, we've got a filter
5189 * hanging out there that we need to get rid of.
5190 */
5191 list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
5192 list_del(&f->list);
5193 kfree(f);
5194 }
5195 list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
5196 list) {
5197 list_del(&vlf->list);
5198 kfree(vlf);
5199 }
5200
5201 spin_unlock_bh(&adapter->mac_vlan_list_lock);
5202
5203 spin_lock_bh(&adapter->cloud_filter_list_lock);
5204 list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
5205 list_del(&cf->list);
5206 kfree(cf);
5207 }
5208 spin_unlock_bh(&adapter->cloud_filter_list_lock);
5209
5210 spin_lock_bh(&adapter->fdir_fltr_lock);
5211 list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
5212 list_del(&fdir->list);
5213 kfree(fdir);
5214 }
5215 spin_unlock_bh(&adapter->fdir_fltr_lock);
5216
5217 spin_lock_bh(&adapter->adv_rss_lock);
5218 list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
5219 list) {
5220 list_del(&rss->list);
5221 kfree(rss);
5222 }
5223 spin_unlock_bh(&adapter->adv_rss_lock);
5224
5225 destroy_workqueue(adapter->wq);
5226
5227 free_netdev(netdev);
5228
5229 pci_disable_device(pdev);
5230 }
5231
5232 static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
5233
5234 static struct pci_driver iavf_driver = {
5235 .name = iavf_driver_name,
5236 .id_table = iavf_pci_tbl,
5237 .probe = iavf_probe,
5238 .remove = iavf_remove,
5239 .driver.pm = &iavf_pm_ops,
5240 .shutdown = iavf_shutdown,
5241 };
5242
5243 /**
5244 * iavf_init_module - Driver Registration Routine
5245 *
5246 * iavf_init_module is the first routine called when the driver is
5247 * loaded. All it does is register with the PCI subsystem.
5248 **/
5249 static int __init iavf_init_module(void)
5250 {
5251 pr_info("iavf: %s\n", iavf_driver_string);
5252
5253 pr_info("%s\n", iavf_copyright);
5254
5255 return pci_register_driver(&iavf_driver);
5256 }
5257
5258 module_init(iavf_init_module);
5259
5260 /**
5261 * iavf_exit_module - Driver Exit Cleanup Routine
5262 *
5263 * iavf_exit_module is called just before the driver is removed
5264 * from memory.
5265 **/
5266 static void __exit iavf_exit_module(void)
5267 {
5268 pci_unregister_driver(&iavf_driver);
5269 }
5270
5271 module_exit(iavf_exit_module);
5272
5273 /* iavf_main.c */
5274