// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 */

/* File aq_nic.c: Definition of common code for NIC. */

#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_main.h"

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>

static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");

static unsigned int aq_itr_tx;
module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");

static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");

static void aq_nic_update_ndev_stats(struct aq_nic_s *self);

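/* Seed the RSS parameters with a fixed default hash key and spread the
 * indirection table entries evenly across the RSS queues. With a
 * power-of-two queue count (as produced by aq_nic_cfg_start()), the
 * bitwise AND below acts as a cheap modulo.
 */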
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	struct aq_rss_parameters *rss_params = &cfg->aq_rss;
	int i = 0;

	static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};

	rss_params->hash_secret_key_size = sizeof(rss_key);
	memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
	rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;

	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}

/* Checks hw_caps and 'corrects' aq_nic_cfg at runtime */
void aq_nic_cfg_start(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	cfg->tcs = AQ_CFG_TCS_DEF;

	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

	cfg->itr = aq_itr;
	cfg->tx_itr = aq_itr_tx;
	cfg->rx_itr = aq_itr_rx;

	cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
	cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
	cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
	cfg->flow_control = AQ_CFG_FC_MODE;

	cfg->mtu = AQ_CFG_MTU_DEF;
	cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
	cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

	cfg->is_lro = AQ_CFG_IS_LRO_DEF;

	/* descriptors */
	cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
	cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);

	/* rss rings */
	cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
	cfg->vecs = min(cfg->vecs, num_online_cpus());
	if (self->irqvecs > AQ_HW_SERVICE_IRQS)
		cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
	/* cfg->vecs should be power of 2 for RSS */
	if (cfg->vecs >= 8U)
		cfg->vecs = 8U;
	else if (cfg->vecs >= 4U)
		cfg->vecs = 4U;
	else if (cfg->vecs >= 2U)
		cfg->vecs = 2U;
	else
		cfg->vecs = 1U;

	cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);

	aq_nic_rss_init(self, cfg->num_rss_queues);

	cfg->irq_type = aq_pci_func_get_irq_type(self);

	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
	    (cfg->aq_hw_caps->vecs == 1U) ||
	    (cfg->vecs == 1U)) {
		cfg->is_rss = 0U;
		cfg->vecs = 1U;
	}

	/* Check if we have enough vectors allocated for
	 * the link status IRQ. If not, we'll learn the link
	 * state from the slower service task instead.
	 */
	if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
		cfg->link_irq_vec = cfg->vecs;
	else
		cfg->link_irq_vec = 0;

	cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
	cfg->features = cfg->aq_hw_caps->hw_features;
	cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX);
	cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX);
	cfg->is_vlan_force_promisc = true;
}

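/* Reads the link state from firmware and propagates any change: refreshes
 * interrupt moderation and RX flow control on a speed change, and toggles
 * the netdev carrier and TX queues when the link comes up or goes down.
 */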
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
	int err = self->aq_fw_ops->update_link_status(self->aq_hw);
	u32 fc = 0;

	if (err)
		return err;

	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
		pr_info("%s: link change old %d new %d\n",
			AQ_CFG_DRV_NAME, self->link_status.mbps,
			self->aq_hw->aq_link_status.mbps);
		aq_nic_update_interrupt_moderation_settings(self);

		/* Driver has to update flow control settings on RX block
		 * on any link event.
		 * We should query FW whether it negotiated FC.
		 */
		if (self->aq_fw_ops->get_flow_control)
			self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
		if (self->aq_hw_ops->hw_set_fc)
			self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
	}

	self->link_status = self->aq_hw->aq_link_status;
	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
		aq_utils_obj_set(&self->flags,
				 AQ_NIC_FLAG_STARTED);
		aq_utils_obj_clear(&self->flags,
				   AQ_NIC_LINK_DOWN);
		netif_carrier_on(self->ndev);
		netif_tx_wake_all_queues(self->ndev);
	}
	if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
		netif_carrier_off(self->ndev);
		netif_tx_disable(self->ndev);
		aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
	}
	return 0;
}

static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
{
	struct aq_nic_s *self = private;

	if (!self)
		return IRQ_NONE;

	aq_nic_update_link_status(self);

	self->aq_hw_ops->hw_irq_enable(self->aq_hw,
				       BIT(self->aq_nic_cfg.link_irq_vec));
	return IRQ_HANDLED;
}

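/* Periodic service work: refreshes the link state and, when the firmware
 * supports it, pulls updated hardware counters under fwreq_mutex before
 * mirroring them into the netdev statistics.
 */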
static void aq_nic_service_task(struct work_struct *work)
{
	struct aq_nic_s *self = container_of(work, struct aq_nic_s,
					     service_task);
	int err;

	if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
		return;

	err = aq_nic_update_link_status(self);
	if (err)
		return;

	mutex_lock(&self->fwreq_mutex);
	if (self->aq_fw_ops->update_stats)
		self->aq_fw_ops->update_stats(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);

	aq_nic_update_ndev_stats(self);
}

static void aq_nic_service_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, service_timer);

	mod_timer(&self->service_timer,
		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);

	aq_ndev_schedule_work(&self->service_task);
}

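/* In polling mode no per-vector interrupts are used: this timer invokes
 * each vector's ISR directly and then re-arms itself.
 */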
static void aq_nic_polling_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, polling_timer);
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_isr(i, (void *)aq_vec);

	mod_timer(&self->polling_timer, jiffies +
		  AQ_CFG_POLLING_TIMER_INTERVAL);
}

int aq_nic_ndev_register(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev) {
		err = -EINVAL;
		goto err_exit;
	}

	err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops);
	if (err)
		goto err_exit;

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
						 self->ndev->dev_addr);
	mutex_unlock(&self->fwreq_mutex);
	if (err)
		goto err_exit;

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
	{
		static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

		ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
	}
#endif

	for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
	     self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] =
		    aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
		if (!self->aq_vec[self->aq_vecs]) {
			err = -ENOMEM;
			goto err_exit;
		}
	}

	netif_carrier_off(self->ndev);

	netif_tx_disable(self->ndev);

	err = register_netdev(self->ndev);
	if (err)
		goto err_exit;

err_exit:
	return err;
}

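/* Propagates hardware capabilities into the netdev: offload feature flags,
 * VLAN features, and the MTU bounds derived from the hardware frame size.
 */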
void aq_nic_ndev_init(struct aq_nic_s *self)
{
	const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

	self->ndev->hw_features |= aq_hw_caps->hw_features;
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				     NETIF_F_RXHASH | NETIF_F_SG |
				     NETIF_F_LRO | NETIF_F_TSO;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
	self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
}

void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}

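/* Brings the adapter out of reset: resets the hardware under fwreq_mutex,
 * programs the MAC address via hw_init, then initializes every allocated
 * vector. The carrier stays off until a link-status update reports link.
 */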
int aq_nic_init(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	self->power_state = AQ_HW_POWER_STATE_D0;
	mutex_lock(&self->fwreq_mutex);
	err = self->aq_hw_ops->hw_reset(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_init(self->aq_hw,
				       aq_nic_get_ndev(self)->dev_addr);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);

	netif_carrier_off(self->ndev);

err_exit:
	return err;
}

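/* Starts the datapath: programs filters, starts the vectors and the
 * hardware, kicks off the service timer, then either arms the polling
 * timer or requests the per-vector and (optional) link-status IRQs
 * before enabling the TX queues.
 */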
int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	int err = 0;
	unsigned int i = 0U;

	err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						     self->mc_list.ar,
						     self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
						    self->packet_filter);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = self->aq_hw_ops->hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = aq_nic_update_interrupt_moderation_settings(self);
	if (err)
		goto err_exit;

	INIT_WORK(&self->service_task, aq_nic_service_task);

	timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
	aq_nic_service_timer_cb(&self->service_timer);

	if (self->aq_nic_cfg.is_polling) {
		timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		for (i = 0U, aq_vec = self->aq_vec[0];
		     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
			err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
						    aq_vec_isr, aq_vec,
						    aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		if (self->aq_nic_cfg.link_irq_vec) {
			int irqvec = pci_irq_vector(self->pdev,
						    self->aq_nic_cfg.link_irq_vec);
			err = request_threaded_irq(irqvec, NULL,
						   aq_linkstate_threaded_isr,
						   IRQF_SHARED | IRQF_ONESHOT,
						   self->ndev->name, self);
			if (err < 0)
				goto err_exit;
			self->msix_entry_mask |= (1 << self->aq_nic_cfg.link_irq_vec);
		}

		err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
						     AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	netif_tx_start_all_queues(self->ndev);

err_exit:
	return err;
}

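/* Maps an skb onto TX descriptors. GSO and VLAN-insert packets consume an
 * extra context descriptor first; the linear part becomes the SOP buffer
 * and each page fragment is mapped in AQ_CFG_TX_FRAME_MAX-sized chunks.
 * Returns the number of descriptors used, or 0 after unwinding all DMA
 * mappings if any mapping fails.
 */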
static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
				   struct sk_buff *skb,
				   struct aq_ring_s *ring)
{
	unsigned int ret = 0U;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int frag_count = 0U;
	unsigned int dx = ring->sw_tail;
	struct aq_ring_buff_s *first = NULL;
	struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
	bool need_context_tag = false;

	dx_buff->flags = 0U;

	if (unlikely(skb_is_gso(skb))) {
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		dx_buff->is_gso = 1U;
		dx_buff->len_pkt = skb->len;
		dx_buff->len_l2 = ETH_HLEN;
		dx_buff->len_l3 = ip_hdrlen(skb);
		dx_buff->len_l4 = tcp_hdrlen(skb);
		dx_buff->eop_index = 0xffffU;
		dx_buff->is_ipv6 =
			(ip_hdr(skb)->version == 6) ? 1U : 0U;
		need_context_tag = true;
	}

	if (self->aq_nic_cfg.is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
		dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
		dx_buff->len_pkt = skb->len;
		dx_buff->is_vlan = 1U;
		need_context_tag = true;
	}

	if (need_context_tag) {
		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];
		dx_buff->flags = 0U;
		++ret;
	}

	dx_buff->len = skb_headlen(skb);
	dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
				     skb->data,
				     dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
		goto exit;

	first = dx_buff;
	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ?
			1U : 0U;

		if (ip_hdr(skb)->version == 4) {
			dx_buff->is_tcp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ip_hdr(skb)->protocol == IPPROTO_UDP) ?
					1U : 0U;
		} else if (ip_hdr(skb)->version == 6) {
			dx_buff->is_tcp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP) ?
					1U : 0U;
			dx_buff->is_udp_cso =
				(ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP) ?
					1U : 0U;
		}
	}

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);

		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
						   frag,
						   buff_offset,
						   buff_size,
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
						       frag_pa)))
				goto mapping_error;

			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= buff_size;
			buff_offset += buff_size;

			++ret;
		}
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	dx_buff->skb = skb;
	goto exit;

mapping_error:
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!dx_buff->is_gso && !dx_buff->is_vlan && dx_buff->pa) {
			if (unlikely(dx_buff->is_sop)) {
				dma_unmap_single(aq_nic_get_dev(self),
						 dx_buff->pa,
						 dx_buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(aq_nic_get_dev(self),
					       dx_buff->pa,
					       dx_buff->len,
					       DMA_TO_DEVICE);
			}
		}
	}

exit:
	return ret;
}

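/* Transmit entry point called from the ndo_start_xmit path: selects the TX
 * ring from the queue mapping, drops oversized skbs, maps the skb onto
 * descriptors and hands them to the hardware. Returns NETDEV_TX_OK or
 * NETDEV_TX_BUSY when the ring is stopped or mapping fails.
 */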
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
	unsigned int tc = 0U;
	int err = NETDEV_TX_OK;

	frags = skb_shinfo(skb)->nr_frags + 1;

	ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];

	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	aq_ring_update_queue_state(ring);

	/* Above status update may stop the queue. Check this. */
	if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	frags = aq_nic_map_skb(self, skb, ring);

	if (likely(frags)) {
		err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
						       ring, frags);
		if (err >= 0) {
			++ring->stats.tx.packets;
			ring->stats.tx.bytes += skb->len;
		}
	} else {
		err = NETDEV_TX_BUSY;
	}

err_exit:
	return err;
}

int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}

int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
	int err = 0;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
	if (err < 0)
		goto err_exit;

	self->packet_filter = flags;

err_exit:
	return err;
}

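/* Rebuilds the combined unicast + multicast filter list from the netdev.
 * If either list overflows AQ_HW_MULTICAST_ADDRESS_MAX, the driver falls
 * back to IFF_PROMISC (for unicast) or IFF_ALLMULTI (for multicast)
 * instead of programming individual addresses.
 */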
int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
	const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	unsigned int packet_filter = ndev->flags;
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->mc_list.count = 0;
	if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_PROMISC;
	} else {
		netdev_for_each_uc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);
		}
	}

	cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
	if (cfg->is_mc_list_enabled) {
		if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
			packet_filter |= IFF_ALLMULTI;
		} else {
			netdev_for_each_mc_addr(ha, ndev) {
				ether_addr_copy(self->mc_list.ar[i++],
						ha->addr);
			}
		}
	}

	if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
		self->mc_list.count = i;
		err = hw_ops->hw_multicast_list_set(self->aq_hw,
						    self->mc_list.ar,
						    self->mc_list.count);
		if (err < 0)
			return err;
	}
	return aq_nic_set_packet_filter(self, packet_filter);
}

int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	self->aq_nic_cfg.mtu = new_mtu;

	return 0;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops->hw_set_mac_address(self->aq_hw, ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}

int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
	u32 *regs_buff = p;
	int err = 0;

	regs->version = 1;

	err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
					   self->aq_nic_cfg.aq_hw_caps,
					   regs_buff);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}

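/* Fills the ethtool statistics array: firmware/MAC counters first, in a
 * fixed order that must match the corresponding ethtool string table,
 * followed by each vector's software ring counters.
 */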
void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	unsigned int i = 0U;
	unsigned int count = 0U;
	struct aq_vec_s *aq_vec = NULL;
	struct aq_stats_s *stats;

	if (self->aq_fw_ops->update_stats) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->update_stats(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}
	stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	if (!stats)
		goto err_exit;

	data[i] = stats->uprc + stats->mprc + stats->bprc;
	data[++i] = stats->uprc;
	data[++i] = stats->mprc;
	data[++i] = stats->bprc;
	data[++i] = stats->erpt;
	data[++i] = stats->uptc + stats->mptc + stats->bptc;
	data[++i] = stats->uptc;
	data[++i] = stats->mptc;
	data[++i] = stats->bptc;
	data[++i] = stats->ubrc;
	data[++i] = stats->ubtc;
	data[++i] = stats->mbrc;
	data[++i] = stats->mbtc;
	data[++i] = stats->bbrc;
	data[++i] = stats->bbtc;
	data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
	data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
	data[++i] = stats->dma_pkt_rc;
	data[++i] = stats->dma_pkt_tc;
	data[++i] = stats->dma_oct_rc;
	data[++i] = stats->dma_oct_tc;
	data[++i] = stats->dpc;

	i++;

	data += i;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		data += count;
		aq_vec_get_sw_stats(aq_vec, data, &count);
	}

err_exit:;
}

static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
	struct net_device *ndev = self->ndev;
	struct aq_stats_s *stats =
		self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);

	ndev->stats.rx_packets = stats->dma_pkt_rc;
	ndev->stats.rx_bytes = stats->dma_oct_rc;
	ndev->stats.rx_errors = stats->erpr;
	ndev->stats.rx_dropped = stats->dpc;
	ndev->stats.tx_packets = stats->dma_pkt_tc;
	ndev->stats.tx_bytes = stats->dma_oct_tc;
	ndev->stats.tx_errors = stats->erpt;
	ndev->stats.multicast = stats->mprc;
}

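/* Reports ethtool link settings. Supported modes are built from the
 * hardware capability mask, advertised modes from the currently
 * configured speed mask; all rates are full duplex only.
 */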
void aq_nic_get_link_ksettings(struct aq_nic_s *self,
			       struct ethtool_link_ksettings *cmd)
{
	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		cmd->base.port = PORT_FIBRE;
	else
		cmd->base.port = PORT_TP;
	/* This driver supports only 10G capable adapters, so DUPLEX_FULL */
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->flow_control)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Pause);

	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	if (self->aq_nic_cfg.is_autoneg)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     5000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     2500baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);

	if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Pause);

	/* Asym is when either RX or TX, but not both */
	if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
	    !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
}

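/* Applies ethtool link settings: either re-enables autonegotiation over
 * all hardware-supported rates, or forces a single fixed rate after
 * validating it against the capability mask. The chosen rate mask is
 * pushed to firmware under fwreq_mutex.
 */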
int aq_nic_set_link_ksettings(struct aq_nic_s *self,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 speed = 0U;
	u32 rate = 0U;
	int err = 0;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
		self->aq_nic_cfg.is_autoneg = true;
	} else {
		speed = cmd->base.speed;

		switch (speed) {
		case SPEED_100:
			rate = AQ_NIC_RATE_100M;
			break;

		case SPEED_1000:
			rate = AQ_NIC_RATE_1G;
			break;

		case SPEED_2500:
			rate = AQ_NIC_RATE_2GS;
			break;

		case SPEED_5000:
			rate = AQ_NIC_RATE_5G;
			break;

		case SPEED_10000:
			rate = AQ_NIC_RATE_10G;
			break;

		default:
			err = -1;
			goto err_exit;
		}
		if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
			err = -1;
			goto err_exit;
		}

		self->aq_nic_cfg.is_autoneg = false;
	}

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	self->aq_nic_cfg.link_speed_msk = rate;

err_exit:
	return err;
}

struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}

u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	u32 fw_version = 0U;

	self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version);

	return fw_version;
}

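/* Tears down the datapath in roughly the reverse order of aq_nic_start():
 * stops TX and carrier, cancels the service timer/task, disables and
 * releases IRQs (or the polling timer), stops every vector and finally
 * the hardware.
 */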
int aq_nic_stop(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	netif_tx_disable(self->ndev);
	netif_carrier_off(self->ndev);

	del_timer_sync(&self->service_timer);
	cancel_work_sync(&self->service_task);

	self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);
	else
		aq_pci_func_free_irqs(self);

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_stop(aq_vec);

	return self->aq_hw_ops->hw_stop(self->aq_hw);
}

void aq_nic_deinit(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_deinit(aq_vec);

	if (likely(self->aq_fw_ops->deinit)) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->deinit(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}

	if (self->power_state != AQ_HW_POWER_STATE_D0 ||
	    self->aq_hw->aq_nic_cfg->wol)
		if (likely(self->aq_fw_ops->set_power)) {
			mutex_lock(&self->fwreq_mutex);
			self->aq_fw_ops->set_power(self->aq_hw,
						   self->power_state,
						   self->ndev->dev_addr);
			mutex_unlock(&self->fwreq_mutex);
		}

err_exit:;
}

void aq_nic_free_vectors(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = ARRAY_SIZE(self->aq_vec); i--;) {
		if (self->aq_vec[i]) {
			aq_vec_free(self->aq_vec[i]);
			self->aq_vec[i] = NULL;
		}
	}

err_exit:;
}

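/* Suspend/resume hook: on a sleep or freeze event the NIC is stopped and
 * deinitialized with the netdev detached; on any other event it is
 * re-initialized and restarted. The transition runs under rtnl_lock when
 * the interface is up.
 */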
int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
{
	int err = 0;

	if (!netif_running(self->ndev)) {
		err = 0;
		goto out;
	}
	rtnl_lock();
	if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
		self->power_state = AQ_HW_POWER_STATE_D3;
		netif_device_detach(self->ndev);
		netif_tx_stop_all_queues(self->ndev);

		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;

		aq_nic_deinit(self);
	} else {
		err = aq_nic_init(self);
		if (err < 0)
			goto err_exit;

		err = aq_nic_start(self);
		if (err < 0)
			goto err_exit;

		netif_device_attach(self->ndev);
		netif_tx_start_all_queues(self->ndev);
	}

err_exit:
	rtnl_unlock();
out:
	return err;
}

void aq_nic_shutdown(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev)
		return;

	rtnl_lock();

	netif_device_detach(self->ndev);

	if (netif_running(self->ndev)) {
		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;
	}
	aq_nic_deinit(self);

err_exit:
	rtnl_unlock();
}