1 /* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2019 Broadcom Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
9 */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/gro.h>
41 #include <net/ip.h>
42 #include <net/tcp.h>
43 #include <net/udp.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <net/udp_tunnel.h>
47 #include <linux/workqueue.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/log2.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <net/page_pool/helpers.h>
58 #include <linux/align.h>
59 #include <net/netdev_queues.h>
60
61 #include "bnxt_hsi.h"
62 #include "bnxt.h"
63 #include "bnxt_hwrm.h"
64 #include "bnxt_ulp.h"
65 #include "bnxt_sriov.h"
66 #include "bnxt_ethtool.h"
67 #include "bnxt_dcb.h"
68 #include "bnxt_xdp.h"
69 #include "bnxt_ptp.h"
70 #include "bnxt_vfr.h"
71 #include "bnxt_tc.h"
72 #include "bnxt_devlink.h"
73 #include "bnxt_debugfs.h"
74
75 #define BNXT_TX_TIMEOUT (5 * HZ)
76 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
77 NETIF_MSG_TX_ERR)
78
79 MODULE_LICENSE("GPL");
80 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
81
82 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
83 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
84 #define BNXT_RX_COPY_THRESH 256
85
86 #define BNXT_TX_PUSH_THRESH 164
87
88 /* indexed by enum board_idx */
89 static const struct {
90 char *name;
91 } board_info[] = {
92 [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
93 [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
94 [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
95 [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
96 [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
97 [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
98 [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
99 [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
100 [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
101 [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
102 [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
103 [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
104 [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
105 [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
106 [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
107 [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
108 [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
109 [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
110 [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
111 [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
112 [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
113 [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
114 [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
115 [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
116 [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
117 [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
118 [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
119 [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
120 [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
121 [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
122 [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
123 [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
124 [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
125 [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
126 [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
127 [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
128 [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
129 [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
130 [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
131 [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
132 [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
133 [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
134 [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
135 [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
136 [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
137 };
138
139 static const struct pci_device_id bnxt_pci_tbl[] = {
140 { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
141 { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
142 { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
143 { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
144 { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
145 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
146 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
147 { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
148 { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
149 { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
150 { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
151 { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
152 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
153 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
154 { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
155 { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
156 { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
157 { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
158 { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
159 { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
160 { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
161 { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
162 { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
163 { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
164 { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
165 { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
166 { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
167 { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
168 { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
169 { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
170 { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
171 { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
172 { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
173 { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
174 { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
175 { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
176 { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
177 { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
178 { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57502_NPAR },
179 { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
180 { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57508_NPAR },
181 { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57502_NPAR },
182 { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
183 { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57508_NPAR },
184 { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
185 { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
186 #ifdef CONFIG_BNXT_SRIOV
187 { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
188 { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
189 { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
190 { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
191 { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
192 { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
193 { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
194 { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
195 { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
196 { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
197 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
198 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
199 { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
200 { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
201 { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
202 { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
203 { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
204 { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
205 { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
206 { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
207 { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
208 #endif
209 { 0 }
210 };
211
212 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
213
214 static const u16 bnxt_vf_req_snif[] = {
215 HWRM_FUNC_CFG,
216 HWRM_FUNC_VF_CFG,
217 HWRM_PORT_PHY_QCFG,
218 HWRM_CFA_L2_FILTER_ALLOC,
219 };
220
221 static const u16 bnxt_async_events_arr[] = {
222 ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
223 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
224 ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
225 ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
226 ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
227 ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
228 ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
229 ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
230 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
231 ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
232 ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
233 ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
234 ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
235 ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
236 ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
237 ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
238 };
239
240 static struct workqueue_struct *bnxt_pf_wq;
241
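/* Return true if the board index identifies a virtual function (VF) device. */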
static bool bnxt_vf_pciid(enum board_idx idx)
243 {
244 return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
245 idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
246 idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
247 idx == NETXTREME_E_P5_VF_HV);
248 }
249
250 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
251 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
252 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
253
254 #define BNXT_CP_DB_IRQ_DIS(db) \
255 writel(DB_CP_IRQ_DIS_FLAGS, db)
256
257 #define BNXT_DB_CQ(db, idx) \
258 writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
259
260 #define BNXT_DB_NQ_P5(db, idx) \
261 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), \
262 (db)->doorbell)
263
264 #define BNXT_DB_CQ_ARM(db, idx) \
265 writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
266
267 #define BNXT_DB_NQ_ARM_P5(db, idx) \
268 bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
269 (db)->doorbell)
270
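/* Ring the NQ doorbell: 64-bit doorbell format on P5 chips, legacy CP doorbell otherwise. */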
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
272 {
273 if (bp->flags & BNXT_FLAG_CHIP_P5)
274 BNXT_DB_NQ_P5(db, idx);
275 else
276 BNXT_DB_CQ(db, idx);
277 }
278
static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
280 {
281 if (bp->flags & BNXT_FLAG_CHIP_P5)
282 BNXT_DB_NQ_ARM_P5(db, idx);
283 else
284 BNXT_DB_CQ_ARM(db, idx);
285 }
286
static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
288 {
289 if (bp->flags & BNXT_FLAG_CHIP_P5)
290 bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
291 RING_CMP(idx), db->doorbell);
292 else
293 BNXT_DB_CQ(db, idx);
294 }
295
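/* Queue the firmware reset task (PF: dedicated bnxt_pf_wq, VF: system workqueue),
 * but only while a firmware reset is actually in progress.
 */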
static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
297 {
298 if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
299 return;
300
301 if (BNXT_PF(bp))
302 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
303 else
304 schedule_delayed_work(&bp->fw_reset_task, delay);
305 }
306
static void __bnxt_queue_sp_work(struct bnxt *bp)
308 {
309 if (BNXT_PF(bp))
310 queue_work(bnxt_pf_wq, &bp->sp_task);
311 else
312 schedule_work(&bp->sp_task);
313 }
314
static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event)
316 {
317 set_bit(event, &bp->sp_event);
318 __bnxt_queue_sp_work(bp);
319 }
320
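/* Schedule recovery for an RX ring error: a full reset on P5 chips, a single
 * ring reset on older chips.
 */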
static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
322 {
323 if (!rxr->bnapi->in_reset) {
324 rxr->bnapi->in_reset = true;
325 if (bp->flags & BNXT_FLAG_CHIP_P5)
326 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
327 else
328 set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
329 __bnxt_queue_sp_work(bp);
330 }
331 rxr->rx_next_cons = 0xffff;
332 }
333
void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
335 int idx)
336 {
337 struct bnxt_napi *bnapi = txr->bnapi;
338
339 if (bnapi->tx_fault)
340 return;
341
342 netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_pkts:%d cons:%u prod:%u i:%d)",
343 txr->txq_index, bnapi->tx_pkts,
344 txr->tx_cons, txr->tx_prod, idx);
345 WARN_ON_ONCE(1);
346 bnapi->tx_fault = 1;
347 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
348 }
349
350 const u16 bnxt_lhint_arr[] = {
351 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
352 TX_BD_FLAGS_LHINT_512_TO_1023,
353 TX_BD_FLAGS_LHINT_1024_TO_2047,
354 TX_BD_FLAGS_LHINT_1024_TO_2047,
355 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
356 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
357 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
358 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
359 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
360 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
361 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
362 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
363 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
364 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
365 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
366 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
367 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
368 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
369 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
370 };
371
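/* Return the CFA action (switchdev port ID) carried in the skb's metadata dst, or 0. */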
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
373 {
374 struct metadata_dst *md_dst = skb_metadata_dst(skb);
375
376 if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
377 return 0;
378
379 return md_dst->u.port_info.port_id;
380 }
381
static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
383 u16 prod)
384 {
385 bnxt_db_write(bp, &txr->tx_db, prod);
386 txr->kick_pending = 0;
387 }
388
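/* Main transmit routine.  Small packets may be pushed inline through the
 * doorbell (push mode); all others are DMA mapped onto the TX ring.
 */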
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
390 {
391 struct bnxt *bp = netdev_priv(dev);
392 struct tx_bd *txbd;
393 struct tx_bd_ext *txbd1;
394 struct netdev_queue *txq;
395 int i;
396 dma_addr_t mapping;
397 unsigned int length, pad = 0;
398 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
399 u16 prod, last_frag;
400 struct pci_dev *pdev = bp->pdev;
401 struct bnxt_tx_ring_info *txr;
402 struct bnxt_sw_tx_bd *tx_buf;
403 __le32 lflags = 0;
404
405 i = skb_get_queue_mapping(skb);
406 if (unlikely(i >= bp->tx_nr_rings)) {
407 dev_kfree_skb_any(skb);
408 dev_core_stats_tx_dropped_inc(dev);
409 return NETDEV_TX_OK;
410 }
411
412 txq = netdev_get_tx_queue(dev, i);
413 txr = &bp->tx_ring[bp->tx_ring_map[i]];
414 prod = txr->tx_prod;
415
416 free_size = bnxt_tx_avail(bp, txr);
417 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
418 /* We must have raced with NAPI cleanup */
419 if (net_ratelimit() && txr->kick_pending)
420 netif_warn(bp, tx_err, dev,
421 "bnxt: ring busy w/ flush pending!\n");
422 if (!netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
423 bp->tx_wake_thresh))
424 return NETDEV_TX_BUSY;
425 }
426
427 if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
428 goto tx_free;
429
430 length = skb->len;
431 len = skb_headlen(skb);
432 last_frag = skb_shinfo(skb)->nr_frags;
433
434 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
435
436 txbd->tx_bd_opaque = prod;
437
438 tx_buf = &txr->tx_buf_ring[prod];
439 tx_buf->skb = skb;
440 tx_buf->nr_frags = last_frag;
441
442 vlan_tag_flags = 0;
443 cfa_action = bnxt_xmit_get_cfa_action(skb);
444 if (skb_vlan_tag_present(skb)) {
445 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
446 skb_vlan_tag_get(skb);
447 /* Currently supports 8021Q, 8021AD vlan offloads
448 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
449 */
450 if (skb->vlan_proto == htons(ETH_P_8021Q))
451 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
452 }
453
454 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
455 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
456
457 if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
458 atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
459 if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
460 &ptp->tx_hdr_off)) {
461 if (vlan_tag_flags)
462 ptp->tx_hdr_off += VLAN_HLEN;
463 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
464 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
465 } else {
466 atomic_inc(&bp->ptp_cfg->tx_avail);
467 }
468 }
469 }
470
471 if (unlikely(skb->no_fcs))
472 lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
473
474 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
475 !lflags) {
476 struct tx_push_buffer *tx_push_buf = txr->tx_push;
477 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
478 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
479 void __iomem *db = txr->tx_db.doorbell;
480 void *pdata = tx_push_buf->data;
481 u64 *end;
482 int j, push_len;
483
484 /* Set COAL_NOW to be ready quickly for the next push */
485 tx_push->tx_bd_len_flags_type =
486 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
487 TX_BD_TYPE_LONG_TX_BD |
488 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
489 TX_BD_FLAGS_COAL_NOW |
490 TX_BD_FLAGS_PACKET_END |
491 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
492
493 if (skb->ip_summed == CHECKSUM_PARTIAL)
494 tx_push1->tx_bd_hsize_lflags =
495 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
496 else
497 tx_push1->tx_bd_hsize_lflags = 0;
498
499 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
500 tx_push1->tx_bd_cfa_action =
501 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
502
503 end = pdata + length;
504 end = PTR_ALIGN(end, 8) - 1;
505 *end = 0;
506
507 skb_copy_from_linear_data(skb, pdata, len);
508 pdata += len;
509 for (j = 0; j < last_frag; j++) {
510 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
511 void *fptr;
512
513 fptr = skb_frag_address_safe(frag);
514 if (!fptr)
515 goto normal_tx;
516
517 memcpy(pdata, fptr, skb_frag_size(frag));
518 pdata += skb_frag_size(frag);
519 }
520
521 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
522 txbd->tx_bd_haddr = txr->data_mapping;
523 prod = NEXT_TX(prod);
524 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
525 memcpy(txbd, tx_push1, sizeof(*txbd));
526 prod = NEXT_TX(prod);
527 tx_push->doorbell =
528 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
529 WRITE_ONCE(txr->tx_prod, prod);
530
531 tx_buf->is_push = 1;
532 netdev_tx_sent_queue(txq, skb->len);
533 wmb(); /* Sync is_push and byte queue before pushing data */
534
535 push_len = (length + sizeof(*tx_push) + 7) / 8;
536 if (push_len > 16) {
537 __iowrite64_copy(db, tx_push_buf, 16);
538 __iowrite32_copy(db + 4, tx_push_buf + 1,
539 (push_len - 16) << 1);
540 } else {
541 __iowrite64_copy(db, tx_push_buf, push_len);
542 }
543
544 goto tx_done;
545 }
546
547 normal_tx:
548 if (length < BNXT_MIN_PKT_SIZE) {
549 pad = BNXT_MIN_PKT_SIZE - length;
550 if (skb_pad(skb, pad))
551 /* SKB already freed. */
552 goto tx_kick_pending;
553 length = BNXT_MIN_PKT_SIZE;
554 }
555
556 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
557
558 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
559 goto tx_free;
560
561 dma_unmap_addr_set(tx_buf, mapping, mapping);
562 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
563 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
564
565 txbd->tx_bd_haddr = cpu_to_le64(mapping);
566
567 prod = NEXT_TX(prod);
568 txbd1 = (struct tx_bd_ext *)
569 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
570
571 txbd1->tx_bd_hsize_lflags = lflags;
572 if (skb_is_gso(skb)) {
573 u32 hdr_len;
574
575 if (skb->encapsulation)
576 hdr_len = skb_inner_tcp_all_headers(skb);
577 else
578 hdr_len = skb_tcp_all_headers(skb);
579
580 txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
581 TX_BD_FLAGS_T_IPID |
582 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
583 length = skb_shinfo(skb)->gso_size;
584 txbd1->tx_bd_mss = cpu_to_le32(length);
585 length += hdr_len;
586 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
587 txbd1->tx_bd_hsize_lflags |=
588 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
589 txbd1->tx_bd_mss = 0;
590 }
591
592 length >>= 9;
593 if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
594 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
595 skb->len);
596 i = 0;
597 goto tx_dma_error;
598 }
599 flags |= bnxt_lhint_arr[length];
600 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
601
602 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
603 txbd1->tx_bd_cfa_action =
604 cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
605 for (i = 0; i < last_frag; i++) {
606 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
607
608 prod = NEXT_TX(prod);
609 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
610
611 len = skb_frag_size(frag);
612 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
613 DMA_TO_DEVICE);
614
615 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
616 goto tx_dma_error;
617
618 tx_buf = &txr->tx_buf_ring[prod];
619 dma_unmap_addr_set(tx_buf, mapping, mapping);
620
621 txbd->tx_bd_haddr = cpu_to_le64(mapping);
622
623 flags = len << TX_BD_LEN_SHIFT;
624 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
625 }
626
627 flags &= ~TX_BD_LEN;
628 txbd->tx_bd_len_flags_type =
629 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
630 TX_BD_FLAGS_PACKET_END);
631
632 netdev_tx_sent_queue(txq, skb->len);
633
634 skb_tx_timestamp(skb);
635
636 /* Sync BD data before updating doorbell */
637 wmb();
638
639 prod = NEXT_TX(prod);
640 WRITE_ONCE(txr->tx_prod, prod);
641
642 if (!netdev_xmit_more() || netif_xmit_stopped(txq))
643 bnxt_txr_db_kick(bp, txr, prod);
644 else
645 txr->kick_pending = 1;
646
647 tx_done:
648
649 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
650 if (netdev_xmit_more() && !tx_buf->is_push)
651 bnxt_txr_db_kick(bp, txr, prod);
652
653 netif_txq_try_stop(txq, bnxt_tx_avail(bp, txr),
654 bp->tx_wake_thresh);
655 }
656 return NETDEV_TX_OK;
657
658 tx_dma_error:
659 if (BNXT_TX_PTP_IS_SET(lflags))
660 atomic_inc(&bp->ptp_cfg->tx_avail);
661
662 last_frag = i;
663
664 /* start back at beginning and unmap skb */
665 prod = txr->tx_prod;
666 tx_buf = &txr->tx_buf_ring[prod];
667 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
668 skb_headlen(skb), DMA_TO_DEVICE);
669 prod = NEXT_TX(prod);
670
671 /* unmap remaining mapped pages */
672 for (i = 0; i < last_frag; i++) {
673 prod = NEXT_TX(prod);
674 tx_buf = &txr->tx_buf_ring[prod];
675 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
676 skb_frag_size(&skb_shinfo(skb)->frags[i]),
677 DMA_TO_DEVICE);
678 }
679
680 tx_free:
681 dev_kfree_skb_any(skb);
682 tx_kick_pending:
683 if (txr->kick_pending)
684 bnxt_txr_db_kick(bp, txr, txr->tx_prod);
685 txr->tx_buf_ring[txr->tx_prod].skb = NULL;
686 dev_core_stats_tx_dropped_inc(dev);
687 return NETDEV_TX_OK;
688 }
689
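/* Process TX completions: unmap buffers, free the skbs and wake the queue
 * once enough descriptors have been reclaimed.
 */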
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
691 {
692 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
693 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
694 u16 cons = txr->tx_cons;
695 struct pci_dev *pdev = bp->pdev;
696 int nr_pkts = bnapi->tx_pkts;
697 int i;
698 unsigned int tx_bytes = 0;
699
700 for (i = 0; i < nr_pkts; i++) {
701 struct bnxt_sw_tx_bd *tx_buf;
702 struct sk_buff *skb;
703 int j, last;
704
705 tx_buf = &txr->tx_buf_ring[cons];
706 cons = NEXT_TX(cons);
707 skb = tx_buf->skb;
708 tx_buf->skb = NULL;
709
710 if (unlikely(!skb)) {
711 bnxt_sched_reset_txr(bp, txr, i);
712 return;
713 }
714
715 tx_bytes += skb->len;
716
717 if (tx_buf->is_push) {
718 tx_buf->is_push = 0;
719 goto next_tx_int;
720 }
721
722 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
723 skb_headlen(skb), DMA_TO_DEVICE);
724 last = tx_buf->nr_frags;
725
726 for (j = 0; j < last; j++) {
727 cons = NEXT_TX(cons);
728 tx_buf = &txr->tx_buf_ring[cons];
729 dma_unmap_page(
730 &pdev->dev,
731 dma_unmap_addr(tx_buf, mapping),
732 skb_frag_size(&skb_shinfo(skb)->frags[j]),
733 DMA_TO_DEVICE);
734 }
735 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
736 if (bp->flags & BNXT_FLAG_CHIP_P5) {
737 /* PTP worker takes ownership of the skb */
738 if (!bnxt_get_tx_ts_p5(bp, skb))
739 skb = NULL;
740 else
741 atomic_inc(&bp->ptp_cfg->tx_avail);
742 }
743 }
744
745 next_tx_int:
746 cons = NEXT_TX(cons);
747
748 dev_consume_skb_any(skb);
749 }
750
751 bnapi->tx_pkts = 0;
752 WRITE_ONCE(txr->tx_cons, cons);
753
754 __netif_txq_completed_wake(txq, nr_pkts, tx_bytes,
755 bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
756 READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
757 }
758
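/* Allocate an RX page from the ring's page pool; use page fragments when
 * PAGE_SIZE is larger than BNXT_RX_PAGE_SIZE.
 */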
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
760 struct bnxt_rx_ring_info *rxr,
761 unsigned int *offset,
762 gfp_t gfp)
763 {
764 struct page *page;
765
766 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
767 page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
768 BNXT_RX_PAGE_SIZE);
769 } else {
770 page = page_pool_dev_alloc_pages(rxr->page_pool);
771 *offset = 0;
772 }
773 if (!page)
774 return NULL;
775
776 *mapping = page_pool_get_dma_addr(page) + *offset;
777 return page;
778 }
779
static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
781 gfp_t gfp)
782 {
783 u8 *data;
784 struct pci_dev *pdev = bp->pdev;
785
786 if (gfp == GFP_ATOMIC)
787 data = napi_alloc_frag(bp->rx_buf_size);
788 else
789 data = netdev_alloc_frag(bp->rx_buf_size);
790 if (!data)
791 return NULL;
792
793 *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
794 bp->rx_buf_use_size, bp->rx_dir,
795 DMA_ATTR_WEAK_ORDERING);
796
797 if (dma_mapping_error(&pdev->dev, *mapping)) {
798 skb_free_frag(data);
799 data = NULL;
800 }
801 return data;
802 }
803
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
805 u16 prod, gfp_t gfp)
806 {
807 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
808 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
809 dma_addr_t mapping;
810
811 if (BNXT_RX_PAGE_MODE(bp)) {
812 unsigned int offset;
813 struct page *page =
814 __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
815
816 if (!page)
817 return -ENOMEM;
818
819 mapping += bp->rx_dma_offset;
820 rx_buf->data = page;
821 rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
822 } else {
823 u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
824
825 if (!data)
826 return -ENOMEM;
827
828 rx_buf->data = data;
829 rx_buf->data_ptr = data + bp->rx_offset;
830 }
831 rx_buf->mapping = mapping;
832
833 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
834 return 0;
835 }
836
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
838 {
839 u16 prod = rxr->rx_prod;
840 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
841 struct rx_bd *cons_bd, *prod_bd;
842
843 prod_rx_buf = &rxr->rx_buf_ring[prod];
844 cons_rx_buf = &rxr->rx_buf_ring[cons];
845
846 prod_rx_buf->data = data;
847 prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
848
849 prod_rx_buf->mapping = cons_rx_buf->mapping;
850
851 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
852 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
853
854 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
855 }
856
static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
858 {
859 u16 next, max = rxr->rx_agg_bmap_size;
860
861 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
862 if (next >= max)
863 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
864 return next;
865 }
866
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
868 struct bnxt_rx_ring_info *rxr,
869 u16 prod, gfp_t gfp)
870 {
871 struct rx_bd *rxbd =
872 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
873 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
874 struct page *page;
875 dma_addr_t mapping;
876 u16 sw_prod = rxr->rx_sw_agg_prod;
877 unsigned int offset = 0;
878
879 page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
880
881 if (!page)
882 return -ENOMEM;
883
884 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
885 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
886
887 __set_bit(sw_prod, rxr->rx_agg_bmap);
888 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
889 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
890
891 rx_agg_buf->page = page;
892 rx_agg_buf->offset = offset;
893 rx_agg_buf->mapping = mapping;
894 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
895 rxbd->rx_bd_opaque = sw_prod;
896 return 0;
897 }
898
static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
900 struct bnxt_cp_ring_info *cpr,
901 u16 cp_cons, u16 curr)
902 {
903 struct rx_agg_cmp *agg;
904
905 cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
906 agg = (struct rx_agg_cmp *)
907 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
908 return agg;
909 }
910
static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
912 struct bnxt_rx_ring_info *rxr,
913 u16 agg_id, u16 curr)
914 {
915 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
916
917 return &tpa_info->agg_arr[curr];
918 }
919
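/* Recycle aggregation buffers referenced by the completion (or TPA) entries
 * back onto the RX aggregation ring.
 */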
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
921 u16 start, u32 agg_bufs, bool tpa)
922 {
923 struct bnxt_napi *bnapi = cpr->bnapi;
924 struct bnxt *bp = bnapi->bp;
925 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
926 u16 prod = rxr->rx_agg_prod;
927 u16 sw_prod = rxr->rx_sw_agg_prod;
928 bool p5_tpa = false;
929 u32 i;
930
931 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
932 p5_tpa = true;
933
934 for (i = 0; i < agg_bufs; i++) {
935 u16 cons;
936 struct rx_agg_cmp *agg;
937 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
938 struct rx_bd *prod_bd;
939 struct page *page;
940
941 if (p5_tpa)
942 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
943 else
944 agg = bnxt_get_agg(bp, cpr, idx, start + i);
945 cons = agg->rx_agg_cmp_opaque;
946 __clear_bit(cons, rxr->rx_agg_bmap);
947
948 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
949 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
950
951 __set_bit(sw_prod, rxr->rx_agg_bmap);
952 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
953 cons_rx_buf = &rxr->rx_agg_ring[cons];
954
955 /* It is possible for sw_prod to be equal to cons, so
956 * set cons_rx_buf->page to NULL first.
957 */
958 page = cons_rx_buf->page;
959 cons_rx_buf->page = NULL;
960 prod_rx_buf->page = page;
961 prod_rx_buf->offset = cons_rx_buf->offset;
962
963 prod_rx_buf->mapping = cons_rx_buf->mapping;
964
965 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
966
967 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
968 prod_bd->rx_bd_opaque = sw_prod;
969
970 prod = NEXT_RX_AGG(prod);
971 sw_prod = NEXT_RX_AGG(sw_prod);
972 }
973 rxr->rx_agg_prod = prod;
974 rxr->rx_sw_agg_prod = sw_prod;
975 }
976
static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
978 struct bnxt_rx_ring_info *rxr,
979 u16 cons, void *data, u8 *data_ptr,
980 dma_addr_t dma_addr,
981 unsigned int offset_and_len)
982 {
983 unsigned int len = offset_and_len & 0xffff;
984 struct page *page = data;
985 u16 prod = rxr->rx_prod;
986 struct sk_buff *skb;
987 int err;
988
989 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
990 if (unlikely(err)) {
991 bnxt_reuse_rx_data(rxr, cons, data);
992 return NULL;
993 }
994 dma_addr -= bp->rx_dma_offset;
995 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
996 bp->rx_dir);
997 skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
998 if (!skb) {
999 page_pool_recycle_direct(rxr->page_pool, page);
1000 return NULL;
1001 }
1002 skb_mark_for_recycle(skb);
1003 skb_reserve(skb, bp->rx_offset);
1004 __skb_put(skb, len);
1005
1006 return skb;
1007 }
1008
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1010 struct bnxt_rx_ring_info *rxr,
1011 u16 cons, void *data, u8 *data_ptr,
1012 dma_addr_t dma_addr,
1013 unsigned int offset_and_len)
1014 {
1015 unsigned int payload = offset_and_len >> 16;
1016 unsigned int len = offset_and_len & 0xffff;
1017 skb_frag_t *frag;
1018 struct page *page = data;
1019 u16 prod = rxr->rx_prod;
1020 struct sk_buff *skb;
1021 int off, err;
1022
1023 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1024 if (unlikely(err)) {
1025 bnxt_reuse_rx_data(rxr, cons, data);
1026 return NULL;
1027 }
1028 dma_addr -= bp->rx_dma_offset;
1029 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
1030 bp->rx_dir);
1031
1032 if (unlikely(!payload))
1033 payload = eth_get_headlen(bp->dev, data_ptr, len);
1034
1035 skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1036 if (!skb) {
1037 page_pool_recycle_direct(rxr->page_pool, page);
1038 return NULL;
1039 }
1040
1041 skb_mark_for_recycle(skb);
1042 off = (void *)data_ptr - page_address(page);
1043 skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
1044 memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1045 payload + NET_IP_ALIGN);
1046
1047 frag = &skb_shinfo(skb)->frags[0];
1048 skb_frag_size_sub(frag, payload);
1049 skb_frag_off_add(frag, payload);
1050 skb->data_len -= payload;
1051 skb->tail += payload;
1052
1053 return skb;
1054 }
1055
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1057 struct bnxt_rx_ring_info *rxr, u16 cons,
1058 void *data, u8 *data_ptr,
1059 dma_addr_t dma_addr,
1060 unsigned int offset_and_len)
1061 {
1062 u16 prod = rxr->rx_prod;
1063 struct sk_buff *skb;
1064 int err;
1065
1066 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1067 if (unlikely(err)) {
1068 bnxt_reuse_rx_data(rxr, cons, data);
1069 return NULL;
1070 }
1071
1072 skb = napi_build_skb(data, bp->rx_buf_size);
1073 dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1074 bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
1075 if (!skb) {
1076 skb_free_frag(data);
1077 return NULL;
1078 }
1079
1080 skb_reserve(skb, bp->rx_offset);
1081 skb_put(skb, offset_and_len & 0xffff);
1082 return skb;
1083 }
1084
static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
1086 struct bnxt_cp_ring_info *cpr,
1087 struct skb_shared_info *shinfo,
1088 u16 idx, u32 agg_bufs, bool tpa,
1089 struct xdp_buff *xdp)
1090 {
1091 struct bnxt_napi *bnapi = cpr->bnapi;
1092 struct pci_dev *pdev = bp->pdev;
1093 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1094 u16 prod = rxr->rx_agg_prod;
1095 u32 i, total_frag_len = 0;
1096 bool p5_tpa = false;
1097
1098 if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1099 p5_tpa = true;
1100
1101 for (i = 0; i < agg_bufs; i++) {
1102 skb_frag_t *frag = &shinfo->frags[i];
1103 u16 cons, frag_len;
1104 struct rx_agg_cmp *agg;
1105 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1106 struct page *page;
1107 dma_addr_t mapping;
1108
1109 if (p5_tpa)
1110 agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1111 else
1112 agg = bnxt_get_agg(bp, cpr, idx, i);
1113 cons = agg->rx_agg_cmp_opaque;
1114 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1115 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1116
1117 cons_rx_buf = &rxr->rx_agg_ring[cons];
1118 skb_frag_fill_page_desc(frag, cons_rx_buf->page,
1119 cons_rx_buf->offset, frag_len);
1120 shinfo->nr_frags = i + 1;
1121 __clear_bit(cons, rxr->rx_agg_bmap);
1122
1123 /* It is possible for bnxt_alloc_rx_page() to allocate
1124 * a sw_prod index that equals the cons index, so we
1125 * need to clear the cons entry now.
1126 */
1127 mapping = cons_rx_buf->mapping;
1128 page = cons_rx_buf->page;
1129 cons_rx_buf->page = NULL;
1130
1131 if (xdp && page_is_pfmemalloc(page))
1132 xdp_buff_set_frag_pfmemalloc(xdp);
1133
1134 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1135 --shinfo->nr_frags;
1136 cons_rx_buf->page = page;
1137
1138 /* Update prod since possibly some pages have been
1139 * allocated already.
1140 */
1141 rxr->rx_agg_prod = prod;
1142 bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1143 return 0;
1144 }
1145
1146 dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1147 bp->rx_dir);
1148
1149 total_frag_len += frag_len;
1150 prod = NEXT_RX_AGG(prod);
1151 }
1152 rxr->rx_agg_prod = prod;
1153 return total_frag_len;
1154 }
1155
static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
1157 struct bnxt_cp_ring_info *cpr,
1158 struct sk_buff *skb, u16 idx,
1159 u32 agg_bufs, bool tpa)
1160 {
1161 struct skb_shared_info *shinfo = skb_shinfo(skb);
1162 u32 total_frag_len = 0;
1163
1164 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
1165 agg_bufs, tpa, NULL);
1166 if (!total_frag_len) {
1167 skb_mark_for_recycle(skb);
1168 dev_kfree_skb(skb);
1169 return NULL;
1170 }
1171
1172 skb->data_len += total_frag_len;
1173 skb->len += total_frag_len;
1174 skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
1175 return skb;
1176 }
1177
static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
1179 struct bnxt_cp_ring_info *cpr,
1180 struct xdp_buff *xdp, u16 idx,
1181 u32 agg_bufs, bool tpa)
1182 {
1183 struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
1184 u32 total_frag_len = 0;
1185
1186 if (!xdp_buff_has_frags(xdp))
1187 shinfo->nr_frags = 0;
1188
1189 total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
1190 idx, agg_bufs, tpa, xdp);
1191 if (total_frag_len) {
1192 xdp_buff_set_frags_flag(xdp);
1193 shinfo->nr_frags = agg_bufs;
1194 shinfo->xdp_frags_size = total_frag_len;
1195 }
1196 return total_frag_len;
1197 }
1198
static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1200 u8 agg_bufs, u32 *raw_cons)
1201 {
1202 u16 last;
1203 struct rx_agg_cmp *agg;
1204
1205 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1206 last = RING_CMP(*raw_cons);
1207 agg = (struct rx_agg_cmp *)
1208 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1209 return RX_AGG_CMP_VALID(agg, *raw_cons);
1210 }
1211
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1213 unsigned int len,
1214 dma_addr_t mapping)
1215 {
1216 struct bnxt *bp = bnapi->bp;
1217 struct pci_dev *pdev = bp->pdev;
1218 struct sk_buff *skb;
1219
1220 skb = napi_alloc_skb(&bnapi->napi, len);
1221 if (!skb)
1222 return NULL;
1223
1224 dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1225 bp->rx_dir);
1226
1227 memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1228 len + NET_IP_ALIGN);
1229
1230 dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1231 bp->rx_dir);
1232
1233 skb_put(skb, len);
1234 return skb;
1235 }
1236
static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1238 u32 *raw_cons, void *cmp)
1239 {
1240 struct rx_cmp *rxcmp = cmp;
1241 u32 tmp_raw_cons = *raw_cons;
1242 u8 cmp_type, agg_bufs = 0;
1243
1244 cmp_type = RX_CMP_TYPE(rxcmp);
1245
1246 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1247 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1248 RX_CMP_AGG_BUFS) >>
1249 RX_CMP_AGG_BUFS_SHIFT;
1250 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1251 struct rx_tpa_end_cmp *tpa_end = cmp;
1252
1253 if (bp->flags & BNXT_FLAG_CHIP_P5)
1254 return 0;
1255
1256 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1257 }
1258
1259 if (agg_bufs) {
1260 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1261 return -EBUSY;
1262 }
1263 *raw_cons = tmp_raw_cons;
1264 return 0;
1265 }
1266
static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1268 {
1269 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1270 u16 idx = agg_id & MAX_TPA_P5_MASK;
1271
1272 if (test_bit(idx, map->agg_idx_bmap))
1273 idx = find_first_zero_bit(map->agg_idx_bmap,
1274 BNXT_AGG_IDX_BMAP_SIZE);
1275 __set_bit(idx, map->agg_idx_bmap);
1276 map->agg_id_tbl[agg_id] = idx;
1277 return idx;
1278 }
1279
static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1281 {
1282 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1283
1284 __clear_bit(idx, map->agg_idx_bmap);
1285 }
1286
static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1288 {
1289 struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1290
1291 return map->agg_id_tbl[agg_id];
1292 }
1293
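/* Handle a TPA_START completion: record the aggregation state and swap the
 * pre-allocated TPA buffer onto the RX ring.
 */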
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1295 struct rx_tpa_start_cmp *tpa_start,
1296 struct rx_tpa_start_cmp_ext *tpa_start1)
1297 {
1298 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1299 struct bnxt_tpa_info *tpa_info;
1300 u16 cons, prod, agg_id;
1301 struct rx_bd *prod_bd;
1302 dma_addr_t mapping;
1303
1304 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1305 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1306 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1307 } else {
1308 agg_id = TPA_START_AGG_ID(tpa_start);
1309 }
1310 cons = tpa_start->rx_tpa_start_cmp_opaque;
1311 prod = rxr->rx_prod;
1312 cons_rx_buf = &rxr->rx_buf_ring[cons];
1313 prod_rx_buf = &rxr->rx_buf_ring[prod];
1314 tpa_info = &rxr->rx_tpa[agg_id];
1315
1316 if (unlikely(cons != rxr->rx_next_cons ||
1317 TPA_START_ERROR(tpa_start))) {
1318 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1319 cons, rxr->rx_next_cons,
1320 TPA_START_ERROR_CODE(tpa_start1));
1321 bnxt_sched_reset_rxr(bp, rxr);
1322 return;
1323 }
1324 /* Store cfa_code in tpa_info to use in tpa_end
1325 * completion processing.
1326 */
1327 tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1328 prod_rx_buf->data = tpa_info->data;
1329 prod_rx_buf->data_ptr = tpa_info->data_ptr;
1330
1331 mapping = tpa_info->mapping;
1332 prod_rx_buf->mapping = mapping;
1333
1334 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1335
1336 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1337
1338 tpa_info->data = cons_rx_buf->data;
1339 tpa_info->data_ptr = cons_rx_buf->data_ptr;
1340 cons_rx_buf->data = NULL;
1341 tpa_info->mapping = cons_rx_buf->mapping;
1342
1343 tpa_info->len =
1344 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1345 RX_TPA_START_CMP_LEN_SHIFT;
1346 if (likely(TPA_START_HASH_VALID(tpa_start))) {
1347 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1348
1349 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1350 tpa_info->gso_type = SKB_GSO_TCPV4;
1351 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1352 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1353 tpa_info->gso_type = SKB_GSO_TCPV6;
1354 tpa_info->rss_hash =
1355 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1356 } else {
1357 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1358 tpa_info->gso_type = 0;
1359 netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1360 }
1361 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1362 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1363 tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1364 tpa_info->agg_count = 0;
1365
1366 rxr->rx_prod = NEXT_RX(prod);
1367 cons = NEXT_RX(cons);
1368 rxr->rx_next_cons = NEXT_RX(cons);
1369 cons_rx_buf = &rxr->rx_buf_ring[cons];
1370
1371 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1372 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1373 cons_rx_buf->data = NULL;
1374 }
1375
static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1377 {
1378 if (agg_bufs)
1379 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1380 }
1381
1382 #ifdef CONFIG_INET
static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1384 {
1385 struct udphdr *uh = NULL;
1386
1387 if (ip_proto == htons(ETH_P_IP)) {
1388 struct iphdr *iph = (struct iphdr *)skb->data;
1389
1390 if (iph->protocol == IPPROTO_UDP)
1391 uh = (struct udphdr *)(iph + 1);
1392 } else {
1393 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1394
1395 if (iph->nexthdr == IPPROTO_UDP)
1396 uh = (struct udphdr *)(iph + 1);
1397 }
1398 if (uh) {
1399 if (uh->check)
1400 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1401 else
1402 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1403 }
1404 }
1405 #endif
1406
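/* GRO completion handler for the 5731X family: recompute header offsets and
 * the TCP pseudo-header checksum before tcp_gro_complete() is called.
 */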
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1408 int payload_off, int tcp_ts,
1409 struct sk_buff *skb)
1410 {
1411 #ifdef CONFIG_INET
1412 struct tcphdr *th;
1413 int len, nw_off;
1414 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1415 u32 hdr_info = tpa_info->hdr_info;
1416 bool loopback = false;
1417
1418 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1419 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1420 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1421
1422 /* If the packet is an internal loopback packet, the offsets will
1423 * have an extra 4 bytes.
1424 */
1425 if (inner_mac_off == 4) {
1426 loopback = true;
1427 } else if (inner_mac_off > 4) {
1428 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1429 ETH_HLEN - 2));
1430
/* We only support inner IPv4/IPv6. If we don't see the
 * correct protocol ID, it must be a loopback packet where
 * the offsets are off by 4.
 */
1435 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1436 loopback = true;
1437 }
1438 if (loopback) {
1439 /* internal loopback packet, subtract all offsets by 4 */
1440 inner_ip_off -= 4;
1441 inner_mac_off -= 4;
1442 outer_ip_off -= 4;
1443 }
1444
1445 nw_off = inner_ip_off - ETH_HLEN;
1446 skb_set_network_header(skb, nw_off);
1447 if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1448 struct ipv6hdr *iph = ipv6_hdr(skb);
1449
1450 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1451 len = skb->len - skb_transport_offset(skb);
1452 th = tcp_hdr(skb);
1453 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1454 } else {
1455 struct iphdr *iph = ip_hdr(skb);
1456
1457 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1458 len = skb->len - skb_transport_offset(skb);
1459 th = tcp_hdr(skb);
1460 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1461 }
1462
1463 if (inner_mac_off) { /* tunnel */
1464 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1465 ETH_HLEN - 2));
1466
1467 bnxt_gro_tunnel(skb, proto);
1468 }
1469 #endif
1470 return skb;
1471 }
1472
static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1474 int payload_off, int tcp_ts,
1475 struct sk_buff *skb)
1476 {
1477 #ifdef CONFIG_INET
1478 u16 outer_ip_off, inner_ip_off, inner_mac_off;
1479 u32 hdr_info = tpa_info->hdr_info;
1480 int iphdr_len, nw_off;
1481
1482 inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1483 inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1484 outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1485
1486 nw_off = inner_ip_off - ETH_HLEN;
1487 skb_set_network_header(skb, nw_off);
1488 iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1489 sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1490 skb_set_transport_header(skb, nw_off + iphdr_len);
1491
1492 if (inner_mac_off) { /* tunnel */
1493 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1494 ETH_HLEN - 2));
1495
1496 bnxt_gro_tunnel(skb, proto);
1497 }
1498 #endif
1499 return skb;
1500 }
1501
1502 #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
1503 #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1504
static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1506 int payload_off, int tcp_ts,
1507 struct sk_buff *skb)
1508 {
1509 #ifdef CONFIG_INET
1510 struct tcphdr *th;
1511 int len, nw_off, tcp_opt_len = 0;
1512
1513 if (tcp_ts)
1514 tcp_opt_len = 12;
1515
1516 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1517 struct iphdr *iph;
1518
1519 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1520 ETH_HLEN;
1521 skb_set_network_header(skb, nw_off);
1522 iph = ip_hdr(skb);
1523 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1524 len = skb->len - skb_transport_offset(skb);
1525 th = tcp_hdr(skb);
1526 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1527 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1528 struct ipv6hdr *iph;
1529
1530 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1531 ETH_HLEN;
1532 skb_set_network_header(skb, nw_off);
1533 iph = ipv6_hdr(skb);
1534 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1535 len = skb->len - skb_transport_offset(skb);
1536 th = tcp_hdr(skb);
1537 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1538 } else {
1539 dev_kfree_skb_any(skb);
1540 return NULL;
1541 }
1542
1543 if (nw_off) /* tunnel */
1544 bnxt_gro_tunnel(skb, skb->protocol);
1545 #endif
1546 return skb;
1547 }
1548
static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1550 struct bnxt_tpa_info *tpa_info,
1551 struct rx_tpa_end_cmp *tpa_end,
1552 struct rx_tpa_end_cmp_ext *tpa_end1,
1553 struct sk_buff *skb)
1554 {
1555 #ifdef CONFIG_INET
1556 int payload_off;
1557 u16 segs;
1558
1559 segs = TPA_END_TPA_SEGS(tpa_end);
1560 if (segs == 1)
1561 return skb;
1562
1563 NAPI_GRO_CB(skb)->count = segs;
1564 skb_shinfo(skb)->gso_size =
1565 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1566 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1567 if (bp->flags & BNXT_FLAG_CHIP_P5)
1568 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1569 else
1570 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1571 skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1572 if (likely(skb))
1573 tcp_gro_complete(skb);
1574 #endif
1575 return skb;
1576 }
1577
/* Given the cfa_code of a received packet, determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1582 {
1583 struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1584
/* if vf-rep dev is NULL, the packet must belong to the PF */
1586 return dev ? dev : bp->dev;
1587 }
1588
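/* Handle a TPA_END completion: build the aggregated skb, attach VLAN/RSS
 * metadata and finish GRO when enabled.
 */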
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1590 struct bnxt_cp_ring_info *cpr,
1591 u32 *raw_cons,
1592 struct rx_tpa_end_cmp *tpa_end,
1593 struct rx_tpa_end_cmp_ext *tpa_end1,
1594 u8 *event)
1595 {
1596 struct bnxt_napi *bnapi = cpr->bnapi;
1597 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1598 u8 *data_ptr, agg_bufs;
1599 unsigned int len;
1600 struct bnxt_tpa_info *tpa_info;
1601 dma_addr_t mapping;
1602 struct sk_buff *skb;
1603 u16 idx = 0, agg_id;
1604 void *data;
1605 bool gro;
1606
1607 if (unlikely(bnapi->in_reset)) {
1608 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1609
1610 if (rc < 0)
1611 return ERR_PTR(-EBUSY);
1612 return NULL;
1613 }
1614
1615 if (bp->flags & BNXT_FLAG_CHIP_P5) {
1616 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1617 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1618 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1619 tpa_info = &rxr->rx_tpa[agg_id];
1620 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1621 netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1622 agg_bufs, tpa_info->agg_count);
1623 agg_bufs = tpa_info->agg_count;
1624 }
1625 tpa_info->agg_count = 0;
1626 *event |= BNXT_AGG_EVENT;
1627 bnxt_free_agg_idx(rxr, agg_id);
1628 idx = agg_id;
1629 gro = !!(bp->flags & BNXT_FLAG_GRO);
1630 } else {
1631 agg_id = TPA_END_AGG_ID(tpa_end);
1632 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1633 tpa_info = &rxr->rx_tpa[agg_id];
1634 idx = RING_CMP(*raw_cons);
1635 if (agg_bufs) {
1636 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1637 return ERR_PTR(-EBUSY);
1638
1639 *event |= BNXT_AGG_EVENT;
1640 idx = NEXT_CMP(idx);
1641 }
1642 gro = !!TPA_END_GRO(tpa_end);
1643 }
1644 data = tpa_info->data;
1645 data_ptr = tpa_info->data_ptr;
1646 prefetch(data_ptr);
1647 len = tpa_info->len;
1648 mapping = tpa_info->mapping;
1649
1650 if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1651 bnxt_abort_tpa(cpr, idx, agg_bufs);
1652 if (agg_bufs > MAX_SKB_FRAGS)
1653 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1654 agg_bufs, (int)MAX_SKB_FRAGS);
1655 return NULL;
1656 }
1657
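/* Small aggregations are copied into a fresh skb so the TPA buffer can be
 * reused in place; larger ones take ownership of the buffer and a replacement
 * fragment is allocated for the ring.
 */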
1658 if (len <= bp->rx_copy_thresh) {
1659 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1660 if (!skb) {
1661 bnxt_abort_tpa(cpr, idx, agg_bufs);
1662 cpr->sw_stats.rx.rx_oom_discards += 1;
1663 return NULL;
1664 }
1665 } else {
1666 u8 *new_data;
1667 dma_addr_t new_mapping;
1668
1669 new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
1670 if (!new_data) {
1671 bnxt_abort_tpa(cpr, idx, agg_bufs);
1672 cpr->sw_stats.rx.rx_oom_discards += 1;
1673 return NULL;
1674 }
1675
1676 tpa_info->data = new_data;
1677 tpa_info->data_ptr = new_data + bp->rx_offset;
1678 tpa_info->mapping = new_mapping;
1679
1680 skb = napi_build_skb(data, bp->rx_buf_size);
1681 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1682 bp->rx_buf_use_size, bp->rx_dir,
1683 DMA_ATTR_WEAK_ORDERING);
1684
1685 if (!skb) {
1686 skb_free_frag(data);
1687 bnxt_abort_tpa(cpr, idx, agg_bufs);
1688 cpr->sw_stats.rx.rx_oom_discards += 1;
1689 return NULL;
1690 }
1691 skb_reserve(skb, bp->rx_offset);
1692 skb_put(skb, len);
1693 }
1694
1695 if (agg_bufs) {
1696 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
1697 if (!skb) {
1698 /* Page reuse already handled by bnxt_rx_pages(). */
1699 cpr->sw_stats.rx.rx_oom_discards += 1;
1700 return NULL;
1701 }
1702 }
1703
1704 skb->protocol =
1705 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1706
1707 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1708 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1709
1710 if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1711 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1712 __be16 vlan_proto = htons(tpa_info->metadata >>
1713 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1714 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1715
1716 if (eth_type_vlan(vlan_proto)) {
1717 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1718 } else {
1719 dev_kfree_skb(skb);
1720 return NULL;
1721 }
1722 }
1723
1724 skb_checksum_none_assert(skb);
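/* L4_CS_CALC means the hardware verified the outer L4 checksum; the tunnel
 * T_L4_CS_CALC bit, shifted down, sets csum_level to 1 when the inner
 * checksum was verified as well.
 */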
1725 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1726 skb->ip_summed = CHECKSUM_UNNECESSARY;
1727 skb->csum_level =
1728 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1729 }
1730
1731 if (gro)
1732 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1733
1734 return skb;
1735 }
1736
1737 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1738 struct rx_agg_cmp *rx_agg)
1739 {
1740 u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1741 struct bnxt_tpa_info *tpa_info;
1742
1743 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1744 tpa_info = &rxr->rx_tpa[agg_id];
1745 BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1746 tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1747 }
1748
1749 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1750 struct sk_buff *skb)
1751 {
1752 if (skb->dev != bp->dev) {
1753 /* this packet belongs to a vf-rep */
1754 bnxt_vf_rep_rx(bp, skb);
1755 return;
1756 }
1757 skb_record_rx_queue(skb, bnapi->index);
1758 skb_mark_for_recycle(skb);
1759 napi_gro_receive(&bnapi->napi, skb);
1760 }
1761
1762 /* returns the following:
1763 * 1 - 1 packet successfully received
1764 * 0 - successful TPA_START, packet not completed yet
1765 * -EBUSY - completion ring does not have all the agg buffers yet
1766 * -ENOMEM - packet aborted due to out of memory
1767 * -EIO - packet aborted due to hw error indicated in BD
1768 */
1769 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1770 u32 *raw_cons, u8 *event)
1771 {
1772 struct bnxt_napi *bnapi = cpr->bnapi;
1773 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1774 struct net_device *dev = bp->dev;
1775 struct rx_cmp *rxcmp;
1776 struct rx_cmp_ext *rxcmp1;
1777 u32 tmp_raw_cons = *raw_cons;
1778 u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1779 struct bnxt_sw_rx_bd *rx_buf;
1780 unsigned int len;
1781 u8 *data_ptr, agg_bufs, cmp_type;
1782 bool xdp_active = false;
1783 dma_addr_t dma_addr;
1784 struct sk_buff *skb;
1785 struct xdp_buff xdp;
1786 u32 flags, misc;
1787 void *data;
1788 int rc = 0;
1789
1790 rxcmp = (struct rx_cmp *)
1791 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1792
1793 cmp_type = RX_CMP_TYPE(rxcmp);
1794
1795 if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1796 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1797 goto next_rx_no_prod_no_len;
1798 }
1799
1800 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1801 cp_cons = RING_CMP(tmp_raw_cons);
1802 rxcmp1 = (struct rx_cmp_ext *)
1803 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1804
1805 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1806 return -EBUSY;
1807
1808 /* The valid test of the entry must be done first before
1809 * reading any further.
1810 */
1811 dma_rmb();
1812 prod = rxr->rx_prod;
1813
1814 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1815 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1816 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1817
1818 *event |= BNXT_RX_EVENT;
1819 goto next_rx_no_prod_no_len;
1820
1821 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1822 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1823 (struct rx_tpa_end_cmp *)rxcmp,
1824 (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1825
1826 if (IS_ERR(skb))
1827 return -EBUSY;
1828
1829 rc = -ENOMEM;
1830 if (likely(skb)) {
1831 bnxt_deliver_skb(bp, bnapi, skb);
1832 rc = 1;
1833 }
1834 *event |= BNXT_RX_EVENT;
1835 goto next_rx_no_prod_no_len;
1836 }
1837
1838 cons = rxcmp->rx_cmp_opaque;
1839 if (unlikely(cons != rxr->rx_next_cons)) {
1840 int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1841
1842 /* 0xffff is forced error, don't print it */
1843 if (rxr->rx_next_cons != 0xffff)
1844 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1845 cons, rxr->rx_next_cons);
1846 bnxt_sched_reset_rxr(bp, rxr);
1847 if (rc1)
1848 return rc1;
1849 goto next_rx_no_prod_no_len;
1850 }
1851 rx_buf = &rxr->rx_buf_ring[cons];
1852 data = rx_buf->data;
1853 data_ptr = rx_buf->data_ptr;
1854 prefetch(data_ptr);
1855
1856 misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1857 agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1858
1859 if (agg_bufs) {
1860 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1861 return -EBUSY;
1862
1863 cp_cons = NEXT_CMP(cp_cons);
1864 *event |= BNXT_AGG_EVENT;
1865 }
1866 *event |= BNXT_RX_EVENT;
1867
1868 rx_buf->data = NULL;
1869 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1870 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1871
1872 bnxt_reuse_rx_data(rxr, cons, data);
1873 if (agg_bufs)
1874 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1875 false);
1876
1877 rc = -EIO;
1878 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1879 bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1880 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1881 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1882 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1883 rx_err);
1884 bnxt_sched_reset_rxr(bp, rxr);
1885 }
1886 }
1887 goto next_rx_no_len;
1888 }
1889
1890 flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1891 len = flags >> RX_CMP_LEN_SHIFT;
1892 dma_addr = rx_buf->mapping;
1893
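/* With an XDP program attached, build an xdp_buff over the packet data (and
 * any aggregation pages) so the program can pass, drop or redirect it before
 * an skb is constructed.
 */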
1894 if (bnxt_xdp_attached(bp, rxr)) {
1895 bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
1896 if (agg_bufs) {
1897 u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
1898 cp_cons, agg_bufs,
1899 false);
1900 if (!frag_len) {
1901 cpr->sw_stats.rx.rx_oom_discards += 1;
1902 rc = -ENOMEM;
1903 goto next_rx;
1904 }
1905 }
1906 xdp_active = true;
1907 }
1908
1909 if (xdp_active) {
1910 if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
1911 rc = 1;
1912 goto next_rx;
1913 }
1914 }
1915
1916 if (len <= bp->rx_copy_thresh) {
1917 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1918 bnxt_reuse_rx_data(rxr, cons, data);
1919 if (!skb) {
1920 if (agg_bufs) {
1921 if (!xdp_active)
1922 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1923 agg_bufs, false);
1924 else
1925 bnxt_xdp_buff_frags_free(rxr, &xdp);
1926 }
1927 cpr->sw_stats.rx.rx_oom_discards += 1;
1928 rc = -ENOMEM;
1929 goto next_rx;
1930 }
1931 } else {
1932 u32 payload;
1933
1934 if (rx_buf->data_ptr == data_ptr)
1935 payload = misc & RX_CMP_PAYLOAD_OFFSET;
1936 else
1937 payload = 0;
1938 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1939 payload | len);
1940 if (!skb) {
1941 cpr->sw_stats.rx.rx_oom_discards += 1;
1942 rc = -ENOMEM;
1943 goto next_rx;
1944 }
1945 }
1946
1947 if (agg_bufs) {
1948 if (!xdp_active) {
1949 skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
1950 if (!skb) {
1951 cpr->sw_stats.rx.rx_oom_discards += 1;
1952 rc = -ENOMEM;
1953 goto next_rx;
1954 }
1955 } else {
1956 skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
1957 if (!skb) {
1958 /* we should be able to free the old skb here */
1959 bnxt_xdp_buff_frags_free(rxr, &xdp);
1960 cpr->sw_stats.rx.rx_oom_discards += 1;
1961 rc = -ENOMEM;
1962 goto next_rx;
1963 }
1964 }
1965 }
1966
1967 if (RX_CMP_HASH_VALID(rxcmp)) {
1968 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1969 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1970
1971 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1972 if (hash_type != 1 && hash_type != 3)
1973 type = PKT_HASH_TYPE_L3;
1974 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1975 }
1976
1977 cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1978 skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1979
1980 if ((rxcmp1->rx_cmp_flags2 &
1981 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1982 (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1983 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1984 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1985 __be16 vlan_proto = htons(meta_data >>
1986 RX_CMP_FLAGS2_METADATA_TPID_SFT);
1987
1988 if (eth_type_vlan(vlan_proto)) {
1989 __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1990 } else {
1991 dev_kfree_skb(skb);
1992 goto next_rx;
1993 }
1994 }
1995
1996 skb_checksum_none_assert(skb);
1997 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1998 if (dev->features & NETIF_F_RXCSUM) {
1999 skb->ip_summed = CHECKSUM_UNNECESSARY;
2000 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2001 }
2002 } else {
2003 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2004 if (dev->features & NETIF_F_RXCSUM)
2005 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
2006 }
2007 }
2008
2009 if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
2010 RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
2011 if (bp->flags & BNXT_FLAG_CHIP_P5) {
2012 u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
2013 u64 ns, ts;
2014
2015 if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2016 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2017
2018 spin_lock_bh(&ptp->ptp_lock);
2019 ns = timecounter_cyc2time(&ptp->tc, ts);
2020 spin_unlock_bh(&ptp->ptp_lock);
2021 memset(skb_hwtstamps(skb), 0,
2022 sizeof(*skb_hwtstamps(skb)));
2023 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2024 }
2025 }
2026 }
2027 bnxt_deliver_skb(bp, bnapi, skb);
2028 rc = 1;
2029
2030 next_rx:
2031 cpr->rx_packets += 1;
2032 cpr->rx_bytes += len;
2033
2034 next_rx_no_len:
2035 rxr->rx_prod = NEXT_RX(prod);
2036 rxr->rx_next_cons = NEXT_RX(cons);
2037
2038 next_rx_no_prod_no_len:
2039 *raw_cons = tmp_raw_cons;
2040
2041 return rc;
2042 }
2043
2044 /* In netpoll mode, if we are using a combined completion ring, we need to
2045 * discard the rx packets and recycle the buffers.
2046 */
2047 static int bnxt_force_rx_discard(struct bnxt *bp,
2048 struct bnxt_cp_ring_info *cpr,
2049 u32 *raw_cons, u8 *event)
2050 {
2051 u32 tmp_raw_cons = *raw_cons;
2052 struct rx_cmp_ext *rxcmp1;
2053 struct rx_cmp *rxcmp;
2054 u16 cp_cons;
2055 u8 cmp_type;
2056 int rc;
2057
2058 cp_cons = RING_CMP(tmp_raw_cons);
2059 rxcmp = (struct rx_cmp *)
2060 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2061
2062 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2063 cp_cons = RING_CMP(tmp_raw_cons);
2064 rxcmp1 = (struct rx_cmp_ext *)
2065 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2066
2067 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2068 return -EBUSY;
2069
2070 /* The valid test of the entry must be done first before
2071 * reading any further.
2072 */
2073 dma_rmb();
2074 cmp_type = RX_CMP_TYPE(rxcmp);
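/* Flag the completion as errored so that bnxt_rx_pkt() drops the packet and
 * recycles the buffers instead of delivering it.
 */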
2075 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2076 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2077 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2078 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2079 struct rx_tpa_end_cmp_ext *tpa_end1;
2080
2081 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2082 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2083 cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2084 }
2085 rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2086 if (rc && rc != -EBUSY)
2087 cpr->sw_stats.rx.rx_netpoll_discards += 1;
2088 return rc;
2089 }
2090
2091 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2092 {
2093 struct bnxt_fw_health *fw_health = bp->fw_health;
2094 u32 reg = fw_health->regs[reg_idx];
2095 u32 reg_type, reg_off, val = 0;
2096
2097 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2098 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2099 switch (reg_type) {
2100 case BNXT_FW_HEALTH_REG_TYPE_CFG:
2101 pci_read_config_dword(bp->pdev, reg_off, &val);
2102 break;
2103 case BNXT_FW_HEALTH_REG_TYPE_GRC:
2104 reg_off = fw_health->mapped_regs[reg_idx];
2105 fallthrough;
2106 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2107 val = readl(bp->bar0 + reg_off);
2108 break;
2109 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2110 val = readl(bp->bar1 + reg_off);
2111 break;
2112 }
2113 if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2114 val &= fw_health->fw_reset_inprog_reg_mask;
2115 return val;
2116 }
2117
2118 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2119 {
2120 int i;
2121
2122 for (i = 0; i < bp->rx_nr_rings; i++) {
2123 u16 grp_idx = bp->rx_ring[i].bnapi->index;
2124 struct bnxt_ring_grp_info *grp_info;
2125
2126 grp_info = &bp->grp_info[grp_idx];
2127 if (grp_info->agg_fw_ring_id == ring_id)
2128 return grp_idx;
2129 }
2130 return INVALID_HW_RING_ID;
2131 }
2132
2133 static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2134 {
2135 u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2136
2137 switch (err_type) {
2138 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2139 netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2140 BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2141 break;
2142 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2143 netdev_warn(bp->dev, "Pause Storm detected!\n");
2144 break;
2145 case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2146 netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2147 break;
2148 default:
2149 netdev_err(bp->dev, "FW reported unknown error type %u\n",
2150 err_type);
2151 break;
2152 }
2153 }
2154
2155 #define BNXT_GET_EVENT_PORT(data) \
2156 ((data) & \
2157 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2158
2159 #define BNXT_EVENT_RING_TYPE(data2) \
2160 ((data2) & \
2161 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2162
2163 #define BNXT_EVENT_RING_TYPE_RX(data2) \
2164 (BNXT_EVENT_RING_TYPE(data2) == \
2165 ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2166
2167 #define BNXT_EVENT_PHC_EVENT_TYPE(data1) \
2168 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2169 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2170
2171 #define BNXT_EVENT_PHC_RTC_UPDATE(data1) \
2172 (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2173 ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2174
2175 #define BNXT_PHC_BITS 48
2176
2177 static int bnxt_async_event_process(struct bnxt *bp,
2178 struct hwrm_async_event_cmpl *cmpl)
2179 {
2180 u16 event_id = le16_to_cpu(cmpl->event_id);
2181 u32 data1 = le32_to_cpu(cmpl->event_data1);
2182 u32 data2 = le32_to_cpu(cmpl->event_data2);
2183
2184 netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2185 event_id, data1, data2);
2186
2187 /* TODO CHIMP_FW: Define event id's for link change, error etc */
2188 switch (event_id) {
2189 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2190 struct bnxt_link_info *link_info = &bp->link_info;
2191
2192 if (BNXT_VF(bp))
2193 goto async_event_process_exit;
2194
2195 /* print unsupported speed warning in forced speed mode only */
2196 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2197 (data1 & 0x20000)) {
2198 u16 fw_speed = link_info->force_link_speed;
2199 u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2200
2201 if (speed != SPEED_UNKNOWN)
2202 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2203 speed);
2204 }
2205 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2206 }
2207 fallthrough;
2208 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2209 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2210 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2211 fallthrough;
2212 case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2213 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2214 break;
2215 case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2216 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2217 break;
2218 case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2219 u16 port_id = BNXT_GET_EVENT_PORT(data1);
2220
2221 if (BNXT_VF(bp))
2222 break;
2223
2224 if (bp->pf.port_id != port_id)
2225 break;
2226
2227 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2228 break;
2229 }
2230 case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2231 if (BNXT_PF(bp))
2232 goto async_event_process_exit;
2233 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2234 break;
2235 case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2236 char *type_str = "Solicited";
2237
2238 if (!bp->fw_health)
2239 goto async_event_process_exit;
2240
2241 bp->fw_reset_timestamp = jiffies;
2242 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2243 if (!bp->fw_reset_min_dsecs)
2244 bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2245 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2246 if (!bp->fw_reset_max_dsecs)
2247 bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2248 if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2249 set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2250 } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2251 type_str = "Fatal";
2252 bp->fw_health->fatalities++;
2253 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2254 } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2255 EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2256 type_str = "Non-fatal";
2257 bp->fw_health->survivals++;
2258 set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2259 }
2260 netif_warn(bp, hw, bp->dev,
2261 "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2262 type_str, data1, data2,
2263 bp->fw_reset_min_dsecs * 100,
2264 bp->fw_reset_max_dsecs * 100);
2265 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2266 break;
2267 }
2268 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2269 struct bnxt_fw_health *fw_health = bp->fw_health;
2270 char *status_desc = "healthy";
2271 u32 status;
2272
2273 if (!fw_health)
2274 goto async_event_process_exit;
2275
2276 if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2277 fw_health->enabled = false;
2278 netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2279 break;
2280 }
2281 fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2282 fw_health->tmr_multiplier =
2283 DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2284 bp->current_interval * 10);
2285 fw_health->tmr_counter = fw_health->tmr_multiplier;
2286 if (!fw_health->enabled)
2287 fw_health->last_fw_heartbeat =
2288 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2289 fw_health->last_fw_reset_cnt =
2290 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2291 status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2292 if (status != BNXT_FW_STATUS_HEALTHY)
2293 status_desc = "unhealthy";
2294 netif_info(bp, drv, bp->dev,
2295 "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2296 fw_health->primary ? "primary" : "backup", status,
2297 status_desc, fw_health->last_fw_reset_cnt);
2298 if (!fw_health->enabled) {
2299 /* Make sure tmr_counter is set and visible to
2300 * bnxt_health_check() before setting enabled to true.
2301 */
2302 smp_wmb();
2303 fw_health->enabled = true;
2304 }
2305 goto async_event_process_exit;
2306 }
2307 case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2308 netif_notice(bp, hw, bp->dev,
2309 "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2310 data1, data2);
2311 goto async_event_process_exit;
2312 case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2313 struct bnxt_rx_ring_info *rxr;
2314 u16 grp_idx;
2315
2316 if (bp->flags & BNXT_FLAG_CHIP_P5)
2317 goto async_event_process_exit;
2318
2319 netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2320 BNXT_EVENT_RING_TYPE(data2), data1);
2321 if (!BNXT_EVENT_RING_TYPE_RX(data2))
2322 goto async_event_process_exit;
2323
2324 grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2325 if (grp_idx == INVALID_HW_RING_ID) {
2326 netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2327 data1);
2328 goto async_event_process_exit;
2329 }
2330 rxr = bp->bnapi[grp_idx]->rx_ring;
2331 bnxt_sched_reset_rxr(bp, rxr);
2332 goto async_event_process_exit;
2333 }
2334 case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2335 struct bnxt_fw_health *fw_health = bp->fw_health;
2336
2337 netif_notice(bp, hw, bp->dev,
2338 "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2339 data1, data2);
2340 if (fw_health) {
2341 fw_health->echo_req_data1 = data1;
2342 fw_health->echo_req_data2 = data2;
2343 set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2344 break;
2345 }
2346 goto async_event_process_exit;
2347 }
2348 case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2349 bnxt_ptp_pps_event(bp, data1, data2);
2350 goto async_event_process_exit;
2351 }
2352 case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2353 bnxt_event_error_report(bp, data1, data2);
2354 goto async_event_process_exit;
2355 }
2356 case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2357 switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2358 case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2359 if (BNXT_PTP_USE_RTC(bp)) {
2360 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2361 u64 ns;
2362
2363 if (!ptp)
2364 goto async_event_process_exit;
2365
2366 spin_lock_bh(&ptp->ptp_lock);
2367 bnxt_ptp_update_current_time(bp);
2368 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2369 BNXT_PHC_BITS) | ptp->current_time);
2370 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2371 spin_unlock_bh(&ptp->ptp_lock);
2372 }
2373 break;
2374 }
2375 goto async_event_process_exit;
2376 }
2377 case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2378 u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2379
2380 hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2381 goto async_event_process_exit;
2382 }
2383 default:
2384 goto async_event_process_exit;
2385 }
2386 __bnxt_queue_sp_work(bp);
2387 async_event_process_exit:
2388 return 0;
2389 }
2390
2391 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2392 {
2393 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2394 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2395 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2396 (struct hwrm_fwd_req_cmpl *)txcmp;
2397
2398 switch (cmpl_type) {
2399 case CMPL_BASE_TYPE_HWRM_DONE:
2400 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2401 hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2402 break;
2403
2404 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2405 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2406
2407 if ((vf_id < bp->pf.first_vf_id) ||
2408 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2409 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2410 vf_id);
2411 return -EINVAL;
2412 }
2413
2414 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2415 bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT);
2416 break;
2417
2418 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2419 bnxt_async_event_process(bp,
2420 (struct hwrm_async_event_cmpl *)txcmp);
2421 break;
2422
2423 default:
2424 break;
2425 }
2426
2427 return 0;
2428 }
2429
2430 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2431 {
2432 struct bnxt_napi *bnapi = dev_instance;
2433 struct bnxt *bp = bnapi->bp;
2434 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2435 u32 cons = RING_CMP(cpr->cp_raw_cons);
2436
2437 cpr->event_ctr++;
2438 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2439 napi_schedule(&bnapi->napi);
2440 return IRQ_HANDLED;
2441 }
2442
2443 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2444 {
2445 u32 raw_cons = cpr->cp_raw_cons;
2446 u16 cons = RING_CMP(raw_cons);
2447 struct tx_cmp *txcmp;
2448
2449 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2450
2451 return TX_CMP_VALID(txcmp, raw_cons);
2452 }
2453
2454 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2455 {
2456 struct bnxt_napi *bnapi = dev_instance;
2457 struct bnxt *bp = bnapi->bp;
2458 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2459 u32 cons = RING_CMP(cpr->cp_raw_cons);
2460 u32 int_status;
2461
2462 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2463
2464 if (!bnxt_has_work(bp, cpr)) {
2465 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2466 /* return if erroneous interrupt */
2467 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2468 return IRQ_NONE;
2469 }
2470
2471 /* disable ring IRQ */
2472 BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2473
2474 /* Return here if interrupt is shared and is disabled. */
2475 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2476 return IRQ_HANDLED;
2477
2478 napi_schedule(&bnapi->napi);
2479 return IRQ_HANDLED;
2480 }
2481
2482 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2483 int budget)
2484 {
2485 struct bnxt_napi *bnapi = cpr->bnapi;
2486 u32 raw_cons = cpr->cp_raw_cons;
2487 u32 cons;
2488 int tx_pkts = 0;
2489 int rx_pkts = 0;
2490 u8 event = 0;
2491 struct tx_cmp *txcmp;
2492
2493 cpr->has_more_work = 0;
2494 cpr->had_work_done = 1;
2495 while (1) {
2496 int rc;
2497
2498 cons = RING_CMP(raw_cons);
2499 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2500
2501 if (!TX_CMP_VALID(txcmp, raw_cons))
2502 break;
2503
2504 /* The valid test of the entry must be done first before
2505 * reading any further.
2506 */
2507 dma_rmb();
2508 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2509 tx_pkts++;
2510 /* return full budget so NAPI will complete. */
2511 if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
2512 rx_pkts = budget;
2513 raw_cons = NEXT_RAW_CMP(raw_cons);
2514 if (budget)
2515 cpr->has_more_work = 1;
2516 break;
2517 }
2518 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
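/* Completion types 0x10-0x1f are RX completions (L2, TPA start/end,
 * aggregation).
 */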
2519 if (likely(budget))
2520 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2521 else
2522 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2523 &event);
2524 if (likely(rc >= 0))
2525 rx_pkts += rc;
2526 /* Increment rx_pkts when rc is -ENOMEM to count towards
2527 * the NAPI budget. Otherwise, we may potentially loop
2528 * here forever if we consistently cannot allocate
2529 * buffers.
2530 */
2531 else if (rc == -ENOMEM && budget)
2532 rx_pkts++;
2533 else if (rc == -EBUSY) /* partial completion */
2534 break;
2535 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2536 CMPL_BASE_TYPE_HWRM_DONE) ||
2537 (TX_CMP_TYPE(txcmp) ==
2538 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2539 (TX_CMP_TYPE(txcmp) ==
2540 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2541 bnxt_hwrm_handler(bp, txcmp);
2542 }
2543 raw_cons = NEXT_RAW_CMP(raw_cons);
2544
2545 if (rx_pkts && rx_pkts == budget) {
2546 cpr->has_more_work = 1;
2547 break;
2548 }
2549 }
2550
2551 if (event & BNXT_REDIRECT_EVENT)
2552 xdp_do_flush();
2553
2554 if (event & BNXT_TX_EVENT) {
2555 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2556 u16 prod = txr->tx_prod;
2557
2558 /* Sync BD data before updating doorbell */
2559 wmb();
2560
2561 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2562 }
2563
2564 cpr->cp_raw_cons = raw_cons;
2565 bnapi->tx_pkts += tx_pkts;
2566 bnapi->events |= event;
2567 return rx_pkts;
2568 }
2569
2570 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2571 int budget)
2572 {
2573 if (bnapi->tx_pkts && !bnapi->tx_fault)
2574 bnapi->tx_int(bp, bnapi, budget);
2575
2576 if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2577 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2578
2579 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2580 }
2581 if (bnapi->events & BNXT_AGG_EVENT) {
2582 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2583
2584 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2585 }
2586 bnapi->events = 0;
2587 }
2588
2589 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2590 int budget)
2591 {
2592 struct bnxt_napi *bnapi = cpr->bnapi;
2593 int rx_pkts;
2594
2595 rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2596
2597 /* ACK completion ring before freeing tx ring and producing new
2598 * buffers in rx/agg rings to prevent overflowing the completion
2599 * ring.
2600 */
2601 bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2602
2603 __bnxt_poll_work_done(bp, bnapi, budget);
2604 return rx_pkts;
2605 }
2606
2607 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2608 {
2609 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2610 struct bnxt *bp = bnapi->bp;
2611 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2612 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2613 struct tx_cmp *txcmp;
2614 struct rx_cmp_ext *rxcmp1;
2615 u32 cp_cons, tmp_raw_cons;
2616 u32 raw_cons = cpr->cp_raw_cons;
2617 bool flush_xdp = false;
2618 u32 rx_pkts = 0;
2619 u8 event = 0;
2620
2621 while (1) {
2622 int rc;
2623
2624 cp_cons = RING_CMP(raw_cons);
2625 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2626
2627 if (!TX_CMP_VALID(txcmp, raw_cons))
2628 break;
2629
2630 /* The valid test of the entry must be done first before
2631 * reading any further.
2632 */
2633 dma_rmb();
2634 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2635 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2636 cp_cons = RING_CMP(tmp_raw_cons);
2637 rxcmp1 = (struct rx_cmp_ext *)
2638 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2639
2640 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2641 break;
2642
2643 /* force an error to recycle the buffer */
2644 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2645 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2646
2647 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2648 if (likely(rc == -EIO) && budget)
2649 rx_pkts++;
2650 else if (rc == -EBUSY) /* partial completion */
2651 break;
2652 if (event & BNXT_REDIRECT_EVENT)
2653 flush_xdp = true;
2654 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2655 CMPL_BASE_TYPE_HWRM_DONE)) {
2656 bnxt_hwrm_handler(bp, txcmp);
2657 } else {
2658 netdev_err(bp->dev,
2659 "Invalid completion received on special ring\n");
2660 }
2661 raw_cons = NEXT_RAW_CMP(raw_cons);
2662
2663 if (rx_pkts == budget)
2664 break;
2665 }
2666
2667 cpr->cp_raw_cons = raw_cons;
2668 BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2669 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2670
2671 if (event & BNXT_AGG_EVENT)
2672 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2673 if (flush_xdp)
2674 xdp_do_flush();
2675
2676 if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2677 napi_complete_done(napi, rx_pkts);
2678 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2679 }
2680 return rx_pkts;
2681 }
2682
2683 static int bnxt_poll(struct napi_struct *napi, int budget)
2684 {
2685 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2686 struct bnxt *bp = bnapi->bp;
2687 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2688 int work_done = 0;
2689
2690 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2691 napi_complete(napi);
2692 return 0;
2693 }
2694 while (1) {
2695 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2696
2697 if (work_done >= budget) {
2698 if (!budget)
2699 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2700 break;
2701 }
2702
2703 if (!bnxt_has_work(bp, cpr)) {
2704 if (napi_complete_done(napi, work_done))
2705 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2706 break;
2707 }
2708 }
2709 if (bp->flags & BNXT_FLAG_DIM) {
2710 struct dim_sample dim_sample = {};
2711
2712 dim_update_sample(cpr->event_ctr,
2713 cpr->rx_packets,
2714 cpr->rx_bytes,
2715 &dim_sample);
2716 net_dim(&cpr->dim, dim_sample);
2717 }
2718 return work_done;
2719 }
2720
2721 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2722 {
2723 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2724 int i, work_done = 0;
2725
2726 for (i = 0; i < 2; i++) {
2727 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2728
2729 if (cpr2) {
2730 work_done += __bnxt_poll_work(bp, cpr2,
2731 budget - work_done);
2732 cpr->has_more_work |= cpr2->has_more_work;
2733 }
2734 }
2735 return work_done;
2736 }
2737
2738 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2739 u64 dbr_type, int budget)
2740 {
2741 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2742 int i;
2743
2744 for (i = 0; i < 2; i++) {
2745 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2746 struct bnxt_db_info *db;
2747
2748 if (cpr2 && cpr2->had_work_done) {
2749 db = &cpr2->cp_db;
2750 bnxt_writeq(bp, db->db_key64 | dbr_type |
2751 RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2752 cpr2->had_work_done = 0;
2753 }
2754 }
2755 __bnxt_poll_work_done(bp, bnapi, budget);
2756 }
2757
2758 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2759 {
2760 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2761 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2762 struct bnxt_cp_ring_info *cpr_rx;
2763 u32 raw_cons = cpr->cp_raw_cons;
2764 struct bnxt *bp = bnapi->bp;
2765 struct nqe_cn *nqcmp;
2766 int work_done = 0;
2767 u32 cons;
2768
2769 if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2770 napi_complete(napi);
2771 return 0;
2772 }
2773 if (cpr->has_more_work) {
2774 cpr->has_more_work = 0;
2775 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2776 }
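/* Drain the notification queue (NQ); each CQ_NOTIFICATION entry names a
 * completion sub-ring that still has work to poll.
 */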
2777 while (1) {
2778 cons = RING_CMP(raw_cons);
2779 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2780
2781 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2782 if (cpr->has_more_work)
2783 break;
2784
2785 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2786 budget);
2787 cpr->cp_raw_cons = raw_cons;
2788 if (napi_complete_done(napi, work_done))
2789 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2790 cpr->cp_raw_cons);
2791 goto poll_done;
2792 }
2793
2794 /* The valid test of the entry must be done first before
2795 * reading any further.
2796 */
2797 dma_rmb();
2798
2799 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2800 u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2801 struct bnxt_cp_ring_info *cpr2;
2802
2803 /* No more budget for RX work */
2804 if (budget && work_done >= budget && idx == BNXT_RX_HDL)
2805 break;
2806
2807 cpr2 = cpr->cp_ring_arr[idx];
2808 work_done += __bnxt_poll_work(bp, cpr2,
2809 budget - work_done);
2810 cpr->has_more_work |= cpr2->has_more_work;
2811 } else {
2812 bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2813 }
2814 raw_cons = NEXT_RAW_CMP(raw_cons);
2815 }
2816 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
2817 if (raw_cons != cpr->cp_raw_cons) {
2818 cpr->cp_raw_cons = raw_cons;
2819 BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2820 }
2821 poll_done:
2822 cpr_rx = cpr->cp_ring_arr[BNXT_RX_HDL];
2823 if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) {
2824 struct dim_sample dim_sample = {};
2825
2826 dim_update_sample(cpr->event_ctr,
2827 cpr_rx->rx_packets,
2828 cpr_rx->rx_bytes,
2829 &dim_sample);
2830 net_dim(&cpr->dim, dim_sample);
2831 }
2832 return work_done;
2833 }
2834
2835 static void bnxt_free_tx_skbs(struct bnxt *bp)
2836 {
2837 int i, max_idx;
2838 struct pci_dev *pdev = bp->pdev;
2839
2840 if (!bp->tx_ring)
2841 return;
2842
2843 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2844 for (i = 0; i < bp->tx_nr_rings; i++) {
2845 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2846 int j;
2847
2848 if (!txr->tx_buf_ring)
2849 continue;
2850
2851 for (j = 0; j < max_idx;) {
2852 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2853 struct sk_buff *skb;
2854 int k, last;
2855
2856 if (i < bp->tx_nr_rings_xdp &&
2857 tx_buf->action == XDP_REDIRECT) {
2858 dma_unmap_single(&pdev->dev,
2859 dma_unmap_addr(tx_buf, mapping),
2860 dma_unmap_len(tx_buf, len),
2861 DMA_TO_DEVICE);
2862 xdp_return_frame(tx_buf->xdpf);
2863 tx_buf->action = 0;
2864 tx_buf->xdpf = NULL;
2865 j++;
2866 continue;
2867 }
2868
2869 skb = tx_buf->skb;
2870 if (!skb) {
2871 j++;
2872 continue;
2873 }
2874
2875 tx_buf->skb = NULL;
2876
2877 if (tx_buf->is_push) {
2878 dev_kfree_skb(skb);
2879 j += 2;
2880 continue;
2881 }
2882
2883 dma_unmap_single(&pdev->dev,
2884 dma_unmap_addr(tx_buf, mapping),
2885 skb_headlen(skb),
2886 DMA_TO_DEVICE);
2887
2888 last = tx_buf->nr_frags;
2889 j += 2;
2890 for (k = 0; k < last; k++, j++) {
2891 int ring_idx = j & bp->tx_ring_mask;
2892 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2893
2894 tx_buf = &txr->tx_buf_ring[ring_idx];
2895 dma_unmap_page(
2896 &pdev->dev,
2897 dma_unmap_addr(tx_buf, mapping),
2898 skb_frag_size(frag), DMA_TO_DEVICE);
2899 }
2900 dev_kfree_skb(skb);
2901 }
2902 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2903 }
2904 }
2905
2906 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2907 {
2908 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2909 struct pci_dev *pdev = bp->pdev;
2910 struct bnxt_tpa_idx_map *map;
2911 int i, max_idx, max_agg_idx;
2912
2913 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2914 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2915 if (!rxr->rx_tpa)
2916 goto skip_rx_tpa_free;
2917
2918 for (i = 0; i < bp->max_tpa; i++) {
2919 struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2920 u8 *data = tpa_info->data;
2921
2922 if (!data)
2923 continue;
2924
2925 dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2926 bp->rx_buf_use_size, bp->rx_dir,
2927 DMA_ATTR_WEAK_ORDERING);
2928
2929 tpa_info->data = NULL;
2930
2931 skb_free_frag(data);
2932 }
2933
2934 skip_rx_tpa_free:
2935 if (!rxr->rx_buf_ring)
2936 goto skip_rx_buf_free;
2937
2938 for (i = 0; i < max_idx; i++) {
2939 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2940 dma_addr_t mapping = rx_buf->mapping;
2941 void *data = rx_buf->data;
2942
2943 if (!data)
2944 continue;
2945
2946 rx_buf->data = NULL;
2947 if (BNXT_RX_PAGE_MODE(bp)) {
2948 page_pool_recycle_direct(rxr->page_pool, data);
2949 } else {
2950 dma_unmap_single_attrs(&pdev->dev, mapping,
2951 bp->rx_buf_use_size, bp->rx_dir,
2952 DMA_ATTR_WEAK_ORDERING);
2953 skb_free_frag(data);
2954 }
2955 }
2956
2957 skip_rx_buf_free:
2958 if (!rxr->rx_agg_ring)
2959 goto skip_rx_agg_free;
2960
2961 for (i = 0; i < max_agg_idx; i++) {
2962 struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2963 struct page *page = rx_agg_buf->page;
2964
2965 if (!page)
2966 continue;
2967
2968 rx_agg_buf->page = NULL;
2969 __clear_bit(i, rxr->rx_agg_bmap);
2970
2971 page_pool_recycle_direct(rxr->page_pool, page);
2972 }
2973
2974 skip_rx_agg_free:
2975 map = rxr->rx_tpa_idx_map;
2976 if (map)
2977 memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2978 }
2979
2980 static void bnxt_free_rx_skbs(struct bnxt *bp)
2981 {
2982 int i;
2983
2984 if (!bp->rx_ring)
2985 return;
2986
2987 for (i = 0; i < bp->rx_nr_rings; i++)
2988 bnxt_free_one_rx_ring_skbs(bp, i);
2989 }
2990
2991 static void bnxt_free_skbs(struct bnxt *bp)
2992 {
2993 bnxt_free_tx_skbs(bp);
2994 bnxt_free_rx_skbs(bp);
2995 }
2996
2997 static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
2998 {
2999 u8 init_val = mem_init->init_val;
3000 u16 offset = mem_init->offset;
3001 u8 *p2 = p;
3002 int i;
3003
3004 if (!init_val)
3005 return;
3006 if (offset == BNXT_MEM_INVALID_OFFSET) {
3007 memset(p, init_val, len);
3008 return;
3009 }
3010 for (i = 0; i < len; i += mem_init->size)
3011 *(p2 + i + offset) = init_val;
3012 }
3013
3014 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3015 {
3016 struct pci_dev *pdev = bp->pdev;
3017 int i;
3018
3019 if (!rmem->pg_arr)
3020 goto skip_pages;
3021
3022 for (i = 0; i < rmem->nr_pages; i++) {
3023 if (!rmem->pg_arr[i])
3024 continue;
3025
3026 dma_free_coherent(&pdev->dev, rmem->page_size,
3027 rmem->pg_arr[i], rmem->dma_arr[i]);
3028
3029 rmem->pg_arr[i] = NULL;
3030 }
3031 skip_pages:
3032 if (rmem->pg_tbl) {
3033 size_t pg_tbl_size = rmem->nr_pages * 8;
3034
3035 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3036 pg_tbl_size = rmem->page_size;
3037 dma_free_coherent(&pdev->dev, pg_tbl_size,
3038 rmem->pg_tbl, rmem->pg_tbl_map);
3039 rmem->pg_tbl = NULL;
3040 }
3041 if (rmem->vmem_size && *rmem->vmem) {
3042 vfree(*rmem->vmem);
3043 *rmem->vmem = NULL;
3044 }
3045 }
3046
3047 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3048 {
3049 struct pci_dev *pdev = bp->pdev;
3050 u64 valid_bit = 0;
3051 int i;
3052
3053 if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3054 valid_bit = PTU_PTE_VALID;
3055 if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3056 size_t pg_tbl_size = rmem->nr_pages * 8;
3057
3058 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3059 pg_tbl_size = rmem->page_size;
3060 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3061 &rmem->pg_tbl_map,
3062 GFP_KERNEL);
3063 if (!rmem->pg_tbl)
3064 return -ENOMEM;
3065 }
3066
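/* Allocate each ring page and, for multi-page rings, record its DMA address
 * in the page table; ring-PTE users also tag the final entries
 * NEXT_TO_LAST/LAST.
 */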
3067 for (i = 0; i < rmem->nr_pages; i++) {
3068 u64 extra_bits = valid_bit;
3069
3070 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3071 rmem->page_size,
3072 &rmem->dma_arr[i],
3073 GFP_KERNEL);
3074 if (!rmem->pg_arr[i])
3075 return -ENOMEM;
3076
3077 if (rmem->mem_init)
3078 bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
3079 rmem->page_size);
3080 if (rmem->nr_pages > 1 || rmem->depth > 0) {
3081 if (i == rmem->nr_pages - 2 &&
3082 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3083 extra_bits |= PTU_PTE_NEXT_TO_LAST;
3084 else if (i == rmem->nr_pages - 1 &&
3085 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3086 extra_bits |= PTU_PTE_LAST;
3087 rmem->pg_tbl[i] =
3088 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3089 }
3090 }
3091
3092 if (rmem->vmem_size) {
3093 *rmem->vmem = vzalloc(rmem->vmem_size);
3094 if (!(*rmem->vmem))
3095 return -ENOMEM;
3096 }
3097 return 0;
3098 }
3099
3100 static void bnxt_free_tpa_info(struct bnxt *bp)
3101 {
3102 int i, j;
3103
3104 for (i = 0; i < bp->rx_nr_rings; i++) {
3105 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3106
3107 kfree(rxr->rx_tpa_idx_map);
3108 rxr->rx_tpa_idx_map = NULL;
3109 if (rxr->rx_tpa) {
3110 for (j = 0; j < bp->max_tpa; j++) {
3111 kfree(rxr->rx_tpa[j].agg_arr);
3112 rxr->rx_tpa[j].agg_arr = NULL;
3113 }
3114 }
3115 kfree(rxr->rx_tpa);
3116 rxr->rx_tpa = NULL;
3117 }
3118 }
3119
3120 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3121 {
3122 int i, j;
3123
3124 bp->max_tpa = MAX_TPA;
3125 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3126 if (!bp->max_tpa_v2)
3127 return 0;
3128 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3129 }
3130
3131 for (i = 0; i < bp->rx_nr_rings; i++) {
3132 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3133 struct rx_agg_cmp *agg;
3134
3135 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3136 GFP_KERNEL);
3137 if (!rxr->rx_tpa)
3138 return -ENOMEM;
3139
3140 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3141 continue;
3142 for (j = 0; j < bp->max_tpa; j++) {
3143 agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
3144 if (!agg)
3145 return -ENOMEM;
3146 rxr->rx_tpa[j].agg_arr = agg;
3147 }
3148 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3149 GFP_KERNEL);
3150 if (!rxr->rx_tpa_idx_map)
3151 return -ENOMEM;
3152 }
3153 return 0;
3154 }
3155
3156 static void bnxt_free_rx_rings(struct bnxt *bp)
3157 {
3158 int i;
3159
3160 if (!bp->rx_ring)
3161 return;
3162
3163 bnxt_free_tpa_info(bp);
3164 for (i = 0; i < bp->rx_nr_rings; i++) {
3165 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3166 struct bnxt_ring_struct *ring;
3167
3168 if (rxr->xdp_prog)
3169 bpf_prog_put(rxr->xdp_prog);
3170
3171 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3172 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3173
3174 page_pool_destroy(rxr->page_pool);
3175 rxr->page_pool = NULL;
3176
3177 kfree(rxr->rx_agg_bmap);
3178 rxr->rx_agg_bmap = NULL;
3179
3180 ring = &rxr->rx_ring_struct;
3181 bnxt_free_ring(bp, &ring->ring_mem);
3182
3183 ring = &rxr->rx_agg_ring_struct;
3184 bnxt_free_ring(bp, &ring->ring_mem);
3185 }
3186 }
3187
3188 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3189 struct bnxt_rx_ring_info *rxr)
3190 {
3191 struct page_pool_params pp = { 0 };
3192
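/* Size the pool for the aggregation ring; in page mode the normal RX ring
 * draws from the same pool, so account for it as well.
 */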
3193 pp.pool_size = bp->rx_agg_ring_size;
3194 if (BNXT_RX_PAGE_MODE(bp))
3195 pp.pool_size += bp->rx_ring_size;
3196 pp.nid = dev_to_node(&bp->pdev->dev);
3197 pp.napi = &rxr->bnapi->napi;
3198 pp.dev = &bp->pdev->dev;
3199 pp.dma_dir = bp->rx_dir;
3200 pp.max_len = PAGE_SIZE;
3201 pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
3202 if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
3203 pp.flags |= PP_FLAG_PAGE_FRAG;
3204
3205 rxr->page_pool = page_pool_create(&pp);
3206 if (IS_ERR(rxr->page_pool)) {
3207 int err = PTR_ERR(rxr->page_pool);
3208
3209 rxr->page_pool = NULL;
3210 return err;
3211 }
3212 return 0;
3213 }
3214
3215 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3216 {
3217 int i, rc = 0, agg_rings = 0;
3218
3219 if (!bp->rx_ring)
3220 return -ENOMEM;
3221
3222 if (bp->flags & BNXT_FLAG_AGG_RINGS)
3223 agg_rings = 1;
3224
3225 for (i = 0; i < bp->rx_nr_rings; i++) {
3226 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3227 struct bnxt_ring_struct *ring;
3228
3229 ring = &rxr->rx_ring_struct;
3230
3231 rc = bnxt_alloc_rx_page_pool(bp, rxr);
3232 if (rc)
3233 return rc;
3234
3235 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3236 if (rc < 0)
3237 return rc;
3238
3239 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3240 MEM_TYPE_PAGE_POOL,
3241 rxr->page_pool);
3242 if (rc) {
3243 xdp_rxq_info_unreg(&rxr->xdp_rxq);
3244 return rc;
3245 }
3246
3247 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3248 if (rc)
3249 return rc;
3250
3251 ring->grp_idx = i;
3252 if (agg_rings) {
3253 u16 mem_size;
3254
3255 ring = &rxr->rx_agg_ring_struct;
3256 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3257 if (rc)
3258 return rc;
3259
3260 ring->grp_idx = i;
3261 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3262 mem_size = rxr->rx_agg_bmap_size / 8;
3263 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3264 if (!rxr->rx_agg_bmap)
3265 return -ENOMEM;
3266 }
3267 }
3268 if (bp->flags & BNXT_FLAG_TPA)
3269 rc = bnxt_alloc_tpa_info(bp);
3270 return rc;
3271 }
3272
3273 static void bnxt_free_tx_rings(struct bnxt *bp)
3274 {
3275 int i;
3276 struct pci_dev *pdev = bp->pdev;
3277
3278 if (!bp->tx_ring)
3279 return;
3280
3281 for (i = 0; i < bp->tx_nr_rings; i++) {
3282 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3283 struct bnxt_ring_struct *ring;
3284
3285 if (txr->tx_push) {
3286 dma_free_coherent(&pdev->dev, bp->tx_push_size,
3287 txr->tx_push, txr->tx_push_mapping);
3288 txr->tx_push = NULL;
3289 }
3290
3291 ring = &txr->tx_ring_struct;
3292
3293 bnxt_free_ring(bp, &ring->ring_mem);
3294 }
3295 }
3296
3297 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3298 {
3299 int i, j, rc;
3300 struct pci_dev *pdev = bp->pdev;
3301
3302 bp->tx_push_size = 0;
3303 if (bp->tx_push_thresh) {
3304 int push_size;
3305
3306 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3307 bp->tx_push_thresh);
3308
3309 if (push_size > 256) {
3310 push_size = 0;
3311 bp->tx_push_thresh = 0;
3312 }
3313
3314 bp->tx_push_size = push_size;
3315 }
3316
3317 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3318 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3319 struct bnxt_ring_struct *ring;
3320 u8 qidx;
3321
3322 ring = &txr->tx_ring_struct;
3323
3324 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3325 if (rc)
3326 return rc;
3327
3328 ring->grp_idx = txr->bnapi->index;
3329 if (bp->tx_push_size) {
3330 dma_addr_t mapping;
3331
3332 /* One pre-allocated DMA buffer to back up
3333 * the TX push operation
3334 */
3335 txr->tx_push = dma_alloc_coherent(&pdev->dev,
3336 bp->tx_push_size,
3337 &txr->tx_push_mapping,
3338 GFP_KERNEL);
3339
3340 if (!txr->tx_push)
3341 return -ENOMEM;
3342
3343 mapping = txr->tx_push_mapping +
3344 sizeof(struct tx_push_bd);
3345 txr->data_mapping = cpu_to_le64(mapping);
3346 }
3347 qidx = bp->tc_to_qidx[j];
3348 ring->queue_id = bp->q_info[qidx].queue_id;
3349 spin_lock_init(&txr->xdp_tx_lock);
3350 if (i < bp->tx_nr_rings_xdp)
3351 continue;
3352 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3353 j++;
3354 }
3355 return 0;
3356 }
3357
3358 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3359 {
3360 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3361
3362 kfree(cpr->cp_desc_ring);
3363 cpr->cp_desc_ring = NULL;
3364 ring->ring_mem.pg_arr = NULL;
3365 kfree(cpr->cp_desc_mapping);
3366 cpr->cp_desc_mapping = NULL;
3367 ring->ring_mem.dma_arr = NULL;
3368 }
3369
3370 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3371 {
3372 cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3373 if (!cpr->cp_desc_ring)
3374 return -ENOMEM;
3375 cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3376 GFP_KERNEL);
3377 if (!cpr->cp_desc_mapping)
3378 return -ENOMEM;
3379 return 0;
3380 }
3381
3382 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3383 {
3384 int i;
3385
3386 if (!bp->bnapi)
3387 return;
3388 for (i = 0; i < bp->cp_nr_rings; i++) {
3389 struct bnxt_napi *bnapi = bp->bnapi[i];
3390
3391 if (!bnapi)
3392 continue;
3393 bnxt_free_cp_arrays(&bnapi->cp_ring);
3394 }
3395 }
3396
3397 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3398 {
3399 int i, n = bp->cp_nr_pages;
3400
3401 for (i = 0; i < bp->cp_nr_rings; i++) {
3402 struct bnxt_napi *bnapi = bp->bnapi[i];
3403 int rc;
3404
3405 if (!bnapi)
3406 continue;
3407 rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3408 if (rc)
3409 return rc;
3410 }
3411 return 0;
3412 }
3413
3414 static void bnxt_free_cp_rings(struct bnxt *bp)
3415 {
3416 int i;
3417
3418 if (!bp->bnapi)
3419 return;
3420
3421 for (i = 0; i < bp->cp_nr_rings; i++) {
3422 struct bnxt_napi *bnapi = bp->bnapi[i];
3423 struct bnxt_cp_ring_info *cpr;
3424 struct bnxt_ring_struct *ring;
3425 int j;
3426
3427 if (!bnapi)
3428 continue;
3429
3430 cpr = &bnapi->cp_ring;
3431 ring = &cpr->cp_ring_struct;
3432
3433 bnxt_free_ring(bp, &ring->ring_mem);
3434
3435 for (j = 0; j < 2; j++) {
3436 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3437
3438 if (cpr2) {
3439 ring = &cpr2->cp_ring_struct;
3440 bnxt_free_ring(bp, &ring->ring_mem);
3441 bnxt_free_cp_arrays(cpr2);
3442 kfree(cpr2);
3443 cpr->cp_ring_arr[j] = NULL;
3444 }
3445 }
3446 }
3447 }
3448
3449 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3450 {
3451 struct bnxt_ring_mem_info *rmem;
3452 struct bnxt_ring_struct *ring;
3453 struct bnxt_cp_ring_info *cpr;
3454 int rc;
3455
3456 cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3457 if (!cpr)
3458 return NULL;
3459
3460 rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3461 if (rc) {
3462 bnxt_free_cp_arrays(cpr);
3463 kfree(cpr);
3464 return NULL;
3465 }
3466 ring = &cpr->cp_ring_struct;
3467 rmem = &ring->ring_mem;
3468 rmem->nr_pages = bp->cp_nr_pages;
3469 rmem->page_size = HW_CMPD_RING_SIZE;
3470 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3471 rmem->dma_arr = cpr->cp_desc_mapping;
3472 rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3473 rc = bnxt_alloc_ring(bp, rmem);
3474 if (rc) {
3475 bnxt_free_ring(bp, rmem);
3476 bnxt_free_cp_arrays(cpr);
3477 kfree(cpr);
3478 cpr = NULL;
3479 }
3480 return cpr;
3481 }
3482
3483 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3484 {
3485 bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3486 int i, rc, ulp_base_vec, ulp_msix;
3487
3488 ulp_msix = bnxt_get_ulp_msix_num(bp);
3489 ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3490 for (i = 0; i < bp->cp_nr_rings; i++) {
3491 struct bnxt_napi *bnapi = bp->bnapi[i];
3492 struct bnxt_cp_ring_info *cpr;
3493 struct bnxt_ring_struct *ring;
3494
3495 if (!bnapi)
3496 continue;
3497
3498 cpr = &bnapi->cp_ring;
3499 cpr->bnapi = bnapi;
3500 ring = &cpr->cp_ring_struct;
3501
3502 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3503 if (rc)
3504 return rc;
3505
3506 if (ulp_msix && i >= ulp_base_vec)
3507 ring->map_idx = i + ulp_msix;
3508 else
3509 ring->map_idx = i;
3510
3511 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3512 continue;
3513
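/* On P5 chips the per-NAPI ring above is a notification queue; allocate
 * separate RX and TX completion sub-rings under it.
 */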
3514 if (i < bp->rx_nr_rings) {
3515 struct bnxt_cp_ring_info *cpr2 =
3516 bnxt_alloc_cp_sub_ring(bp);
3517
3518 cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3519 if (!cpr2)
3520 return -ENOMEM;
3521 cpr2->bnapi = bnapi;
3522 }
3523 if ((sh && i < bp->tx_nr_rings) ||
3524 (!sh && i >= bp->rx_nr_rings)) {
3525 struct bnxt_cp_ring_info *cpr2 =
3526 bnxt_alloc_cp_sub_ring(bp);
3527
3528 cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3529 if (!cpr2)
3530 return -ENOMEM;
3531 cpr2->bnapi = bnapi;
3532 }
3533 }
3534 return 0;
3535 }
3536
3537 static void bnxt_init_ring_struct(struct bnxt *bp)
3538 {
3539 int i;
3540
3541 for (i = 0; i < bp->cp_nr_rings; i++) {
3542 struct bnxt_napi *bnapi = bp->bnapi[i];
3543 struct bnxt_ring_mem_info *rmem;
3544 struct bnxt_cp_ring_info *cpr;
3545 struct bnxt_rx_ring_info *rxr;
3546 struct bnxt_tx_ring_info *txr;
3547 struct bnxt_ring_struct *ring;
3548
3549 if (!bnapi)
3550 continue;
3551
3552 cpr = &bnapi->cp_ring;
3553 ring = &cpr->cp_ring_struct;
3554 rmem = &ring->ring_mem;
3555 rmem->nr_pages = bp->cp_nr_pages;
3556 rmem->page_size = HW_CMPD_RING_SIZE;
3557 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3558 rmem->dma_arr = cpr->cp_desc_mapping;
3559 rmem->vmem_size = 0;
3560
3561 rxr = bnapi->rx_ring;
3562 if (!rxr)
3563 goto skip_rx;
3564
3565 ring = &rxr->rx_ring_struct;
3566 rmem = &ring->ring_mem;
3567 rmem->nr_pages = bp->rx_nr_pages;
3568 rmem->page_size = HW_RXBD_RING_SIZE;
3569 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3570 rmem->dma_arr = rxr->rx_desc_mapping;
3571 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3572 rmem->vmem = (void **)&rxr->rx_buf_ring;
3573
3574 ring = &rxr->rx_agg_ring_struct;
3575 rmem = &ring->ring_mem;
3576 rmem->nr_pages = bp->rx_agg_nr_pages;
3577 rmem->page_size = HW_RXBD_RING_SIZE;
3578 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3579 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3580 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3581 rmem->vmem = (void **)&rxr->rx_agg_ring;
3582
3583 skip_rx:
3584 txr = bnapi->tx_ring;
3585 if (!txr)
3586 continue;
3587
3588 ring = &txr->tx_ring_struct;
3589 rmem = &ring->ring_mem;
3590 rmem->nr_pages = bp->tx_nr_pages;
3591 rmem->page_size = HW_RXBD_RING_SIZE;
3592 rmem->pg_arr = (void **)txr->tx_desc_ring;
3593 rmem->dma_arr = txr->tx_desc_mapping;
3594 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3595 rmem->vmem = (void **)&txr->tx_buf_ring;
3596 }
3597 }
3598
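/* Stamp every RX buffer descriptor in the ring with the given length/type
 * flags and store the running producer index in the opaque field so a
 * completion can be matched back to its software ring entry.
 */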
3599 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3600 {
3601 int i;
3602 u32 prod;
3603 struct rx_bd **rx_buf_ring;
3604
3605 rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3606 for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3607 int j;
3608 struct rx_bd *rxbd;
3609
3610 rxbd = rx_buf_ring[i];
3611 if (!rxbd)
3612 continue;
3613
3614 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3615 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3616 rxbd->rx_bd_opaque = prod;
3617 }
3618 }
3619 }
3620
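/* Populate one RX ring with buffers: normal RX buffers first, then
 * aggregation pages when aggregation rings are enabled, and finally the
 * per-ring TPA buffer pool when TPA is active.
 */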
3621 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3622 {
3623 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3624 struct net_device *dev = bp->dev;
3625 u32 prod;
3626 int i;
3627
3628 prod = rxr->rx_prod;
3629 for (i = 0; i < bp->rx_ring_size; i++) {
3630 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3631 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3632 ring_nr, i, bp->rx_ring_size);
3633 break;
3634 }
3635 prod = NEXT_RX(prod);
3636 }
3637 rxr->rx_prod = prod;
3638
3639 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3640 return 0;
3641
3642 prod = rxr->rx_agg_prod;
3643 for (i = 0; i < bp->rx_agg_ring_size; i++) {
3644 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3645 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3646 ring_nr, i, bp->rx_agg_ring_size);
3647 break;
3648 }
3649 prod = NEXT_RX_AGG(prod);
3650 }
3651 rxr->rx_agg_prod = prod;
3652
3653 if (rxr->rx_tpa) {
3654 dma_addr_t mapping;
3655 u8 *data;
3656
3657 for (i = 0; i < bp->max_tpa; i++) {
3658 data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
3659 if (!data)
3660 return -ENOMEM;
3661
3662 rxr->rx_tpa[i].data = data;
3663 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3664 rxr->rx_tpa[i].mapping = mapping;
3665 }
3666 }
3667 return 0;
3668 }
3669
3670 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3671 {
3672 struct bnxt_rx_ring_info *rxr;
3673 struct bnxt_ring_struct *ring;
3674 u32 type;
3675
3676 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3677 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3678
3679 if (NET_IP_ALIGN == 2)
3680 type |= RX_BD_FLAGS_SOP;
3681
3682 rxr = &bp->rx_ring[ring_nr];
3683 ring = &rxr->rx_ring_struct;
3684 bnxt_init_rxbd_pages(ring, type);
3685
3686 if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3687 bpf_prog_add(bp->xdp_prog, 1);
3688 rxr->xdp_prog = bp->xdp_prog;
3689 }
3690 ring->fw_ring_id = INVALID_HW_RING_ID;
3691
3692 ring = &rxr->rx_agg_ring_struct;
3693 ring->fw_ring_id = INVALID_HW_RING_ID;
3694
3695 if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3696 type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3697 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3698
3699 bnxt_init_rxbd_pages(ring, type);
3700 }
3701
3702 return bnxt_alloc_one_rx_ring(bp, ring_nr);
3703 }
3704
3705 static void bnxt_init_cp_rings(struct bnxt *bp)
3706 {
3707 int i, j;
3708
3709 for (i = 0; i < bp->cp_nr_rings; i++) {
3710 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3711 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3712
3713 ring->fw_ring_id = INVALID_HW_RING_ID;
3714 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3715 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3716 for (j = 0; j < 2; j++) {
3717 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3718
3719 if (!cpr2)
3720 continue;
3721
3722 ring = &cpr2->cp_ring_struct;
3723 ring->fw_ring_id = INVALID_HW_RING_ID;
3724 cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3725 cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3726 }
3727 }
3728 }
3729
3730 static int bnxt_init_rx_rings(struct bnxt *bp)
3731 {
3732 int i, rc = 0;
3733
3734 if (BNXT_RX_PAGE_MODE(bp)) {
3735 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3736 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3737 } else {
3738 bp->rx_offset = BNXT_RX_OFFSET;
3739 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3740 }
3741
3742 for (i = 0; i < bp->rx_nr_rings; i++) {
3743 rc = bnxt_init_one_rx_ring(bp, i);
3744 if (rc)
3745 break;
3746 }
3747
3748 return rc;
3749 }
3750
3751 static int bnxt_init_tx_rings(struct bnxt *bp)
3752 {
3753 u16 i;
3754
3755 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3756 BNXT_MIN_TX_DESC_CNT);
3757
3758 for (i = 0; i < bp->tx_nr_rings; i++) {
3759 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3760 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3761
3762 ring->fw_ring_id = INVALID_HW_RING_ID;
3763 }
3764
3765 return 0;
3766 }
3767
3768 static void bnxt_free_ring_grps(struct bnxt *bp)
3769 {
3770 kfree(bp->grp_info);
3771 bp->grp_info = NULL;
3772 }
3773
3774 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3775 {
3776 int i;
3777
3778 if (irq_re_init) {
3779 bp->grp_info = kcalloc(bp->cp_nr_rings,
3780 sizeof(struct bnxt_ring_grp_info),
3781 GFP_KERNEL);
3782 if (!bp->grp_info)
3783 return -ENOMEM;
3784 }
3785 for (i = 0; i < bp->cp_nr_rings; i++) {
3786 if (irq_re_init)
3787 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3788 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3789 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3790 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3791 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3792 }
3793 return 0;
3794 }
3795
3796 static void bnxt_free_vnics(struct bnxt *bp)
3797 {
3798 kfree(bp->vnic_info);
3799 bp->vnic_info = NULL;
3800 bp->nr_vnics = 0;
3801 }
3802
3803 static int bnxt_alloc_vnics(struct bnxt *bp)
3804 {
3805 int num_vnics = 1;
3806
3807 #ifdef CONFIG_RFS_ACCEL
3808 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3809 num_vnics += bp->rx_nr_rings;
3810 #endif
3811
3812 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3813 num_vnics++;
3814
3815 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3816 GFP_KERNEL);
3817 if (!bp->vnic_info)
3818 return -ENOMEM;
3819
3820 bp->nr_vnics = num_vnics;
3821 return 0;
3822 }
3823
3824 static void bnxt_init_vnics(struct bnxt *bp)
3825 {
3826 int i;
3827
3828 for (i = 0; i < bp->nr_vnics; i++) {
3829 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3830 int j;
3831
3832 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3833 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3834 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3835
3836 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3837
3838 if (bp->vnic_info[i].rss_hash_key) {
3839 if (i == 0)
3840 get_random_bytes(vnic->rss_hash_key,
3841 HW_HASH_KEY_SIZE);
3842 else
3843 memcpy(vnic->rss_hash_key,
3844 bp->vnic_info[0].rss_hash_key,
3845 HW_HASH_KEY_SIZE);
3846 }
3847 }
3848 }
3849
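/* Equivalent to roundup_pow_of_two(ring_size / desc_per_pg + 1), with a
 * minimum of one page.  For example, with ring_size = 511 and 64
 * descriptors per page: 511 / 64 = 7, plus 1 is 8, which is already a
 * power of two, so 8 pages are used.
 */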
3850 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3851 {
3852 int pages;
3853
3854 pages = ring_size / desc_per_pg;
3855
3856 if (!pages)
3857 return 1;
3858
3859 pages++;
3860
3861 while (pages & (pages - 1))
3862 pages++;
3863
3864 return pages;
3865 }
3866
3867 void bnxt_set_tpa_flags(struct bnxt *bp)
3868 {
3869 bp->flags &= ~BNXT_FLAG_TPA;
3870 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3871 return;
3872 if (bp->dev->features & NETIF_F_LRO)
3873 bp->flags |= BNXT_FLAG_LRO;
3874 else if (bp->dev->features & NETIF_F_GRO_HW)
3875 bp->flags |= BNXT_FLAG_GRO;
3876 }
3877
3878 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3879 * be set on entry.
3880 */
3881 void bnxt_set_ring_params(struct bnxt *bp)
3882 {
3883 u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3884 u32 agg_factor = 0, agg_ring_size = 0;
3885
3886 /* 8 for CRC and VLAN */
3887 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3888
3889 rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
3890 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3891
3892 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3893 ring_size = bp->rx_ring_size;
3894 bp->rx_agg_ring_size = 0;
3895 bp->rx_agg_nr_pages = 0;
3896
3897 if (bp->flags & BNXT_FLAG_TPA)
3898 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
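/* agg_factor scales the aggregation ring relative to the RX ring when
 * TPA is enabled: up to 4 aggregation buffers are provisioned per RX
 * ring entry, or fewer if BNXT_RX_PAGE_SIZE is larger than 16K.
 */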
3899
3900 bp->flags &= ~BNXT_FLAG_JUMBO;
3901 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3902 u32 jumbo_factor;
3903
3904 bp->flags |= BNXT_FLAG_JUMBO;
3905 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3906 if (jumbo_factor > agg_factor)
3907 agg_factor = jumbo_factor;
3908 }
3909 if (agg_factor) {
3910 if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
3911 ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
3912 netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
3913 bp->rx_ring_size, ring_size);
3914 bp->rx_ring_size = ring_size;
3915 }
3916 agg_ring_size = ring_size * agg_factor;
3917
3918 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3919 RX_DESC_CNT);
3920 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3921 u32 tmp = agg_ring_size;
3922
3923 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3924 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3925 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3926 tmp, agg_ring_size);
3927 }
3928 bp->rx_agg_ring_size = agg_ring_size;
3929 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3930
3931 if (BNXT_RX_PAGE_MODE(bp)) {
3932 rx_space = PAGE_SIZE;
3933 rx_size = PAGE_SIZE -
3934 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
3935 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3936 } else {
3937 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3938 rx_space = rx_size + NET_SKB_PAD +
3939 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3940 }
3941 }
3942
3943 bp->rx_buf_use_size = rx_size;
3944 bp->rx_buf_size = rx_space;
3945
3946 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3947 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3948
3949 ring_size = bp->tx_ring_size;
3950 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3951 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3952
3953 max_rx_cmpl = bp->rx_ring_size;
3954 /* MAX TPA needs to be added because TPA_START completions are
3955 * immediately recycled, so the TPA completions are not bound by
3956 * the RX ring size.
3957 */
3958 if (bp->flags & BNXT_FLAG_TPA)
3959 max_rx_cmpl += bp->max_tpa;
3960 /* RX and TPA completions are 32-byte, all others are 16-byte */
3961 ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3962 bp->cp_ring_size = ring_size;
3963
3964 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3965 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3966 bp->cp_nr_pages = MAX_CP_PAGES;
3967 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3968 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3969 ring_size, bp->cp_ring_size);
3970 }
3971 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3972 bp->cp_ring_mask = bp->cp_bit - 1;
3973 }
3974
3975 /* Changing allocation mode of RX rings.
3976 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3977 */
3978 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3979 {
3980 struct net_device *dev = bp->dev;
3981
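/* In XDP page mode, RX buffers are whole pages mapped bidirectionally so
 * the BPF program may rewrite them, and the maximum MTU depends on
 * whether the attached program supports multi-buffer frags.
 */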
3982 if (page_mode) {
3983 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3984 bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
3985
3986 if (bp->xdp_prog->aux->xdp_has_frags)
3987 dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
3988 else
3989 dev->max_mtu =
3990 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3991 if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
3992 bp->flags |= BNXT_FLAG_JUMBO;
3993 bp->rx_skb_func = bnxt_rx_multi_page_skb;
3994 } else {
3995 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
3996 bp->rx_skb_func = bnxt_rx_page_skb;
3997 }
3998 bp->rx_dir = DMA_BIDIRECTIONAL;
3999 /* Disable LRO or GRO_HW */
4000 netdev_update_features(dev);
4001 } else {
4002 dev->max_mtu = bp->max_mtu;
4003 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4004 bp->rx_dir = DMA_FROM_DEVICE;
4005 bp->rx_skb_func = bnxt_rx_skb;
4006 }
4007 return 0;
4008 }
4009
4010 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4011 {
4012 int i;
4013 struct bnxt_vnic_info *vnic;
4014 struct pci_dev *pdev = bp->pdev;
4015
4016 if (!bp->vnic_info)
4017 return;
4018
4019 for (i = 0; i < bp->nr_vnics; i++) {
4020 vnic = &bp->vnic_info[i];
4021
4022 kfree(vnic->fw_grp_ids);
4023 vnic->fw_grp_ids = NULL;
4024
4025 kfree(vnic->uc_list);
4026 vnic->uc_list = NULL;
4027
4028 if (vnic->mc_list) {
4029 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4030 vnic->mc_list, vnic->mc_list_mapping);
4031 vnic->mc_list = NULL;
4032 }
4033
4034 if (vnic->rss_table) {
4035 dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4036 vnic->rss_table,
4037 vnic->rss_table_dma_addr);
4038 vnic->rss_table = NULL;
4039 }
4040
4041 vnic->rss_hash_key = NULL;
4042 vnic->flags = 0;
4043 }
4044 }
4045
4046 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4047 {
4048 int i, rc = 0, size;
4049 struct bnxt_vnic_info *vnic;
4050 struct pci_dev *pdev = bp->pdev;
4051 int max_rings;
4052
4053 for (i = 0; i < bp->nr_vnics; i++) {
4054 vnic = &bp->vnic_info[i];
4055
4056 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4057 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4058
4059 if (mem_size > 0) {
4060 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4061 if (!vnic->uc_list) {
4062 rc = -ENOMEM;
4063 goto out;
4064 }
4065 }
4066 }
4067
4068 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4069 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4070 vnic->mc_list =
4071 dma_alloc_coherent(&pdev->dev,
4072 vnic->mc_list_size,
4073 &vnic->mc_list_mapping,
4074 GFP_KERNEL);
4075 if (!vnic->mc_list) {
4076 rc = -ENOMEM;
4077 goto out;
4078 }
4079 }
4080
4081 if (bp->flags & BNXT_FLAG_CHIP_P5)
4082 goto vnic_skip_grps;
4083
4084 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4085 max_rings = bp->rx_nr_rings;
4086 else
4087 max_rings = 1;
4088
4089 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4090 if (!vnic->fw_grp_ids) {
4091 rc = -ENOMEM;
4092 goto out;
4093 }
4094 vnic_skip_grps:
4095 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
4096 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4097 continue;
4098
4099 /* Allocate rss table and hash key */
4100 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
4101 if (bp->flags & BNXT_FLAG_CHIP_P5)
4102 size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
4103
4104 vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
4105 vnic->rss_table = dma_alloc_coherent(&pdev->dev,
4106 vnic->rss_table_size,
4107 &vnic->rss_table_dma_addr,
4108 GFP_KERNEL);
4109 if (!vnic->rss_table) {
4110 rc = -ENOMEM;
4111 goto out;
4112 }
4113
4114 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4115 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4116 }
4117 return 0;
4118
4119 out:
4120 return rc;
4121 }
4122
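/* Tear down the HWRM DMA pool and mark any commands still waiting for a
 * firmware response as cancelled so their waiters can bail out.
 */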
4123 static void bnxt_free_hwrm_resources(struct bnxt *bp)
4124 {
4125 struct bnxt_hwrm_wait_token *token;
4126
4127 dma_pool_destroy(bp->hwrm_dma_pool);
4128 bp->hwrm_dma_pool = NULL;
4129
4130 rcu_read_lock();
4131 hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4132 WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4133 rcu_read_unlock();
4134 }
4135
4136 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4137 {
4138 bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4139 BNXT_HWRM_DMA_SIZE,
4140 BNXT_HWRM_DMA_ALIGN, 0);
4141 if (!bp->hwrm_dma_pool)
4142 return -ENOMEM;
4143
4144 INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4145
4146 return 0;
4147 }
4148
4149 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4150 {
4151 kfree(stats->hw_masks);
4152 stats->hw_masks = NULL;
4153 kfree(stats->sw_stats);
4154 stats->sw_stats = NULL;
4155 if (stats->hw_stats) {
4156 dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4157 stats->hw_stats_map);
4158 stats->hw_stats = NULL;
4159 }
4160 }
4161
4162 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4163 bool alloc_masks)
4164 {
4165 stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4166 &stats->hw_stats_map, GFP_KERNEL);
4167 if (!stats->hw_stats)
4168 return -ENOMEM;
4169
4170 stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4171 if (!stats->sw_stats)
4172 goto stats_mem_err;
4173
4174 if (alloc_masks) {
4175 stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4176 if (!stats->hw_masks)
4177 goto stats_mem_err;
4178 }
4179 return 0;
4180
4181 stats_mem_err:
4182 bnxt_free_stats_mem(bp, stats);
4183 return -ENOMEM;
4184 }
4185
4186 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4187 {
4188 int i;
4189
4190 for (i = 0; i < count; i++)
4191 mask_arr[i] = mask;
4192 }
4193
4194 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4195 {
4196 int i;
4197
4198 for (i = 0; i < count; i++)
4199 mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4200 }
4201
4202 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4203 struct bnxt_stats_mem *stats)
4204 {
4205 struct hwrm_func_qstats_ext_output *resp;
4206 struct hwrm_func_qstats_ext_input *req;
4207 __le64 *hw_masks;
4208 int rc;
4209
4210 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4211 !(bp->flags & BNXT_FLAG_CHIP_P5))
4212 return -EOPNOTSUPP;
4213
4214 rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4215 if (rc)
4216 return rc;
4217
4218 req->fid = cpu_to_le16(0xffff);
4219 req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4220
4221 resp = hwrm_req_hold(bp, req);
4222 rc = hwrm_req_send(bp, req);
4223 if (!rc) {
4224 hw_masks = &resp->rx_ucast_pkts;
4225 bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4226 }
4227 hwrm_req_drop(bp, req);
4228 return rc;
4229 }
4230
4231 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4232 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4233
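/* Initialize the counter-width masks used when accumulating hardware
 * statistics.  The masks are queried from firmware where supported;
 * otherwise ring counters are assumed to be 48 bits wide on P5 chips
 * (full 64 bits on older chips) and port counters 40 bits wide.
 */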
4234 static void bnxt_init_stats(struct bnxt *bp)
4235 {
4236 struct bnxt_napi *bnapi = bp->bnapi[0];
4237 struct bnxt_cp_ring_info *cpr;
4238 struct bnxt_stats_mem *stats;
4239 __le64 *rx_stats, *tx_stats;
4240 int rc, rx_count, tx_count;
4241 u64 *rx_masks, *tx_masks;
4242 u64 mask;
4243 u8 flags;
4244
4245 cpr = &bnapi->cp_ring;
4246 stats = &cpr->stats;
4247 rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4248 if (rc) {
4249 if (bp->flags & BNXT_FLAG_CHIP_P5)
4250 mask = (1ULL << 48) - 1;
4251 else
4252 mask = -1ULL;
4253 bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4254 }
4255 if (bp->flags & BNXT_FLAG_PORT_STATS) {
4256 stats = &bp->port_stats;
4257 rx_stats = stats->hw_stats;
4258 rx_masks = stats->hw_masks;
4259 rx_count = sizeof(struct rx_port_stats) / 8;
4260 tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4261 tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4262 tx_count = sizeof(struct tx_port_stats) / 8;
4263
4264 flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4265 rc = bnxt_hwrm_port_qstats(bp, flags);
4266 if (rc) {
4267 mask = (1ULL << 40) - 1;
4268
4269 bnxt_fill_masks(rx_masks, mask, rx_count);
4270 bnxt_fill_masks(tx_masks, mask, tx_count);
4271 } else {
4272 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4273 bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4274 bnxt_hwrm_port_qstats(bp, 0);
4275 }
4276 }
4277 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4278 stats = &bp->rx_port_stats_ext;
4279 rx_stats = stats->hw_stats;
4280 rx_masks = stats->hw_masks;
4281 rx_count = sizeof(struct rx_port_stats_ext) / 8;
4282 stats = &bp->tx_port_stats_ext;
4283 tx_stats = stats->hw_stats;
4284 tx_masks = stats->hw_masks;
4285 tx_count = sizeof(struct tx_port_stats_ext) / 8;
4286
4287 flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4288 rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4289 if (rc) {
4290 mask = (1ULL << 40) - 1;
4291
4292 bnxt_fill_masks(rx_masks, mask, rx_count);
4293 if (tx_stats)
4294 bnxt_fill_masks(tx_masks, mask, tx_count);
4295 } else {
4296 bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4297 if (tx_stats)
4298 bnxt_copy_hw_masks(tx_masks, tx_stats,
4299 tx_count);
4300 bnxt_hwrm_port_qstats_ext(bp, 0);
4301 }
4302 }
4303 }
4304
4305 static void bnxt_free_port_stats(struct bnxt *bp)
4306 {
4307 bp->flags &= ~BNXT_FLAG_PORT_STATS;
4308 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4309
4310 bnxt_free_stats_mem(bp, &bp->port_stats);
4311 bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4312 bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4313 }
4314
4315 static void bnxt_free_ring_stats(struct bnxt *bp)
4316 {
4317 int i;
4318
4319 if (!bp->bnapi)
4320 return;
4321
4322 for (i = 0; i < bp->cp_nr_rings; i++) {
4323 struct bnxt_napi *bnapi = bp->bnapi[i];
4324 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4325
4326 bnxt_free_stats_mem(bp, &cpr->stats);
4327 }
4328 }
4329
4330 static int bnxt_alloc_stats(struct bnxt *bp)
4331 {
4332 u32 size, i;
4333 int rc;
4334
4335 size = bp->hw_ring_stats_size;
4336
4337 for (i = 0; i < bp->cp_nr_rings; i++) {
4338 struct bnxt_napi *bnapi = bp->bnapi[i];
4339 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4340
4341 cpr->stats.len = size;
4342 rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4343 if (rc)
4344 return rc;
4345
4346 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4347 }
4348
4349 if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4350 return 0;
4351
4352 if (bp->port_stats.hw_stats)
4353 goto alloc_ext_stats;
4354
4355 bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4356 rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4357 if (rc)
4358 return rc;
4359
4360 bp->flags |= BNXT_FLAG_PORT_STATS;
4361
4362 alloc_ext_stats:
4363 /* Display extended statistics only if FW supports it */
4364 if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4365 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4366 return 0;
4367
4368 if (bp->rx_port_stats_ext.hw_stats)
4369 goto alloc_tx_ext_stats;
4370
4371 bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4372 rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4373 /* Extended stats are optional */
4374 if (rc)
4375 return 0;
4376
4377 alloc_tx_ext_stats:
4378 if (bp->tx_port_stats_ext.hw_stats)
4379 return 0;
4380
4381 if (bp->hwrm_spec_code >= 0x10902 ||
4382 (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4383 bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4384 rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4385 /* Extended stats are optional */
4386 if (rc)
4387 return 0;
4388 }
4389 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4390 return 0;
4391 }
4392
4393 static void bnxt_clear_ring_indices(struct bnxt *bp)
4394 {
4395 int i;
4396
4397 if (!bp->bnapi)
4398 return;
4399
4400 for (i = 0; i < bp->cp_nr_rings; i++) {
4401 struct bnxt_napi *bnapi = bp->bnapi[i];
4402 struct bnxt_cp_ring_info *cpr;
4403 struct bnxt_rx_ring_info *rxr;
4404 struct bnxt_tx_ring_info *txr;
4405
4406 if (!bnapi)
4407 continue;
4408
4409 cpr = &bnapi->cp_ring;
4410 cpr->cp_raw_cons = 0;
4411
4412 txr = bnapi->tx_ring;
4413 if (txr) {
4414 txr->tx_prod = 0;
4415 txr->tx_cons = 0;
4416 }
4417
4418 rxr = bnapi->rx_ring;
4419 if (rxr) {
4420 rxr->rx_prod = 0;
4421 rxr->rx_agg_prod = 0;
4422 rxr->rx_sw_agg_prod = 0;
4423 rxr->rx_next_cons = 0;
4424 }
4425 }
4426 }
4427
4428 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4429 {
4430 #ifdef CONFIG_RFS_ACCEL
4431 int i;
4432
4433 /* We are under rtnl_lock and all our NAPIs have been disabled,
4434 * so it is safe to delete the hash table.
4435 */
4436 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4437 struct hlist_head *head;
4438 struct hlist_node *tmp;
4439 struct bnxt_ntuple_filter *fltr;
4440
4441 head = &bp->ntp_fltr_hash_tbl[i];
4442 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4443 hlist_del(&fltr->hash);
4444 kfree(fltr);
4445 }
4446 }
4447 if (irq_reinit) {
4448 bitmap_free(bp->ntp_fltr_bmap);
4449 bp->ntp_fltr_bmap = NULL;
4450 }
4451 bp->ntp_fltr_count = 0;
4452 #endif
4453 }
4454
4455 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4456 {
4457 #ifdef CONFIG_RFS_ACCEL
4458 int i, rc = 0;
4459
4460 if (!(bp->flags & BNXT_FLAG_RFS))
4461 return 0;
4462
4463 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4464 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4465
4466 bp->ntp_fltr_count = 0;
4467 bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);
4468
4469 if (!bp->ntp_fltr_bmap)
4470 rc = -ENOMEM;
4471
4472 return rc;
4473 #else
4474 return 0;
4475 #endif
4476 }
4477
4478 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4479 {
4480 bnxt_free_vnic_attributes(bp);
4481 bnxt_free_tx_rings(bp);
4482 bnxt_free_rx_rings(bp);
4483 bnxt_free_cp_rings(bp);
4484 bnxt_free_all_cp_arrays(bp);
4485 bnxt_free_ntp_fltrs(bp, irq_re_init);
4486 if (irq_re_init) {
4487 bnxt_free_ring_stats(bp);
4488 if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4489 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4490 bnxt_free_port_stats(bp);
4491 bnxt_free_ring_grps(bp);
4492 bnxt_free_vnics(bp);
4493 kfree(bp->tx_ring_map);
4494 bp->tx_ring_map = NULL;
4495 kfree(bp->tx_ring);
4496 bp->tx_ring = NULL;
4497 kfree(bp->rx_ring);
4498 bp->rx_ring = NULL;
4499 kfree(bp->bnapi);
4500 bp->bnapi = NULL;
4501 } else {
4502 bnxt_clear_ring_indices(bp);
4503 }
4504 }
4505
4506 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4507 {
4508 int i, j, rc, size, arr_size;
4509 void *bnapi;
4510
4511 if (irq_re_init) {
4512 /* Allocate bnapi mem pointer array and mem block for
4513 * all queues
4514 */
4515 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4516 bp->cp_nr_rings);
4517 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4518 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4519 if (!bnapi)
4520 return -ENOMEM;
4521
4522 bp->bnapi = bnapi;
4523 bnapi += arr_size;
4524 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4525 bp->bnapi[i] = bnapi;
4526 bp->bnapi[i]->index = i;
4527 bp->bnapi[i]->bp = bp;
4528 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4529 struct bnxt_cp_ring_info *cpr =
4530 &bp->bnapi[i]->cp_ring;
4531
4532 cpr->cp_ring_struct.ring_mem.flags =
4533 BNXT_RMEM_RING_PTE_FLAG;
4534 }
4535 }
4536
4537 bp->rx_ring = kcalloc(bp->rx_nr_rings,
4538 sizeof(struct bnxt_rx_ring_info),
4539 GFP_KERNEL);
4540 if (!bp->rx_ring)
4541 return -ENOMEM;
4542
4543 for (i = 0; i < bp->rx_nr_rings; i++) {
4544 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4545
4546 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4547 rxr->rx_ring_struct.ring_mem.flags =
4548 BNXT_RMEM_RING_PTE_FLAG;
4549 rxr->rx_agg_ring_struct.ring_mem.flags =
4550 BNXT_RMEM_RING_PTE_FLAG;
4551 }
4552 rxr->bnapi = bp->bnapi[i];
4553 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4554 }
4555
4556 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4557 sizeof(struct bnxt_tx_ring_info),
4558 GFP_KERNEL);
4559 if (!bp->tx_ring)
4560 return -ENOMEM;
4561
4562 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4563 GFP_KERNEL);
4564
4565 if (!bp->tx_ring_map)
4566 return -ENOMEM;
4567
4568 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4569 j = 0;
4570 else
4571 j = bp->rx_nr_rings;
4572
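/* With shared rings, TX ring i shares the NAPI (and completion ring) of
 * RX ring i; otherwise the TX rings get their own NAPIs starting right
 * after the RX rings.
 */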
4573 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4574 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4575
4576 if (bp->flags & BNXT_FLAG_CHIP_P5)
4577 txr->tx_ring_struct.ring_mem.flags =
4578 BNXT_RMEM_RING_PTE_FLAG;
4579 txr->bnapi = bp->bnapi[j];
4580 bp->bnapi[j]->tx_ring = txr;
4581 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4582 if (i >= bp->tx_nr_rings_xdp) {
4583 txr->txq_index = i - bp->tx_nr_rings_xdp;
4584 bp->bnapi[j]->tx_int = bnxt_tx_int;
4585 } else {
4586 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4587 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4588 }
4589 }
4590
4591 rc = bnxt_alloc_stats(bp);
4592 if (rc)
4593 goto alloc_mem_err;
4594 bnxt_init_stats(bp);
4595
4596 rc = bnxt_alloc_ntp_fltrs(bp);
4597 if (rc)
4598 goto alloc_mem_err;
4599
4600 rc = bnxt_alloc_vnics(bp);
4601 if (rc)
4602 goto alloc_mem_err;
4603 }
4604
4605 rc = bnxt_alloc_all_cp_arrays(bp);
4606 if (rc)
4607 goto alloc_mem_err;
4608
4609 bnxt_init_ring_struct(bp);
4610
4611 rc = bnxt_alloc_rx_rings(bp);
4612 if (rc)
4613 goto alloc_mem_err;
4614
4615 rc = bnxt_alloc_tx_rings(bp);
4616 if (rc)
4617 goto alloc_mem_err;
4618
4619 rc = bnxt_alloc_cp_rings(bp);
4620 if (rc)
4621 goto alloc_mem_err;
4622
4623 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4624 BNXT_VNIC_UCAST_FLAG;
4625 rc = bnxt_alloc_vnic_attributes(bp);
4626 if (rc)
4627 goto alloc_mem_err;
4628 return 0;
4629
4630 alloc_mem_err:
4631 bnxt_free_mem(bp, true);
4632 return rc;
4633 }
4634
4635 static void bnxt_disable_int(struct bnxt *bp)
4636 {
4637 int i;
4638
4639 if (!bp->bnapi)
4640 return;
4641
4642 for (i = 0; i < bp->cp_nr_rings; i++) {
4643 struct bnxt_napi *bnapi = bp->bnapi[i];
4644 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4645 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4646
4647 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4648 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4649 }
4650 }
4651
4652 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4653 {
4654 struct bnxt_napi *bnapi = bp->bnapi[n];
4655 struct bnxt_cp_ring_info *cpr;
4656
4657 cpr = &bnapi->cp_ring;
4658 return cpr->cp_ring_struct.map_idx;
4659 }
4660
4661 static void bnxt_disable_int_sync(struct bnxt *bp)
4662 {
4663 int i;
4664
4665 if (!bp->irq_tbl)
4666 return;
4667
4668 atomic_inc(&bp->intr_sem);
4669
4670 bnxt_disable_int(bp);
4671 for (i = 0; i < bp->cp_nr_rings; i++) {
4672 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4673
4674 synchronize_irq(bp->irq_tbl[map_idx].vector);
4675 }
4676 }
4677
4678 static void bnxt_enable_int(struct bnxt *bp)
4679 {
4680 int i;
4681
4682 atomic_set(&bp->intr_sem, 0);
4683 for (i = 0; i < bp->cp_nr_rings; i++) {
4684 struct bnxt_napi *bnapi = bp->bnapi[i];
4685 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4686
4687 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4688 }
4689 }
4690
4691 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4692 bool async_only)
4693 {
4694 DECLARE_BITMAP(async_events_bmap, 256);
4695 u32 *events = (u32 *)async_events_bmap;
4696 struct hwrm_func_drv_rgtr_output *resp;
4697 struct hwrm_func_drv_rgtr_input *req;
4698 u32 flags;
4699 int rc, i;
4700
4701 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
4702 if (rc)
4703 return rc;
4704
4705 req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4706 FUNC_DRV_RGTR_REQ_ENABLES_VER |
4707 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4708
4709 req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4710 flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4711 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4712 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4713 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4714 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4715 FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4716 req->flags = cpu_to_le32(flags);
4717 req->ver_maj_8b = DRV_VER_MAJ;
4718 req->ver_min_8b = DRV_VER_MIN;
4719 req->ver_upd_8b = DRV_VER_UPD;
4720 req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
4721 req->ver_min = cpu_to_le16(DRV_VER_MIN);
4722 req->ver_upd = cpu_to_le16(DRV_VER_UPD);
4723
4724 if (BNXT_PF(bp)) {
4725 u32 data[8];
4726 int i;
4727
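/* Build the bitmap of HWRM commands issued by VFs that firmware should
 * forward to the PF driver for handling.
 */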
4728 memset(data, 0, sizeof(data));
4729 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4730 u16 cmd = bnxt_vf_req_snif[i];
4731 unsigned int bit, idx;
4732
4733 idx = cmd / 32;
4734 bit = cmd % 32;
4735 data[idx] |= 1 << bit;
4736 }
4737
4738 for (i = 0; i < 8; i++)
4739 req->vf_req_fwd[i] = cpu_to_le32(data[i]);
4740
4741 req->enables |=
4742 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4743 }
4744
4745 if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4746 req->flags |= cpu_to_le32(
4747 FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4748
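/* Build the 256-bit bitmap of async completion event ids that firmware
 * should forward to the driver.
 */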
4749 memset(async_events_bmap, 0, sizeof(async_events_bmap));
4750 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4751 u16 event_id = bnxt_async_events_arr[i];
4752
4753 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4754 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4755 continue;
4756 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
4757 !bp->ptp_cfg)
4758 continue;
4759 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4760 }
4761 if (bmap && bmap_size) {
4762 for (i = 0; i < bmap_size; i++) {
4763 if (test_bit(i, bmap))
4764 __set_bit(i, async_events_bmap);
4765 }
4766 }
4767 for (i = 0; i < 8; i++)
4768 req->async_event_fwd[i] |= cpu_to_le32(events[i]);
4769
4770 if (async_only)
4771 req->enables =
4772 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4773
4774 resp = hwrm_req_hold(bp, req);
4775 rc = hwrm_req_send(bp, req);
4776 if (!rc) {
4777 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4778 if (resp->flags &
4779 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4780 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4781 }
4782 hwrm_req_drop(bp, req);
4783 return rc;
4784 }
4785
4786 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4787 {
4788 struct hwrm_func_drv_unrgtr_input *req;
4789 int rc;
4790
4791 if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4792 return 0;
4793
4794 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
4795 if (rc)
4796 return rc;
4797 return hwrm_req_send(bp, req);
4798 }
4799
4800 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4801 {
4802 struct hwrm_tunnel_dst_port_free_input *req;
4803 int rc;
4804
4805 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
4806 bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
4807 return 0;
4808 if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
4809 bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
4810 return 0;
4811
4812 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
4813 if (rc)
4814 return rc;
4815
4816 req->tunnel_type = tunnel_type;
4817
4818 switch (tunnel_type) {
4819 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4820 req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4821 bp->vxlan_port = 0;
4822 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4823 break;
4824 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4825 req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4826 bp->nge_port = 0;
4827 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4828 break;
4829 default:
4830 break;
4831 }
4832
4833 rc = hwrm_req_send(bp, req);
4834 if (rc)
4835 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4836 rc);
4837 return rc;
4838 }
4839
4840 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4841 u8 tunnel_type)
4842 {
4843 struct hwrm_tunnel_dst_port_alloc_output *resp;
4844 struct hwrm_tunnel_dst_port_alloc_input *req;
4845 int rc;
4846
4847 rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
4848 if (rc)
4849 return rc;
4850
4851 req->tunnel_type = tunnel_type;
4852 req->tunnel_dst_port_val = port;
4853
4854 resp = hwrm_req_hold(bp, req);
4855 rc = hwrm_req_send(bp, req);
4856 if (rc) {
4857 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4858 rc);
4859 goto err_out;
4860 }
4861
4862 switch (tunnel_type) {
4863 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4864 bp->vxlan_port = port;
4865 bp->vxlan_fw_dst_port_id =
4866 le16_to_cpu(resp->tunnel_dst_port_id);
4867 break;
4868 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4869 bp->nge_port = port;
4870 bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4871 break;
4872 default:
4873 break;
4874 }
4875
4876 err_out:
4877 hwrm_req_drop(bp, req);
4878 return rc;
4879 }
4880
4881 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4882 {
4883 struct hwrm_cfa_l2_set_rx_mask_input *req;
4884 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4885 int rc;
4886
4887 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
4888 if (rc)
4889 return rc;
4890
4891 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4892 if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
4893 req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4894 req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4895 }
4896 req->mask = cpu_to_le32(vnic->rx_mask);
4897 return hwrm_req_send_silent(bp, req);
4898 }
4899
4900 #ifdef CONFIG_RFS_ACCEL
4901 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4902 struct bnxt_ntuple_filter *fltr)
4903 {
4904 struct hwrm_cfa_ntuple_filter_free_input *req;
4905 int rc;
4906
4907 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
4908 if (rc)
4909 return rc;
4910
4911 req->ntuple_filter_id = fltr->filter_id;
4912 return hwrm_req_send(bp, req);
4913 }
4914
4915 #define BNXT_NTP_FLTR_FLAGS \
4916 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4917 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4918 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4919 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4920 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4921 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4922 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4923 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4924 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4925 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4926 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4927 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4928 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
4929 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4930
4931 #define BNXT_NTP_TUNNEL_FLTR_FLAG \
4932 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4933
4934 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4935 struct bnxt_ntuple_filter *fltr)
4936 {
4937 struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4938 struct hwrm_cfa_ntuple_filter_alloc_input *req;
4939 struct flow_keys *keys = &fltr->fkeys;
4940 struct bnxt_vnic_info *vnic;
4941 u32 flags = 0;
4942 int rc;
4943
4944 rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
4945 if (rc)
4946 return rc;
4947
4948 req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4949
4950 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4951 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4952 req->dst_id = cpu_to_le16(fltr->rxq);
4953 } else {
4954 vnic = &bp->vnic_info[fltr->rxq + 1];
4955 req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
4956 }
4957 req->flags = cpu_to_le32(flags);
4958 req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4959
4960 req->ethertype = htons(ETH_P_IP);
4961 memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4962 req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4963 req->ip_protocol = keys->basic.ip_proto;
4964
4965 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4966 int i;
4967
4968 req->ethertype = htons(ETH_P_IPV6);
4969 req->ip_addr_type =
4970 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4971 *(struct in6_addr *)&req->src_ipaddr[0] =
4972 keys->addrs.v6addrs.src;
4973 *(struct in6_addr *)&req->dst_ipaddr[0] =
4974 keys->addrs.v6addrs.dst;
4975 for (i = 0; i < 4; i++) {
4976 req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4977 req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4978 }
4979 } else {
4980 req->src_ipaddr[0] = keys->addrs.v4addrs.src;
4981 req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4982 req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4983 req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4984 }
4985 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4986 req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4987 req->tunnel_type =
4988 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4989 }
4990
4991 req->src_port = keys->ports.src;
4992 req->src_port_mask = cpu_to_be16(0xffff);
4993 req->dst_port = keys->ports.dst;
4994 req->dst_port_mask = cpu_to_be16(0xffff);
4995
4996 resp = hwrm_req_hold(bp, req);
4997 rc = hwrm_req_send(bp, req);
4998 if (!rc)
4999 fltr->filter_id = resp->ntuple_filter_id;
5000 hwrm_req_drop(bp, req);
5001 return rc;
5002 }
5003 #endif
5004
5005 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
5006 const u8 *mac_addr)
5007 {
5008 struct hwrm_cfa_l2_filter_alloc_output *resp;
5009 struct hwrm_cfa_l2_filter_alloc_input *req;
5010 int rc;
5011
5012 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
5013 if (rc)
5014 return rc;
5015
5016 req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
5017 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
5018 req->flags |=
5019 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
5020 req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
5021 req->enables =
5022 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
5023 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
5024 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
5025 memcpy(req->l2_addr, mac_addr, ETH_ALEN);
5026 req->l2_addr_mask[0] = 0xff;
5027 req->l2_addr_mask[1] = 0xff;
5028 req->l2_addr_mask[2] = 0xff;
5029 req->l2_addr_mask[3] = 0xff;
5030 req->l2_addr_mask[4] = 0xff;
5031 req->l2_addr_mask[5] = 0xff;
5032
5033 resp = hwrm_req_hold(bp, req);
5034 rc = hwrm_req_send(bp, req);
5035 if (!rc)
5036 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
5037 resp->l2_filter_id;
5038 hwrm_req_drop(bp, req);
5039 return rc;
5040 }
5041
5042 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
5043 {
5044 struct hwrm_cfa_l2_filter_free_input *req;
5045 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
5046 int rc;
5047
5048 /* Any associated ntuple filters will also be cleared by firmware. */
5049 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
5050 if (rc)
5051 return rc;
5052 hwrm_req_hold(bp, req);
5053 for (i = 0; i < num_of_vnics; i++) {
5054 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5055
5056 for (j = 0; j < vnic->uc_filter_count; j++) {
5057 req->l2_filter_id = vnic->fw_l2_filter_id[j];
5058
5059 rc = hwrm_req_send(bp, req);
5060 }
5061 vnic->uc_filter_count = 0;
5062 }
5063 hwrm_req_drop(bp, req);
5064 return rc;
5065 }
5066
5067 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5068 {
5069 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5070 u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
5071 struct hwrm_vnic_tpa_cfg_input *req;
5072 int rc;
5073
5074 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5075 return 0;
5076
5077 rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
5078 if (rc)
5079 return rc;
5080
5081 if (tpa_flags) {
5082 u16 mss = bp->dev->mtu - 40;
5083 u32 nsegs, n, segs = 0, flags;
5084
5085 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
5086 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
5087 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
5088 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
5089 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
5090 if (tpa_flags & BNXT_FLAG_GRO)
5091 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
5092
5093 req->flags = cpu_to_le32(flags);
5094
5095 req->enables =
5096 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
5097 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
5098 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
5099
5100 /* The number of segs is in log2 units, and the first packet is not
5101 * counted as part of these units.
5102 */
5103 if (mss <= BNXT_RX_PAGE_SIZE) {
5104 n = BNXT_RX_PAGE_SIZE / mss;
5105 nsegs = (MAX_SKB_FRAGS - 1) * n;
5106 } else {
5107 n = mss / BNXT_RX_PAGE_SIZE;
5108 if (mss & (BNXT_RX_PAGE_SIZE - 1))
5109 n++;
5110 nsegs = (MAX_SKB_FRAGS - n) / n;
5111 }
5112
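/* Example (non-P5 path): mtu = 1500 gives mss = 1460; with 4K RX pages,
 * n = 4096 / 1460 = 2 and nsegs = (MAX_SKB_FRAGS - 1) * 2.  With the
 * common MAX_SKB_FRAGS of 17 this is 32, so max_agg_segs = ilog2(32) = 5.
 */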
5113 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5114 segs = MAX_TPA_SEGS_P5;
5115 max_aggs = bp->max_tpa;
5116 } else {
5117 segs = ilog2(nsegs);
5118 }
5119 req->max_agg_segs = cpu_to_le16(segs);
5120 req->max_aggs = cpu_to_le16(max_aggs);
5121
5122 req->min_agg_len = cpu_to_le32(512);
5123 }
5124 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5125
5126 return hwrm_req_send(bp, req);
5127 }
5128
5129 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5130 {
5131 struct bnxt_ring_grp_info *grp_info;
5132
5133 grp_info = &bp->grp_info[ring->grp_idx];
5134 return grp_info->cp_fw_ring_id;
5135 }
5136
5137 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5138 {
5139 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5140 struct bnxt_napi *bnapi = rxr->bnapi;
5141 struct bnxt_cp_ring_info *cpr;
5142
5143 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5144 return cpr->cp_ring_struct.fw_ring_id;
5145 } else {
5146 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5147 }
5148 }
5149
5150 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5151 {
5152 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5153 struct bnxt_napi *bnapi = txr->bnapi;
5154 struct bnxt_cp_ring_info *cpr;
5155
5156 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5157 return cpr->cp_ring_struct.fw_ring_id;
5158 } else {
5159 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5160 }
5161 }
5162
5163 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5164 {
5165 int entries;
5166
5167 if (bp->flags & BNXT_FLAG_CHIP_P5)
5168 entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5169 else
5170 entries = HW_HASH_INDEX_SIZE;
5171
5172 bp->rss_indir_tbl_entries = entries;
5173 bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5174 GFP_KERNEL);
5175 if (!bp->rss_indir_tbl)
5176 return -ENOMEM;
5177 return 0;
5178 }
5179
5180 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5181 {
5182 u16 max_rings, max_entries, pad, i;
5183
5184 if (!bp->rx_nr_rings)
5185 return;
5186
5187 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5188 max_rings = bp->rx_nr_rings - 1;
5189 else
5190 max_rings = bp->rx_nr_rings;
5191
5192 max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5193
5194 for (i = 0; i < max_entries; i++)
5195 bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5196
5197 pad = bp->rss_indir_tbl_entries - max_entries;
5198 if (pad)
5199 memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5200 }
5201
5202 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5203 {
5204 u16 i, tbl_size, max_ring = 0;
5205
5206 if (!bp->rss_indir_tbl)
5207 return 0;
5208
5209 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5210 for (i = 0; i < tbl_size; i++)
5211 max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5212 return max_ring;
5213 }
5214
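/* P5 chips need one RSS context per BNXT_RSS_TABLE_ENTRIES_P5 RX rings,
 * Nitro A0 needs two contexts, and all other chips need just one.
 */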
5215 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5216 {
5217 if (bp->flags & BNXT_FLAG_CHIP_P5)
5218 return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5219 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5220 return 2;
5221 return 1;
5222 }
5223
5224 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5225 {
5226 bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5227 u16 i, j;
5228
5229 /* Fill the RSS indirection table with ring group ids */
5230 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5231 if (!no_rss)
5232 j = bp->rss_indir_tbl[i];
5233 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5234 }
5235 }
5236
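/* On P5 chips each RSS indirection table entry is a pair of ring ids:
 * the RX ring's firmware ring id followed by its completion ring id.
 */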
5237 static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5238 struct bnxt_vnic_info *vnic)
5239 {
5240 __le16 *ring_tbl = vnic->rss_table;
5241 struct bnxt_rx_ring_info *rxr;
5242 u16 tbl_size, i;
5243
5244 tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5245
5246 for (i = 0; i < tbl_size; i++) {
5247 u16 ring_id, j;
5248
5249 j = bp->rss_indir_tbl[i];
5250 rxr = &bp->rx_ring[j];
5251
5252 ring_id = rxr->rx_ring_struct.fw_ring_id;
5253 *ring_tbl++ = cpu_to_le16(ring_id);
5254 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5255 *ring_tbl++ = cpu_to_le16(ring_id);
5256 }
5257 }
5258
5259 static void
5260 __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
5261 struct bnxt_vnic_info *vnic)
5262 {
5263 if (bp->flags & BNXT_FLAG_CHIP_P5)
5264 bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5265 else
5266 bnxt_fill_hw_rss_tbl(bp, vnic);
5267
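/* If only the hash type changed since the last configuration, send just
 * the changed bits, flagged as either added (INCLUDE) or removed
 * (EXCLUDE); otherwise program the full hash type.
 */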
5268 if (bp->rss_hash_delta) {
5269 req->hash_type = cpu_to_le32(bp->rss_hash_delta);
5270 if (bp->rss_hash_cfg & bp->rss_hash_delta)
5271 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE;
5272 else
5273 req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE;
5274 } else {
5275 req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5276 }
5277 req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5278 req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5279 req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5280 }
5281
5282 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5283 {
5284 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5285 struct hwrm_vnic_rss_cfg_input *req;
5286 int rc;
5287
5288 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5289 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5290 return 0;
5291
5292 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5293 if (rc)
5294 return rc;
5295
5296 if (set_rss)
5297 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
5298 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5299 return hwrm_req_send(bp, req);
5300 }
5301
5302 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5303 {
5304 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5305 struct hwrm_vnic_rss_cfg_input *req;
5306 dma_addr_t ring_tbl_map;
5307 u32 i, nr_ctxs;
5308 int rc;
5309
5310 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5311 if (rc)
5312 return rc;
5313
5314 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5315 if (!set_rss)
5316 return hwrm_req_send(bp, req);
5317
5318 __bnxt_hwrm_vnic_set_rss(bp, req, vnic);
5319 ring_tbl_map = vnic->rss_table_dma_addr;
5320 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5321
5322 hwrm_req_hold(bp, req);
5323 for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5324 req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5325 req->ring_table_pair_index = i;
5326 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5327 rc = hwrm_req_send(bp, req);
5328 if (rc)
5329 goto exit;
5330 }
5331
5332 exit:
5333 hwrm_req_drop(bp, req);
5334 return rc;
5335 }
5336
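/* Query the RSS hash type currently programmed in firmware, refresh the
 * cached rss_hash_cfg, and clear any pending hash-type delta.
 */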
5337 static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
5338 {
5339 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5340 struct hwrm_vnic_rss_qcfg_output *resp;
5341 struct hwrm_vnic_rss_qcfg_input *req;
5342
5343 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
5344 return;
5345
5346 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5347 /* All contexts use the same hash_type; context zero always exists */
5348 req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5349 resp = hwrm_req_hold(bp, req);
5350 if (!hwrm_req_send(bp, req)) {
5351 bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg;
5352 bp->rss_hash_delta = 0;
5353 }
5354 hwrm_req_drop(bp, req);
5355 }
5356
5357 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5358 {
5359 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5360 struct hwrm_vnic_plcmodes_cfg_input *req;
5361 int rc;
5362
5363 rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
5364 if (rc)
5365 return rc;
5366
5367 req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
5368 req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
5369
5370 if (BNXT_RX_PAGE_MODE(bp)) {
5371 req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
5372 } else {
5373 req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5374 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5375 req->enables |=
5376 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5377 req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5378 req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5379 }
5380 req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5381 return hwrm_req_send(bp, req);
5382 }
5383
5384 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5385 u16 ctx_idx)
5386 {
5387 struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
5388
5389 if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
5390 return;
5391
5392 req->rss_cos_lb_ctx_id =
5393 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5394
5395 hwrm_req_send(bp, req);
5396 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5397 }
5398
5399 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5400 {
5401 int i, j;
5402
5403 for (i = 0; i < bp->nr_vnics; i++) {
5404 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5405
5406 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5407 if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5408 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5409 }
5410 }
5411 bp->rsscos_nr_ctxs = 0;
5412 }
5413
5414 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5415 {
5416 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
5417 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
5418 int rc;
5419
5420 rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
5421 if (rc)
5422 return rc;
5423
5424 resp = hwrm_req_hold(bp, req);
5425 rc = hwrm_req_send(bp, req);
5426 if (!rc)
5427 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5428 le16_to_cpu(resp->rss_cos_lb_ctx_id);
5429 hwrm_req_drop(bp, req);
5430
5431 return rc;
5432 }
5433
5434 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5435 {
5436 if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5437 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5438 return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5439 }
5440
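/* Configure a VNIC.  On P5 chips only the default RX and completion ring IDs
 * need to be set; on earlier chips the default ring group and the RSS/COS/LB
 * context rules are programmed instead.  The MRU is derived from the MTU plus
 * Ethernet and VLAN headers, VLAN stripping is enabled when requested or when
 * a VF default VLAN is in use, and VNIC 0 carries the RoCE VNIC mode flag
 * when the ULP (RDMA) driver is registered.
 */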
5441 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5442 {
5443 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5444 struct hwrm_vnic_cfg_input *req;
5445 unsigned int ring = 0, grp_idx;
5446 u16 def_vlan = 0;
5447 int rc;
5448
5449 rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
5450 if (rc)
5451 return rc;
5452
5453 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5454 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5455
5456 req->default_rx_ring_id =
5457 cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5458 req->default_cmpl_ring_id =
5459 cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5460 req->enables =
5461 cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5462 VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5463 goto vnic_mru;
5464 }
5465 req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5466 	/* Only RSS is supported for now; TBD: COS & LB */
5467 if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5468 req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5469 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5470 VNIC_CFG_REQ_ENABLES_MRU);
5471 } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5472 req->rss_rule =
5473 cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5474 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5475 VNIC_CFG_REQ_ENABLES_MRU);
5476 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5477 } else {
5478 req->rss_rule = cpu_to_le16(0xffff);
5479 }
5480
5481 if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5482 (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5483 req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5484 req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5485 } else {
5486 req->cos_rule = cpu_to_le16(0xffff);
5487 }
5488
5489 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5490 ring = 0;
5491 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5492 ring = vnic_id - 1;
5493 else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5494 ring = bp->rx_nr_rings - 1;
5495
5496 grp_idx = bp->rx_ring[ring].bnapi->index;
5497 req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5498 req->lb_rule = cpu_to_le16(0xffff);
5499 vnic_mru:
5500 req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
5501
5502 req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5503 #ifdef CONFIG_BNXT_SRIOV
5504 if (BNXT_VF(bp))
5505 def_vlan = bp->vf.vlan;
5506 #endif
5507 if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5508 req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5509 if (!vnic_id && bnxt_ulp_registered(bp->edev))
5510 req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5511
5512 return hwrm_req_send(bp, req);
5513 }
5514
5515 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5516 {
5517 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5518 struct hwrm_vnic_free_input *req;
5519
5520 if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
5521 return;
5522
5523 req->vnic_id =
5524 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5525
5526 hwrm_req_send(bp, req);
5527 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5528 }
5529 }
5530
5531 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5532 {
5533 u16 i;
5534
5535 for (i = 0; i < bp->nr_vnics; i++)
5536 bnxt_hwrm_vnic_free_one(bp, i);
5537 }
5538
5539 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5540 unsigned int start_rx_ring_idx,
5541 unsigned int nr_rings)
5542 {
5543 unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5544 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5545 struct hwrm_vnic_alloc_output *resp;
5546 struct hwrm_vnic_alloc_input *req;
5547 int rc;
5548
5549 rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
5550 if (rc)
5551 return rc;
5552
5553 if (bp->flags & BNXT_FLAG_CHIP_P5)
5554 goto vnic_no_ring_grps;
5555
5556 /* map ring groups to this vnic */
5557 for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5558 grp_idx = bp->rx_ring[i].bnapi->index;
5559 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5560 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5561 j, nr_rings);
5562 break;
5563 }
5564 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5565 }
5566
5567 vnic_no_ring_grps:
5568 for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5569 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5570 if (vnic_id == 0)
5571 req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5572
5573 resp = hwrm_req_hold(bp, req);
5574 rc = hwrm_req_send(bp, req);
5575 if (!rc)
5576 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5577 hwrm_req_drop(bp, req);
5578 return rc;
5579 }
5580
5581 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5582 {
5583 struct hwrm_vnic_qcaps_output *resp;
5584 struct hwrm_vnic_qcaps_input *req;
5585 int rc;
5586
5587 bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5588 bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5589 if (bp->hwrm_spec_code < 0x10600)
5590 return 0;
5591
5592 rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
5593 if (rc)
5594 return rc;
5595
5596 resp = hwrm_req_hold(bp, req);
5597 rc = hwrm_req_send(bp, req);
5598 if (!rc) {
5599 u32 flags = le32_to_cpu(resp->flags);
5600
5601 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5602 (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5603 bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5604 if (flags &
5605 VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5606 bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5607
5608 /* Older P5 fw before EXT_HW_STATS support did not set
5609 * VLAN_STRIP_CAP properly.
5610 */
5611 if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5612 (BNXT_CHIP_P5_THOR(bp) &&
5613 !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5614 bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5615 if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP)
5616 bp->fw_cap |= BNXT_FW_CAP_RSS_HASH_TYPE_DELTA;
5617 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5618 if (bp->max_tpa_v2) {
5619 if (BNXT_CHIP_P5_THOR(bp))
5620 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5621 else
5622 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5623 }
5624 }
5625 hwrm_req_drop(bp, req);
5626 return rc;
5627 }
5628
5629 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5630 {
5631 struct hwrm_ring_grp_alloc_output *resp;
5632 struct hwrm_ring_grp_alloc_input *req;
5633 int rc;
5634 u16 i;
5635
5636 if (bp->flags & BNXT_FLAG_CHIP_P5)
5637 return 0;
5638
5639 rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
5640 if (rc)
5641 return rc;
5642
5643 resp = hwrm_req_hold(bp, req);
5644 for (i = 0; i < bp->rx_nr_rings; i++) {
5645 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5646
5647 req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5648 req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5649 req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5650 req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5651
5652 rc = hwrm_req_send(bp, req);
5653
5654 if (rc)
5655 break;
5656
5657 bp->grp_info[grp_idx].fw_grp_id =
5658 le32_to_cpu(resp->ring_group_id);
5659 }
5660 hwrm_req_drop(bp, req);
5661 return rc;
5662 }
5663
5664 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5665 {
5666 struct hwrm_ring_grp_free_input *req;
5667 u16 i;
5668
5669 if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5670 return;
5671
5672 if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
5673 return;
5674
5675 hwrm_req_hold(bp, req);
5676 for (i = 0; i < bp->cp_nr_rings; i++) {
5677 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5678 continue;
5679 req->ring_group_id =
5680 cpu_to_le32(bp->grp_info[i].fw_grp_id);
5681
5682 hwrm_req_send(bp, req);
5683 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5684 }
5685 hwrm_req_drop(bp, req);
5686 }
5687
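/* Send a HWRM_RING_ALLOC request for one ring.  Ring memory spanning more
 * than one page is described through a page table; the per-type fields below
 * associate TX rings with their completion ring and stats context, RX and
 * aggregation rings with their buffer sizes and stats context on P5 chips,
 * and completion rings with their NQ on P5 chips.  On success the firmware
 * ring ID is stored in the ring structure.
 */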
5688 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5689 struct bnxt_ring_struct *ring,
5690 u32 ring_type, u32 map_index)
5691 {
5692 struct hwrm_ring_alloc_output *resp;
5693 struct hwrm_ring_alloc_input *req;
5694 struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5695 struct bnxt_ring_grp_info *grp_info;
5696 int rc, err = 0;
5697 u16 ring_id;
5698
5699 rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
5700 if (rc)
5701 goto exit;
5702
5703 req->enables = 0;
5704 if (rmem->nr_pages > 1) {
5705 req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5706 /* Page size is in log2 units */
5707 req->page_size = BNXT_PAGE_SHIFT;
5708 req->page_tbl_depth = 1;
5709 } else {
5710 req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
5711 }
5712 req->fbo = 0;
5713 /* Association of ring index with doorbell index and MSIX number */
5714 req->logical_id = cpu_to_le16(map_index);
5715
5716 switch (ring_type) {
5717 case HWRM_RING_ALLOC_TX: {
5718 struct bnxt_tx_ring_info *txr;
5719
5720 txr = container_of(ring, struct bnxt_tx_ring_info,
5721 tx_ring_struct);
5722 req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5723 /* Association of transmit ring with completion ring */
5724 grp_info = &bp->grp_info[ring->grp_idx];
5725 req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5726 req->length = cpu_to_le32(bp->tx_ring_mask + 1);
5727 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5728 req->queue_id = cpu_to_le16(ring->queue_id);
5729 break;
5730 }
5731 case HWRM_RING_ALLOC_RX:
5732 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5733 req->length = cpu_to_le32(bp->rx_ring_mask + 1);
5734 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5735 u16 flags = 0;
5736
5737 /* Association of rx ring with stats context */
5738 grp_info = &bp->grp_info[ring->grp_idx];
5739 req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5740 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5741 req->enables |= cpu_to_le32(
5742 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5743 if (NET_IP_ALIGN == 2)
5744 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5745 req->flags = cpu_to_le16(flags);
5746 }
5747 break;
5748 case HWRM_RING_ALLOC_AGG:
5749 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5750 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5751 /* Association of agg ring with rx ring */
5752 grp_info = &bp->grp_info[ring->grp_idx];
5753 req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5754 req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5755 req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5756 req->enables |= cpu_to_le32(
5757 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5758 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5759 } else {
5760 req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5761 }
5762 req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5763 break;
5764 case HWRM_RING_ALLOC_CMPL:
5765 req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5766 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5767 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5768 /* Association of cp ring with nq */
5769 grp_info = &bp->grp_info[map_index];
5770 req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5771 req->cq_handle = cpu_to_le64(ring->handle);
5772 req->enables |= cpu_to_le32(
5773 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5774 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5775 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5776 }
5777 break;
5778 case HWRM_RING_ALLOC_NQ:
5779 req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5780 req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5781 if (bp->flags & BNXT_FLAG_USING_MSIX)
5782 req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5783 break;
5784 default:
5785 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5786 ring_type);
5787 return -1;
5788 }
5789
5790 resp = hwrm_req_hold(bp, req);
5791 rc = hwrm_req_send(bp, req);
5792 err = le16_to_cpu(resp->error_code);
5793 ring_id = le16_to_cpu(resp->ring_id);
5794 hwrm_req_drop(bp, req);
5795
5796 exit:
5797 if (rc || err) {
5798 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5799 ring_type, rc, err);
5800 return -EIO;
5801 }
5802 ring->fw_ring_id = ring_id;
5803 return rc;
5804 }
5805
5806 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5807 {
5808 int rc;
5809
5810 if (BNXT_PF(bp)) {
5811 struct hwrm_func_cfg_input *req;
5812
5813 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
5814 if (rc)
5815 return rc;
5816
5817 req->fid = cpu_to_le16(0xffff);
5818 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5819 req->async_event_cr = cpu_to_le16(idx);
5820 return hwrm_req_send(bp, req);
5821 } else {
5822 struct hwrm_func_vf_cfg_input *req;
5823
5824 rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
5825 if (rc)
5826 return rc;
5827
5828 req->enables =
5829 cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5830 req->async_event_cr = cpu_to_le16(idx);
5831 return hwrm_req_send(bp, req);
5832 }
5833 }
5834
5835 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5836 u32 map_idx, u32 xid)
5837 {
5838 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5839 if (BNXT_PF(bp))
5840 db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5841 else
5842 db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5843 switch (ring_type) {
5844 case HWRM_RING_ALLOC_TX:
5845 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5846 break;
5847 case HWRM_RING_ALLOC_RX:
5848 case HWRM_RING_ALLOC_AGG:
5849 db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5850 break;
5851 case HWRM_RING_ALLOC_CMPL:
5852 db->db_key64 = DBR_PATH_L2;
5853 break;
5854 case HWRM_RING_ALLOC_NQ:
5855 db->db_key64 = DBR_PATH_L2;
5856 break;
5857 }
5858 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5859 } else {
5860 db->doorbell = bp->bar1 + map_idx * 0x80;
5861 switch (ring_type) {
5862 case HWRM_RING_ALLOC_TX:
5863 db->db_key32 = DB_KEY_TX;
5864 break;
5865 case HWRM_RING_ALLOC_RX:
5866 case HWRM_RING_ALLOC_AGG:
5867 db->db_key32 = DB_KEY_RX;
5868 break;
5869 case HWRM_RING_ALLOC_CMPL:
5870 db->db_key32 = DB_KEY_CP;
5871 break;
5872 }
5873 }
5874 }
5875
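/* Allocate all firmware rings in dependency order: NQs (or L2 completion
 * rings on older chips) first, then TX rings with their per-ring completion
 * rings on P5 chips, then RX rings, and finally the aggregation rings when
 * enabled.  Doorbells are initialized as each ring is allocated, and the
 * first completion/NQ ring is registered as the async event ring.
 */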
5876 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5877 {
5878 bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5879 int i, rc = 0;
5880 u32 type;
5881
5882 if (bp->flags & BNXT_FLAG_CHIP_P5)
5883 type = HWRM_RING_ALLOC_NQ;
5884 else
5885 type = HWRM_RING_ALLOC_CMPL;
5886 for (i = 0; i < bp->cp_nr_rings; i++) {
5887 struct bnxt_napi *bnapi = bp->bnapi[i];
5888 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5889 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5890 u32 map_idx = ring->map_idx;
5891 unsigned int vector;
5892
5893 vector = bp->irq_tbl[map_idx].vector;
5894 disable_irq_nosync(vector);
5895 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5896 if (rc) {
5897 enable_irq(vector);
5898 goto err_out;
5899 }
5900 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5901 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5902 enable_irq(vector);
5903 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5904
5905 if (!i) {
5906 rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5907 if (rc)
5908 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5909 }
5910 }
5911
5912 type = HWRM_RING_ALLOC_TX;
5913 for (i = 0; i < bp->tx_nr_rings; i++) {
5914 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5915 struct bnxt_ring_struct *ring;
5916 u32 map_idx;
5917
5918 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5919 struct bnxt_napi *bnapi = txr->bnapi;
5920 struct bnxt_cp_ring_info *cpr, *cpr2;
5921 u32 type2 = HWRM_RING_ALLOC_CMPL;
5922
5923 cpr = &bnapi->cp_ring;
5924 cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5925 ring = &cpr2->cp_ring_struct;
5926 ring->handle = BNXT_TX_HDL;
5927 map_idx = bnapi->index;
5928 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5929 if (rc)
5930 goto err_out;
5931 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5932 ring->fw_ring_id);
5933 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5934 }
5935 ring = &txr->tx_ring_struct;
5936 map_idx = i;
5937 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5938 if (rc)
5939 goto err_out;
5940 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5941 }
5942
5943 type = HWRM_RING_ALLOC_RX;
5944 for (i = 0; i < bp->rx_nr_rings; i++) {
5945 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5946 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5947 struct bnxt_napi *bnapi = rxr->bnapi;
5948 u32 map_idx = bnapi->index;
5949
5950 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5951 if (rc)
5952 goto err_out;
5953 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5954 /* If we have agg rings, post agg buffers first. */
5955 if (!agg_rings)
5956 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5957 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5958 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5959 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5960 u32 type2 = HWRM_RING_ALLOC_CMPL;
5961 struct bnxt_cp_ring_info *cpr2;
5962
5963 cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5964 ring = &cpr2->cp_ring_struct;
5965 ring->handle = BNXT_RX_HDL;
5966 rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5967 if (rc)
5968 goto err_out;
5969 bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5970 ring->fw_ring_id);
5971 bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5972 }
5973 }
5974
5975 if (agg_rings) {
5976 type = HWRM_RING_ALLOC_AGG;
5977 for (i = 0; i < bp->rx_nr_rings; i++) {
5978 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5979 struct bnxt_ring_struct *ring =
5980 &rxr->rx_agg_ring_struct;
5981 u32 grp_idx = ring->grp_idx;
5982 u32 map_idx = grp_idx + bp->rx_nr_rings;
5983
5984 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5985 if (rc)
5986 goto err_out;
5987
5988 bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5989 ring->fw_ring_id);
5990 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5991 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5992 bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5993 }
5994 }
5995 err_out:
5996 return rc;
5997 }
5998
5999 static int hwrm_ring_free_send_msg(struct bnxt *bp,
6000 struct bnxt_ring_struct *ring,
6001 u32 ring_type, int cmpl_ring_id)
6002 {
6003 struct hwrm_ring_free_output *resp;
6004 struct hwrm_ring_free_input *req;
6005 u16 error_code = 0;
6006 int rc;
6007
6008 if (BNXT_NO_FW_ACCESS(bp))
6009 return 0;
6010
6011 rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
6012 if (rc)
6013 goto exit;
6014
6015 req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
6016 req->ring_type = ring_type;
6017 req->ring_id = cpu_to_le16(ring->fw_ring_id);
6018
6019 resp = hwrm_req_hold(bp, req);
6020 rc = hwrm_req_send(bp, req);
6021 error_code = le16_to_cpu(resp->error_code);
6022 hwrm_req_drop(bp, req);
6023 exit:
6024 if (rc || error_code) {
6025 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
6026 ring_type, rc, error_code);
6027 return -EIO;
6028 }
6029 return 0;
6030 }
6031
6032 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
6033 {
6034 u32 type;
6035 int i;
6036
6037 if (!bp->bnapi)
6038 return;
6039
6040 for (i = 0; i < bp->tx_nr_rings; i++) {
6041 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
6042 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
6043
6044 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6045 u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
6046
6047 hwrm_ring_free_send_msg(bp, ring,
6048 RING_FREE_REQ_RING_TYPE_TX,
6049 close_path ? cmpl_ring_id :
6050 INVALID_HW_RING_ID);
6051 ring->fw_ring_id = INVALID_HW_RING_ID;
6052 }
6053 }
6054
6055 for (i = 0; i < bp->rx_nr_rings; i++) {
6056 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6057 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
6058 u32 grp_idx = rxr->bnapi->index;
6059
6060 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6061 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6062
6063 hwrm_ring_free_send_msg(bp, ring,
6064 RING_FREE_REQ_RING_TYPE_RX,
6065 close_path ? cmpl_ring_id :
6066 INVALID_HW_RING_ID);
6067 ring->fw_ring_id = INVALID_HW_RING_ID;
6068 bp->grp_info[grp_idx].rx_fw_ring_id =
6069 INVALID_HW_RING_ID;
6070 }
6071 }
6072
6073 if (bp->flags & BNXT_FLAG_CHIP_P5)
6074 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
6075 else
6076 type = RING_FREE_REQ_RING_TYPE_RX;
6077 for (i = 0; i < bp->rx_nr_rings; i++) {
6078 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6079 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
6080 u32 grp_idx = rxr->bnapi->index;
6081
6082 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6083 u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6084
6085 hwrm_ring_free_send_msg(bp, ring, type,
6086 close_path ? cmpl_ring_id :
6087 INVALID_HW_RING_ID);
6088 ring->fw_ring_id = INVALID_HW_RING_ID;
6089 bp->grp_info[grp_idx].agg_fw_ring_id =
6090 INVALID_HW_RING_ID;
6091 }
6092 }
6093
6094 /* The completion rings are about to be freed. After that the
6095 * IRQ doorbell will not work anymore. So we need to disable
6096 * IRQ here.
6097 */
6098 bnxt_disable_int_sync(bp);
6099
6100 if (bp->flags & BNXT_FLAG_CHIP_P5)
6101 type = RING_FREE_REQ_RING_TYPE_NQ;
6102 else
6103 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
6104 for (i = 0; i < bp->cp_nr_rings; i++) {
6105 struct bnxt_napi *bnapi = bp->bnapi[i];
6106 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6107 struct bnxt_ring_struct *ring;
6108 int j;
6109
6110 for (j = 0; j < 2; j++) {
6111 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
6112
6113 if (cpr2) {
6114 ring = &cpr2->cp_ring_struct;
6115 if (ring->fw_ring_id == INVALID_HW_RING_ID)
6116 continue;
6117 hwrm_ring_free_send_msg(bp, ring,
6118 RING_FREE_REQ_RING_TYPE_L2_CMPL,
6119 INVALID_HW_RING_ID);
6120 ring->fw_ring_id = INVALID_HW_RING_ID;
6121 }
6122 }
6123 ring = &cpr->cp_ring_struct;
6124 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6125 hwrm_ring_free_send_msg(bp, ring, type,
6126 INVALID_HW_RING_ID);
6127 ring->fw_ring_id = INVALID_HW_RING_ID;
6128 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
6129 }
6130 }
6131 }
6132
6133 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6134 bool shared);
6135
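/* Query the resources currently reserved for this function via
 * HWRM_FUNC_QCFG and record them in bp->hw_resc.  On P5 chips, if the
 * reserved completion rings cannot cover the reserved RX and TX rings, the
 * RX/TX counts are trimmed to fit (accounting for aggregation rings doubling
 * the RX count).
 */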
6136 static int bnxt_hwrm_get_rings(struct bnxt *bp)
6137 {
6138 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6139 struct hwrm_func_qcfg_output *resp;
6140 struct hwrm_func_qcfg_input *req;
6141 int rc;
6142
6143 if (bp->hwrm_spec_code < 0x10601)
6144 return 0;
6145
6146 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6147 if (rc)
6148 return rc;
6149
6150 req->fid = cpu_to_le16(0xffff);
6151 resp = hwrm_req_hold(bp, req);
6152 rc = hwrm_req_send(bp, req);
6153 if (rc) {
6154 hwrm_req_drop(bp, req);
6155 return rc;
6156 }
6157
6158 hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6159 if (BNXT_NEW_RM(bp)) {
6160 u16 cp, stats;
6161
6162 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6163 hw_resc->resv_hw_ring_grps =
6164 le32_to_cpu(resp->alloc_hw_ring_grps);
6165 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6166 cp = le16_to_cpu(resp->alloc_cmpl_rings);
6167 stats = le16_to_cpu(resp->alloc_stat_ctx);
6168 hw_resc->resv_irqs = cp;
6169 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6170 int rx = hw_resc->resv_rx_rings;
6171 int tx = hw_resc->resv_tx_rings;
6172
6173 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6174 rx >>= 1;
6175 if (cp < (rx + tx)) {
6176 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6177 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6178 rx <<= 1;
6179 hw_resc->resv_rx_rings = rx;
6180 hw_resc->resv_tx_rings = tx;
6181 }
6182 hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
6183 hw_resc->resv_hw_ring_grps = rx;
6184 }
6185 hw_resc->resv_cp_rings = cp;
6186 hw_resc->resv_stat_ctxs = stats;
6187 }
6188 hwrm_req_drop(bp, req);
6189 return 0;
6190 }
6191
6192 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6193 {
6194 struct hwrm_func_qcfg_output *resp;
6195 struct hwrm_func_qcfg_input *req;
6196 int rc;
6197
6198 if (bp->hwrm_spec_code < 0x10601)
6199 return 0;
6200
6201 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6202 if (rc)
6203 return rc;
6204
6205 req->fid = cpu_to_le16(fid);
6206 resp = hwrm_req_hold(bp, req);
6207 rc = hwrm_req_send(bp, req);
6208 if (!rc)
6209 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6210
6211 hwrm_req_drop(bp, req);
6212 return rc;
6213 }
6214
6215 static bool bnxt_rfs_supported(struct bnxt *bp);
6216
6217 static struct hwrm_func_cfg_input *
6218 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6219 int ring_grps, int cp_rings, int stats, int vnics)
6220 {
6221 struct hwrm_func_cfg_input *req;
6222 u32 enables = 0;
6223
6224 if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
6225 return NULL;
6226
6227 req->fid = cpu_to_le16(0xffff);
6228 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6229 req->num_tx_rings = cpu_to_le16(tx_rings);
6230 if (BNXT_NEW_RM(bp)) {
6231 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6232 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6233 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6234 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6235 enables |= tx_rings + ring_grps ?
6236 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6237 enables |= rx_rings ?
6238 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6239 } else {
6240 enables |= cp_rings ?
6241 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6242 enables |= ring_grps ?
6243 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6244 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6245 }
6246 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6247
6248 req->num_rx_rings = cpu_to_le16(rx_rings);
6249 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6250 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6251 req->num_msix = cpu_to_le16(cp_rings);
6252 req->num_rsscos_ctxs =
6253 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6254 } else {
6255 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6256 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6257 req->num_rsscos_ctxs = cpu_to_le16(1);
6258 if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6259 bnxt_rfs_supported(bp))
6260 req->num_rsscos_ctxs =
6261 cpu_to_le16(ring_grps + 1);
6262 }
6263 req->num_stat_ctxs = cpu_to_le16(stats);
6264 req->num_vnics = cpu_to_le16(vnics);
6265 }
6266 req->enables = cpu_to_le32(enables);
6267 return req;
6268 }
6269
6270 static struct hwrm_func_vf_cfg_input *
6271 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6272 int ring_grps, int cp_rings, int stats, int vnics)
6273 {
6274 struct hwrm_func_vf_cfg_input *req;
6275 u32 enables = 0;
6276
6277 if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
6278 return NULL;
6279
6280 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6281 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6282 FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6283 enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6284 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6285 enables |= tx_rings + ring_grps ?
6286 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6287 } else {
6288 enables |= cp_rings ?
6289 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6290 enables |= ring_grps ?
6291 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6292 }
6293 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6294 enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6295
6296 req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6297 req->num_tx_rings = cpu_to_le16(tx_rings);
6298 req->num_rx_rings = cpu_to_le16(rx_rings);
6299 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6300 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6301 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6302 } else {
6303 req->num_cmpl_rings = cpu_to_le16(cp_rings);
6304 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6305 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6306 }
6307 req->num_stat_ctxs = cpu_to_le16(stats);
6308 req->num_vnics = cpu_to_le16(vnics);
6309
6310 req->enables = cpu_to_le32(enables);
6311 return req;
6312 }
6313
6314 static int
6315 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6316 int ring_grps, int cp_rings, int stats, int vnics)
6317 {
6318 struct hwrm_func_cfg_input *req;
6319 int rc;
6320
6321 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6322 cp_rings, stats, vnics);
6323 if (!req)
6324 return -ENOMEM;
6325
6326 if (!req->enables) {
6327 hwrm_req_drop(bp, req);
6328 return 0;
6329 }
6330
6331 rc = hwrm_req_send(bp, req);
6332 if (rc)
6333 return rc;
6334
6335 if (bp->hwrm_spec_code < 0x10601)
6336 bp->hw_resc.resv_tx_rings = tx_rings;
6337
6338 return bnxt_hwrm_get_rings(bp);
6339 }
6340
6341 static int
6342 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6343 int ring_grps, int cp_rings, int stats, int vnics)
6344 {
6345 struct hwrm_func_vf_cfg_input *req;
6346 int rc;
6347
6348 if (!BNXT_NEW_RM(bp)) {
6349 bp->hw_resc.resv_tx_rings = tx_rings;
6350 return 0;
6351 }
6352
6353 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6354 cp_rings, stats, vnics);
6355 if (!req)
6356 return -ENOMEM;
6357
6358 rc = hwrm_req_send(bp, req);
6359 if (rc)
6360 return rc;
6361
6362 return bnxt_hwrm_get_rings(bp);
6363 }
6364
6365 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6366 int cp, int stat, int vnic)
6367 {
6368 if (BNXT_PF(bp))
6369 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6370 vnic);
6371 else
6372 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6373 vnic);
6374 }
6375
6376 int bnxt_nq_rings_in_use(struct bnxt *bp)
6377 {
6378 int cp = bp->cp_nr_rings;
6379 int ulp_msix, ulp_base;
6380
6381 ulp_msix = bnxt_get_ulp_msix_num(bp);
6382 if (ulp_msix) {
6383 ulp_base = bnxt_get_ulp_msix_base(bp);
6384 cp += ulp_msix;
6385 if ((ulp_base + ulp_msix) > cp)
6386 cp = ulp_base + ulp_msix;
6387 }
6388 return cp;
6389 }
6390
6391 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6392 {
6393 int cp;
6394
6395 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6396 return bnxt_nq_rings_in_use(bp);
6397
6398 cp = bp->tx_nr_rings + bp->rx_nr_rings;
6399 return cp;
6400 }
6401
6402 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6403 {
6404 int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6405 int cp = bp->cp_nr_rings;
6406
6407 if (!ulp_stat)
6408 return cp;
6409
6410 if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6411 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6412
6413 return cp + ulp_stat;
6414 }
6415
6416 /* Check if a default RSS map needs to be set up. This function is only
6417 * used on older firmware that does not require reserving RX rings.
6418 */
6419 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6420 {
6421 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6422
6423 /* The RSS map is valid for RX rings set to resv_rx_rings */
6424 if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6425 hw_resc->resv_rx_rings = bp->rx_nr_rings;
6426 if (!netif_is_rxfh_configured(bp->dev))
6427 bnxt_set_dflt_rss_indir_tbl(bp);
6428 }
6429 }
6430
6431 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6432 {
6433 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6434 int cp = bnxt_cp_rings_in_use(bp);
6435 int nq = bnxt_nq_rings_in_use(bp);
6436 int rx = bp->rx_nr_rings, stat;
6437 int vnic = 1, grp = rx;
6438
6439 if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6440 bp->hwrm_spec_code >= 0x10601)
6441 return true;
6442
6443 /* Old firmware does not need RX ring reservations but we still
6444 	 * need to set up a default RSS map when needed. With new firmware
6445 * we go through RX ring reservations first and then set up the
6446 * RSS map for the successfully reserved RX rings when needed.
6447 */
6448 if (!BNXT_NEW_RM(bp)) {
6449 bnxt_check_rss_tbl_no_rmgr(bp);
6450 return false;
6451 }
6452 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6453 vnic = rx + 1;
6454 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6455 rx <<= 1;
6456 stat = bnxt_get_func_stat_ctxs(bp);
6457 if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6458 hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6459 (hw_resc->resv_hw_ring_grps != grp &&
6460 !(bp->flags & BNXT_FLAG_CHIP_P5)))
6461 return true;
6462 if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6463 hw_resc->resv_irqs != nq)
6464 return true;
6465 return false;
6466 }
6467
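/* Reserve rings with the firmware and then adapt the driver's ring counts to
 * what was actually granted.  If fewer RX rings are reserved than requested,
 * aggregation rings (and LRO) may be dropped, and a user-configured RSS
 * table is reverted to the default only when it no longer fits the reduced
 * ring count.
 */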
6468 static int __bnxt_reserve_rings(struct bnxt *bp)
6469 {
6470 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6471 int cp = bnxt_nq_rings_in_use(bp);
6472 int tx = bp->tx_nr_rings;
6473 int rx = bp->rx_nr_rings;
6474 int grp, rx_rings, rc;
6475 int vnic = 1, stat;
6476 bool sh = false;
6477
6478 if (!bnxt_need_reserve_rings(bp))
6479 return 0;
6480
6481 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6482 sh = true;
6483 if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6484 vnic = rx + 1;
6485 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6486 rx <<= 1;
6487 grp = bp->rx_nr_rings;
6488 stat = bnxt_get_func_stat_ctxs(bp);
6489
6490 rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6491 if (rc)
6492 return rc;
6493
6494 tx = hw_resc->resv_tx_rings;
6495 if (BNXT_NEW_RM(bp)) {
6496 rx = hw_resc->resv_rx_rings;
6497 cp = hw_resc->resv_irqs;
6498 grp = hw_resc->resv_hw_ring_grps;
6499 vnic = hw_resc->resv_vnics;
6500 stat = hw_resc->resv_stat_ctxs;
6501 }
6502
6503 rx_rings = rx;
6504 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6505 if (rx >= 2) {
6506 rx_rings = rx >> 1;
6507 } else {
6508 if (netif_running(bp->dev))
6509 return -ENOMEM;
6510
6511 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6512 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6513 bp->dev->hw_features &= ~NETIF_F_LRO;
6514 bp->dev->features &= ~NETIF_F_LRO;
6515 bnxt_set_ring_params(bp);
6516 }
6517 }
6518 rx_rings = min_t(int, rx_rings, grp);
6519 cp = min_t(int, cp, bp->cp_nr_rings);
6520 if (stat > bnxt_get_ulp_stat_ctxs(bp))
6521 stat -= bnxt_get_ulp_stat_ctxs(bp);
6522 cp = min_t(int, cp, stat);
6523 rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6524 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6525 rx = rx_rings << 1;
6526 cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6527 bp->tx_nr_rings = tx;
6528
6529 /* If we cannot reserve all the RX rings, reset the RSS map only
6530 	 * if absolutely necessary.
6531 */
6532 if (rx_rings != bp->rx_nr_rings) {
6533 netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6534 rx_rings, bp->rx_nr_rings);
6535 if (netif_is_rxfh_configured(bp->dev) &&
6536 (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6537 bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6538 bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6539 netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6540 bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6541 }
6542 }
6543 bp->rx_nr_rings = rx_rings;
6544 bp->cp_nr_rings = cp;
6545
6546 if (!tx || !rx || !cp || !grp || !vnic || !stat)
6547 return -ENOMEM;
6548
6549 if (!netif_is_rxfh_configured(bp->dev))
6550 bnxt_set_dflt_rss_indir_tbl(bp);
6551
6552 return rc;
6553 }
6554
6555 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6556 int ring_grps, int cp_rings, int stats,
6557 int vnics)
6558 {
6559 struct hwrm_func_vf_cfg_input *req;
6560 u32 flags;
6561
6562 if (!BNXT_NEW_RM(bp))
6563 return 0;
6564
6565 req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6566 cp_rings, stats, vnics);
6567 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6568 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6569 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6570 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6571 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6572 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6573 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6574 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6575
6576 req->flags = cpu_to_le32(flags);
6577 return hwrm_req_send_silent(bp, req);
6578 }
6579
6580 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6581 int ring_grps, int cp_rings, int stats,
6582 int vnics)
6583 {
6584 struct hwrm_func_cfg_input *req;
6585 u32 flags;
6586
6587 req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6588 cp_rings, stats, vnics);
6589 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6590 if (BNXT_NEW_RM(bp)) {
6591 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6592 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6593 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6594 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6595 if (bp->flags & BNXT_FLAG_CHIP_P5)
6596 flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6597 FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6598 else
6599 flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6600 }
6601
6602 req->flags = cpu_to_le32(flags);
6603 return hwrm_req_send_silent(bp, req);
6604 }
6605
6606 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6607 int ring_grps, int cp_rings, int stats,
6608 int vnics)
6609 {
6610 if (bp->hwrm_spec_code < 0x10801)
6611 return 0;
6612
6613 if (BNXT_PF(bp))
6614 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6615 ring_grps, cp_rings, stats,
6616 vnics);
6617
6618 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6619 cp_rings, stats, vnics);
6620 }
6621
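/* Initialize the interrupt coalescing capabilities with conservative legacy
 * defaults, then query HWRM_RING_AGGINT_QCAPS on firmware spec 1.9.2 or newer
 * to replace them with the values reported by the device.
 */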
6622 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6623 {
6624 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6625 struct hwrm_ring_aggint_qcaps_output *resp;
6626 struct hwrm_ring_aggint_qcaps_input *req;
6627 int rc;
6628
6629 coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6630 coal_cap->num_cmpl_dma_aggr_max = 63;
6631 coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6632 coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6633 coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6634 coal_cap->int_lat_tmr_min_max = 65535;
6635 coal_cap->int_lat_tmr_max_max = 65535;
6636 coal_cap->num_cmpl_aggr_int_max = 65535;
6637 coal_cap->timer_units = 80;
6638
6639 if (bp->hwrm_spec_code < 0x10902)
6640 return;
6641
6642 if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
6643 return;
6644
6645 resp = hwrm_req_hold(bp, req);
6646 rc = hwrm_req_send_silent(bp, req);
6647 if (!rc) {
6648 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6649 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6650 coal_cap->num_cmpl_dma_aggr_max =
6651 le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6652 coal_cap->num_cmpl_dma_aggr_during_int_max =
6653 le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6654 coal_cap->cmpl_aggr_dma_tmr_max =
6655 le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6656 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6657 le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6658 coal_cap->int_lat_tmr_min_max =
6659 le16_to_cpu(resp->int_lat_tmr_min_max);
6660 coal_cap->int_lat_tmr_max_max =
6661 le16_to_cpu(resp->int_lat_tmr_max_max);
6662 coal_cap->num_cmpl_aggr_int_max =
6663 le16_to_cpu(resp->num_cmpl_aggr_int_max);
6664 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6665 }
6666 hwrm_req_drop(bp, req);
6667 }
6668
6669 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6670 {
6671 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6672
6673 return usec * 1000 / coal_cap->timer_units;
6674 }
6675
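/* Translate a bnxt_coal structure into an aggint-params request: buffer
 * counts are clamped to the queried capabilities, the maximum interrupt
 * latency timer comes from coal_ticks, the minimum timer is set to half of
 * it, and the DMA buffer timer to a quarter of it.
 */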
6676 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6677 struct bnxt_coal *hw_coal,
6678 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6679 {
6680 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6681 u16 val, tmr, max, flags = hw_coal->flags;
6682 u32 cmpl_params = coal_cap->cmpl_params;
6683
6684 max = hw_coal->bufs_per_record * 128;
6685 if (hw_coal->budget)
6686 max = hw_coal->bufs_per_record * hw_coal->budget;
6687 max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6688
6689 val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6690 req->num_cmpl_aggr_int = cpu_to_le16(val);
6691
6692 val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6693 req->num_cmpl_dma_aggr = cpu_to_le16(val);
6694
6695 val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6696 coal_cap->num_cmpl_dma_aggr_during_int_max);
6697 req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6698
6699 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6700 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6701 req->int_lat_tmr_max = cpu_to_le16(tmr);
6702
6703 /* min timer set to 1/2 of interrupt timer */
6704 if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6705 val = tmr / 2;
6706 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6707 req->int_lat_tmr_min = cpu_to_le16(val);
6708 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6709 }
6710
6711 /* buf timer set to 1/4 of interrupt timer */
6712 val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6713 req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6714
6715 if (cmpl_params &
6716 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6717 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6718 val = clamp_t(u16, tmr, 1,
6719 coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6720 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6721 req->enables |=
6722 cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6723 }
6724
6725 if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6726 hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6727 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6728 req->flags = cpu_to_le16(flags);
6729 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6730 }
6731
6732 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6733 struct bnxt_coal *hw_coal)
6734 {
6735 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
6736 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6737 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6738 u32 nq_params = coal_cap->nq_params;
6739 u16 tmr;
6740 int rc;
6741
6742 if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6743 return 0;
6744
6745 rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6746 if (rc)
6747 return rc;
6748
6749 req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6750 req->flags =
6751 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6752
6753 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6754 tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6755 req->int_lat_tmr_min = cpu_to_le16(tmr);
6756 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6757 return hwrm_req_send(bp, req);
6758 }
6759
6760 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6761 {
6762 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
6763 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6764 struct bnxt_coal coal;
6765 int rc;
6766
6767 	/* Tick values in microseconds.
6768 * 1 coal_buf x bufs_per_record = 1 completion record.
6769 */
6770 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6771
6772 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6773 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6774
6775 if (!bnapi->rx_ring)
6776 return -ENODEV;
6777
6778 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6779 if (rc)
6780 return rc;
6781
6782 bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
6783
6784 req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6785
6786 return hwrm_req_send(bp, req_rx);
6787 }
6788
6789 int bnxt_hwrm_set_coal(struct bnxt *bp)
6790 {
6791 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
6792 *req;
6793 int i, rc;
6794
6795 rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6796 if (rc)
6797 return rc;
6798
6799 rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6800 if (rc) {
6801 hwrm_req_drop(bp, req_rx);
6802 return rc;
6803 }
6804
6805 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
6806 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
6807
6808 hwrm_req_hold(bp, req_rx);
6809 hwrm_req_hold(bp, req_tx);
6810 for (i = 0; i < bp->cp_nr_rings; i++) {
6811 struct bnxt_napi *bnapi = bp->bnapi[i];
6812 struct bnxt_coal *hw_coal;
6813 u16 ring_id;
6814
6815 req = req_rx;
6816 if (!bnapi->rx_ring) {
6817 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6818 req = req_tx;
6819 } else {
6820 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6821 }
6822 req->ring_id = cpu_to_le16(ring_id);
6823
6824 rc = hwrm_req_send(bp, req);
6825 if (rc)
6826 break;
6827
6828 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6829 continue;
6830
6831 if (bnapi->rx_ring && bnapi->tx_ring) {
6832 req = req_tx;
6833 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6834 req->ring_id = cpu_to_le16(ring_id);
6835 rc = hwrm_req_send(bp, req);
6836 if (rc)
6837 break;
6838 }
6839 if (bnapi->rx_ring)
6840 hw_coal = &bp->rx_coal;
6841 else
6842 hw_coal = &bp->tx_coal;
6843 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6844 }
6845 hwrm_req_drop(bp, req_rx);
6846 hwrm_req_drop(bp, req_tx);
6847 return rc;
6848 }
6849
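/* Free all statistics contexts.  On older firmware (major version <= 20) the
 * stats are explicitly cleared with HWRM_STAT_CTX_CLR_STATS before each
 * context is freed.
 */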
6850 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6851 {
6852 struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
6853 struct hwrm_stat_ctx_free_input *req;
6854 int i;
6855
6856 if (!bp->bnapi)
6857 return;
6858
6859 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6860 return;
6861
6862 if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
6863 return;
6864 if (BNXT_FW_MAJ(bp) <= 20) {
6865 if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
6866 hwrm_req_drop(bp, req);
6867 return;
6868 }
6869 hwrm_req_hold(bp, req0);
6870 }
6871 hwrm_req_hold(bp, req);
6872 for (i = 0; i < bp->cp_nr_rings; i++) {
6873 struct bnxt_napi *bnapi = bp->bnapi[i];
6874 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6875
6876 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6877 req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6878 if (req0) {
6879 req0->stat_ctx_id = req->stat_ctx_id;
6880 hwrm_req_send(bp, req0);
6881 }
6882 hwrm_req_send(bp, req);
6883
6884 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6885 }
6886 }
6887 hwrm_req_drop(bp, req);
6888 if (req0)
6889 hwrm_req_drop(bp, req0);
6890 }
6891
6892 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6893 {
6894 struct hwrm_stat_ctx_alloc_output *resp;
6895 struct hwrm_stat_ctx_alloc_input *req;
6896 int rc, i;
6897
6898 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6899 return 0;
6900
6901 rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
6902 if (rc)
6903 return rc;
6904
6905 req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6906 req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6907
6908 resp = hwrm_req_hold(bp, req);
6909 for (i = 0; i < bp->cp_nr_rings; i++) {
6910 struct bnxt_napi *bnapi = bp->bnapi[i];
6911 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6912
6913 req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6914
6915 rc = hwrm_req_send(bp, req);
6916 if (rc)
6917 break;
6918
6919 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6920
6921 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6922 }
6923 hwrm_req_drop(bp, req);
6924 return rc;
6925 }
6926
6927 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6928 {
6929 struct hwrm_func_qcfg_output *resp;
6930 struct hwrm_func_qcfg_input *req;
6931 u32 min_db_offset = 0;
6932 u16 flags;
6933 int rc;
6934
6935 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6936 if (rc)
6937 return rc;
6938
6939 req->fid = cpu_to_le16(0xffff);
6940 resp = hwrm_req_hold(bp, req);
6941 rc = hwrm_req_send(bp, req);
6942 if (rc)
6943 goto func_qcfg_exit;
6944
6945 #ifdef CONFIG_BNXT_SRIOV
6946 if (BNXT_VF(bp)) {
6947 struct bnxt_vf_info *vf = &bp->vf;
6948
6949 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6950 } else {
6951 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6952 }
6953 #endif
6954 flags = le16_to_cpu(resp->flags);
6955 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6956 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6957 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6958 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6959 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6960 }
6961 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6962 bp->flags |= BNXT_FLAG_MULTI_HOST;
6963
6964 if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6965 bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6966
6967 switch (resp->port_partition_type) {
6968 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6969 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6970 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6971 bp->port_partition_type = resp->port_partition_type;
6972 break;
6973 }
6974 if (bp->hwrm_spec_code < 0x10707 ||
6975 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6976 bp->br_mode = BRIDGE_MODE_VEB;
6977 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6978 bp->br_mode = BRIDGE_MODE_VEPA;
6979 else
6980 bp->br_mode = BRIDGE_MODE_UNDEF;
6981
6982 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6983 if (!bp->max_mtu)
6984 bp->max_mtu = BNXT_MAX_MTU;
6985
6986 if (bp->db_size)
6987 goto func_qcfg_exit;
6988
6989 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6990 if (BNXT_PF(bp))
6991 min_db_offset = DB_PF_OFFSET_P5;
6992 else
6993 min_db_offset = DB_VF_OFFSET_P5;
6994 }
6995 bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6996 1024);
6997 if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6998 bp->db_size <= min_db_offset)
6999 bp->db_size = pci_resource_len(bp->pdev, 2);
7000
7001 func_qcfg_exit:
7002 hwrm_req_drop(bp, req);
7003 return rc;
7004 }
7005
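/* Record the context-memory initialization value and per-type init offsets
 * from the backing store qcaps response.  Offsets in the response are in
 * units of 4 bytes, and each type's entry size is copied from the parsed
 * context info.
 */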
7006 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
7007 struct hwrm_func_backing_store_qcaps_output *resp)
7008 {
7009 struct bnxt_mem_init *mem_init;
7010 u16 init_mask;
7011 u8 init_val;
7012 u8 *offset;
7013 int i;
7014
7015 init_val = resp->ctx_kind_initializer;
7016 init_mask = le16_to_cpu(resp->ctx_init_mask);
7017 offset = &resp->qp_init_offset;
7018 mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7019 for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
7020 mem_init->init_val = init_val;
7021 mem_init->offset = BNXT_MEM_INVALID_OFFSET;
7022 if (!init_mask)
7023 continue;
7024 if (i == BNXT_CTX_MEM_INIT_STAT)
7025 offset = &resp->stat_init_offset;
7026 if (init_mask & (1 << i))
7027 mem_init->offset = *offset * 4;
7028 else
7029 mem_init->init_val = 0;
7030 }
7031 ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
7032 ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
7033 ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
7034 ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
7035 ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
7036 ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
7037 }
7038
7039 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
7040 {
7041 struct hwrm_func_backing_store_qcaps_output *resp;
7042 struct hwrm_func_backing_store_qcaps_input *req;
7043 int rc;
7044
7045 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
7046 return 0;
7047
7048 rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
7049 if (rc)
7050 return rc;
7051
7052 resp = hwrm_req_hold(bp, req);
7053 rc = hwrm_req_send_silent(bp, req);
7054 if (!rc) {
7055 struct bnxt_ctx_pg_info *ctx_pg;
7056 struct bnxt_ctx_mem_info *ctx;
7057 int i, tqm_rings;
7058
7059 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
7060 if (!ctx) {
7061 rc = -ENOMEM;
7062 goto ctx_err;
7063 }
7064 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
7065 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
7066 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
7067 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
7068 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
7069 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
7070 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
7071 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
7072 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
7073 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
7074 ctx->vnic_max_vnic_entries =
7075 le16_to_cpu(resp->vnic_max_vnic_entries);
7076 ctx->vnic_max_ring_table_entries =
7077 le16_to_cpu(resp->vnic_max_ring_table_entries);
7078 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
7079 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
7080 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
7081 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
7082 ctx->tqm_min_entries_per_ring =
7083 le32_to_cpu(resp->tqm_min_entries_per_ring);
7084 ctx->tqm_max_entries_per_ring =
7085 le32_to_cpu(resp->tqm_max_entries_per_ring);
7086 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
7087 if (!ctx->tqm_entries_multiple)
7088 ctx->tqm_entries_multiple = 1;
7089 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
7090 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
7091 ctx->mrav_num_entries_units =
7092 le16_to_cpu(resp->mrav_num_entries_units);
7093 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
7094 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
7095
7096 bnxt_init_ctx_initializer(ctx, resp);
7097
7098 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
7099 if (!ctx->tqm_fp_rings_count)
7100 ctx->tqm_fp_rings_count = bp->max_q;
7101 else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
7102 ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
7103
7104 tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
7105 ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
7106 if (!ctx_pg) {
7107 kfree(ctx);
7108 rc = -ENOMEM;
7109 goto ctx_err;
7110 }
7111 for (i = 0; i < tqm_rings; i++, ctx_pg++)
7112 ctx->tqm_mem[i] = ctx_pg;
7113 bp->ctx = ctx;
7114 } else {
7115 rc = 0;
7116 }
7117 ctx_err:
7118 hwrm_req_drop(bp, req);
7119 return rc;
7120 }
7121
7122 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
7123 __le64 *pg_dir)
7124 {
7125 if (!rmem->nr_pages)
7126 return;
7127
7128 BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
7129 if (rmem->depth >= 1) {
7130 if (rmem->depth == 2)
7131 *pg_attr |= 2;
7132 else
7133 *pg_attr |= 1;
7134 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
7135 } else {
7136 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
7137 }
7138 }
7139
7140 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
7141 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
7142 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
7143 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
7144 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
7145 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
7146
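/* Program the backing store regions selected in @enables into the
 * firmware.  The legacy, shorter request length is used when the
 * firmware cannot accept the full extended request.
 */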
7147 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
7148 {
7149 struct hwrm_func_backing_store_cfg_input *req;
7150 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7151 struct bnxt_ctx_pg_info *ctx_pg;
7152 void **__req = (void **)&req;
7153 u32 req_len = sizeof(*req);
7154 __le32 *num_entries;
7155 __le64 *pg_dir;
7156 u32 flags = 0;
7157 u8 *pg_attr;
7158 u32 ena;
7159 int rc;
7160 int i;
7161
7162 if (!ctx)
7163 return 0;
7164
7165 if (req_len > bp->hwrm_max_ext_req_len)
7166 req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
7167 rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
7168 if (rc)
7169 return rc;
7170
7171 req->enables = cpu_to_le32(enables);
7172 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
7173 ctx_pg = &ctx->qp_mem;
7174 req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
7175 req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
7176 req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
7177 req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
7178 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7179 &req->qpc_pg_size_qpc_lvl,
7180 &req->qpc_page_dir);
7181 }
7182 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
7183 ctx_pg = &ctx->srq_mem;
7184 req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
7185 req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
7186 req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
7187 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7188 &req->srq_pg_size_srq_lvl,
7189 &req->srq_page_dir);
7190 }
7191 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7192 ctx_pg = &ctx->cq_mem;
7193 req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
7194 req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7195 req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7196 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7197 &req->cq_pg_size_cq_lvl,
7198 &req->cq_page_dir);
7199 }
7200 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7201 ctx_pg = &ctx->vnic_mem;
7202 req->vnic_num_vnic_entries =
7203 cpu_to_le16(ctx->vnic_max_vnic_entries);
7204 req->vnic_num_ring_table_entries =
7205 cpu_to_le16(ctx->vnic_max_ring_table_entries);
7206 req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
7207 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7208 &req->vnic_pg_size_vnic_lvl,
7209 &req->vnic_page_dir);
7210 }
7211 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7212 ctx_pg = &ctx->stat_mem;
7213 req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7214 req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
7215 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7216 &req->stat_pg_size_stat_lvl,
7217 &req->stat_page_dir);
7218 }
7219 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7220 ctx_pg = &ctx->mrav_mem;
7221 req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
7222 if (ctx->mrav_num_entries_units)
7223 flags |=
7224 FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
7225 req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7226 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7227 &req->mrav_pg_size_mrav_lvl,
7228 &req->mrav_page_dir);
7229 }
7230 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7231 ctx_pg = &ctx->tim_mem;
7232 req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
7233 req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7234 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7235 &req->tim_pg_size_tim_lvl,
7236 &req->tim_page_dir);
7237 }
7238 for (i = 0, num_entries = &req->tqm_sp_num_entries,
7239 pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
7240 pg_dir = &req->tqm_sp_page_dir,
7241 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
7242 i < BNXT_MAX_TQM_RINGS;
7243 i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
7244 if (!(enables & ena))
7245 continue;
7246
7247 req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7248 ctx_pg = ctx->tqm_mem[i];
7249 *num_entries = cpu_to_le32(ctx_pg->entries);
7250 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7251 }
7252 req->flags = cpu_to_le32(flags);
7253 return hwrm_req_send(bp, req);
7254 }
7255
7256 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7257 struct bnxt_ctx_pg_info *ctx_pg)
7258 {
7259 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7260
7261 rmem->page_size = BNXT_PAGE_SIZE;
7262 rmem->pg_arr = ctx_pg->ctx_pg_arr;
7263 rmem->dma_arr = ctx_pg->ctx_dma_arr;
7264 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
7265 if (rmem->depth >= 1)
7266 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
7267 return bnxt_alloc_ring(bp, rmem);
7268 }
7269
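/* Allocate the pages backing one context memory region.  Regions that
 * fit within MAX_CTX_PAGES pages at depth <= 1 use a single level of
 * data pages; larger regions (or depth > 1) use a two-level layout in
 * which each first-level entry points to a page table covering up to
 * MAX_CTX_PAGES data pages, with the last table holding the remainder.
 */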
7270 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7271 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
7272 u8 depth, struct bnxt_mem_init *mem_init)
7273 {
7274 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7275 int rc;
7276
7277 if (!mem_size)
7278 return -EINVAL;
7279
7280 ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7281 if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7282 ctx_pg->nr_pages = 0;
7283 return -EINVAL;
7284 }
7285 if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7286 int nr_tbls, i;
7287
7288 rmem->depth = 2;
7289 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7290 GFP_KERNEL);
7291 if (!ctx_pg->ctx_pg_tbl)
7292 return -ENOMEM;
7293 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7294 rmem->nr_pages = nr_tbls;
7295 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7296 if (rc)
7297 return rc;
7298 for (i = 0; i < nr_tbls; i++) {
7299 struct bnxt_ctx_pg_info *pg_tbl;
7300
7301 pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7302 if (!pg_tbl)
7303 return -ENOMEM;
7304 ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7305 rmem = &pg_tbl->ring_mem;
7306 rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7307 rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7308 rmem->depth = 1;
7309 rmem->nr_pages = MAX_CTX_PAGES;
7310 rmem->mem_init = mem_init;
7311 if (i == (nr_tbls - 1)) {
7312 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7313
7314 if (rem)
7315 rmem->nr_pages = rem;
7316 }
7317 rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7318 if (rc)
7319 break;
7320 }
7321 } else {
7322 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7323 if (rmem->nr_pages > 1 || depth)
7324 rmem->depth = 1;
7325 rmem->mem_init = mem_init;
7326 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7327 }
7328 return rc;
7329 }
7330
7331 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7332 struct bnxt_ctx_pg_info *ctx_pg)
7333 {
7334 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7335
7336 if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7337 ctx_pg->ctx_pg_tbl) {
7338 int i, nr_tbls = rmem->nr_pages;
7339
7340 for (i = 0; i < nr_tbls; i++) {
7341 struct bnxt_ctx_pg_info *pg_tbl;
7342 struct bnxt_ring_mem_info *rmem2;
7343
7344 pg_tbl = ctx_pg->ctx_pg_tbl[i];
7345 if (!pg_tbl)
7346 continue;
7347 rmem2 = &pg_tbl->ring_mem;
7348 bnxt_free_ring(bp, rmem2);
7349 ctx_pg->ctx_pg_arr[i] = NULL;
7350 kfree(pg_tbl);
7351 ctx_pg->ctx_pg_tbl[i] = NULL;
7352 }
7353 kfree(ctx_pg->ctx_pg_tbl);
7354 ctx_pg->ctx_pg_tbl = NULL;
7355 }
7356 bnxt_free_ring(bp, rmem);
7357 ctx_pg->nr_pages = 0;
7358 }
7359
7360 void bnxt_free_ctx_mem(struct bnxt *bp)
7361 {
7362 struct bnxt_ctx_mem_info *ctx = bp->ctx;
7363 int i;
7364
7365 if (!ctx)
7366 return;
7367
7368 if (ctx->tqm_mem[0]) {
7369 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7370 bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7371 kfree(ctx->tqm_mem[0]);
7372 ctx->tqm_mem[0] = NULL;
7373 }
7374
7375 bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7376 bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7377 bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7378 bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7379 bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7380 bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7381 bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7382 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7383 }
7384
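/* Size and allocate every backing store region from the limits returned
 * by bnxt_hwrm_func_backing_store_qcaps(), adding extra QP/SRQ/MR-AV/TIM
 * entries when the RoCE capability is present (and not in a kdump
 * kernel), then hand the resulting layout to the firmware with
 * bnxt_hwrm_func_backing_store_cfg().
 */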
7385 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7386 {
7387 struct bnxt_ctx_pg_info *ctx_pg;
7388 struct bnxt_ctx_mem_info *ctx;
7389 struct bnxt_mem_init *init;
7390 u32 mem_size, ena, entries;
7391 u32 entries_sp, min;
7392 u32 num_mr, num_ah;
7393 u32 extra_srqs = 0;
7394 u32 extra_qps = 0;
7395 u8 pg_lvl = 1;
7396 int i, rc;
7397
7398 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7399 if (rc) {
7400 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7401 rc);
7402 return rc;
7403 }
7404 ctx = bp->ctx;
7405 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7406 return 0;
7407
7408 if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7409 pg_lvl = 2;
7410 extra_qps = 65536;
7411 extra_srqs = 8192;
7412 }
7413
7414 ctx_pg = &ctx->qp_mem;
7415 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7416 extra_qps;
7417 if (ctx->qp_entry_size) {
7418 mem_size = ctx->qp_entry_size * ctx_pg->entries;
7419 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7420 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7421 if (rc)
7422 return rc;
7423 }
7424
7425 ctx_pg = &ctx->srq_mem;
7426 ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7427 if (ctx->srq_entry_size) {
7428 mem_size = ctx->srq_entry_size * ctx_pg->entries;
7429 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7430 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7431 if (rc)
7432 return rc;
7433 }
7434
7435 ctx_pg = &ctx->cq_mem;
7436 ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7437 if (ctx->cq_entry_size) {
7438 mem_size = ctx->cq_entry_size * ctx_pg->entries;
7439 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7440 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7441 if (rc)
7442 return rc;
7443 }
7444
7445 ctx_pg = &ctx->vnic_mem;
7446 ctx_pg->entries = ctx->vnic_max_vnic_entries +
7447 ctx->vnic_max_ring_table_entries;
7448 if (ctx->vnic_entry_size) {
7449 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7450 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7451 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7452 if (rc)
7453 return rc;
7454 }
7455
7456 ctx_pg = &ctx->stat_mem;
7457 ctx_pg->entries = ctx->stat_max_entries;
7458 if (ctx->stat_entry_size) {
7459 mem_size = ctx->stat_entry_size * ctx_pg->entries;
7460 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7461 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7462 if (rc)
7463 return rc;
7464 }
7465
7466 ena = 0;
7467 if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7468 goto skip_rdma;
7469
7470 ctx_pg = &ctx->mrav_mem;
7471 /* 128K extra is needed to accommodate static AH context
7472 * allocation by f/w.
7473 */
7474 num_mr = 1024 * 256;
7475 num_ah = 1024 * 128;
7476 ctx_pg->entries = num_mr + num_ah;
7477 if (ctx->mrav_entry_size) {
7478 mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7479 init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7480 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
7481 if (rc)
7482 return rc;
7483 }
7484 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
7485 if (ctx->mrav_num_entries_units)
7486 ctx_pg->entries =
7487 ((num_mr / ctx->mrav_num_entries_units) << 16) |
7488 (num_ah / ctx->mrav_num_entries_units);
7489
7490 ctx_pg = &ctx->tim_mem;
7491 ctx_pg->entries = ctx->qp_mem.entries;
7492 if (ctx->tim_entry_size) {
7493 mem_size = ctx->tim_entry_size * ctx_pg->entries;
7494 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
7495 if (rc)
7496 return rc;
7497 }
7498 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7499
7500 skip_rdma:
7501 min = ctx->tqm_min_entries_per_ring;
7502 entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7503 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7504 entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7505 entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
7506 entries = roundup(entries, ctx->tqm_entries_multiple);
7507 entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7508 for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7509 ctx_pg = ctx->tqm_mem[i];
7510 ctx_pg->entries = i ? entries : entries_sp;
7511 if (ctx->tqm_entry_size) {
7512 mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7513 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
7514 NULL);
7515 if (rc)
7516 return rc;
7517 }
7518 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7519 }
7520 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7521 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7522 if (rc) {
7523 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7524 rc);
7525 return rc;
7526 }
7527 ctx->flags |= BNXT_CTX_FLAG_INITED;
7528 return 0;
7529 }
7530
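/* Query the min/max resource ranges (completion/TX/RX rings, ring
 * groups, stat contexts, VNICs, L2 and RSS contexts) available to this
 * function.  With @all false, only the TX scheduler input limit is
 * recorded.
 */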
7531 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7532 {
7533 struct hwrm_func_resource_qcaps_output *resp;
7534 struct hwrm_func_resource_qcaps_input *req;
7535 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7536 int rc;
7537
7538 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
7539 if (rc)
7540 return rc;
7541
7542 req->fid = cpu_to_le16(0xffff);
7543 resp = hwrm_req_hold(bp, req);
7544 rc = hwrm_req_send_silent(bp, req);
7545 if (rc)
7546 goto hwrm_func_resc_qcaps_exit;
7547
7548 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7549 if (!all)
7550 goto hwrm_func_resc_qcaps_exit;
7551
7552 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7553 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7554 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7555 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7556 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7557 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7558 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7559 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7560 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7561 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7562 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7563 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7564 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7565 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7566 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7567 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7568
7569 if (bp->flags & BNXT_FLAG_CHIP_P5) {
7570 u16 max_msix = le16_to_cpu(resp->max_msix);
7571
7572 hw_resc->max_nqs = max_msix;
7573 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7574 }
7575
7576 if (BNXT_PF(bp)) {
7577 struct bnxt_pf_info *pf = &bp->pf;
7578
7579 pf->vf_resv_strategy =
7580 le16_to_cpu(resp->vf_reservation_strategy);
7581 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7582 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7583 }
7584 hwrm_func_resc_qcaps_exit:
7585 hwrm_req_drop(bp, req);
7586 return rc;
7587 }
7588
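/* Query the PTP configuration of the port.  On success a bnxt_ptp_cfg
 * structure is allocated (if not already present), the PHC reference
 * clock register addresses are recorded and bnxt_ptp_init() is called;
 * on failure any existing PTP state is torn down.
 */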
7589 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7590 {
7591 struct hwrm_port_mac_ptp_qcfg_output *resp;
7592 struct hwrm_port_mac_ptp_qcfg_input *req;
7593 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7594 bool phc_cfg;
7595 u8 flags;
7596 int rc;
7597
7598 if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_THOR(bp)) {
7599 rc = -ENODEV;
7600 goto no_ptp;
7601 }
7602
7603 rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
7604 if (rc)
7605 goto no_ptp;
7606
7607 req->port_id = cpu_to_le16(bp->pf.port_id);
7608 resp = hwrm_req_hold(bp, req);
7609 rc = hwrm_req_send(bp, req);
7610 if (rc)
7611 goto exit;
7612
7613 flags = resp->flags;
7614 if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7615 rc = -ENODEV;
7616 goto exit;
7617 }
7618 if (!ptp) {
7619 ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
7620 if (!ptp) {
7621 rc = -ENOMEM;
7622 goto exit;
7623 }
7624 ptp->bp = bp;
7625 bp->ptp_cfg = ptp;
7626 }
7627 if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7628 ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7629 ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7630 } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7631 ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7632 ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7633 } else {
7634 rc = -ENODEV;
7635 goto exit;
7636 }
7637 phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
7638 rc = bnxt_ptp_init(bp, phc_cfg);
7639 if (rc)
7640 netdev_warn(bp->dev, "PTP initialization failed.\n");
7641 exit:
7642 hwrm_req_drop(bp, req);
7643 if (!rc)
7644 return 0;
7645
7646 no_ptp:
7647 bnxt_ptp_clear(bp);
7648 kfree(ptp);
7649 bp->ptp_cfg = NULL;
7650 return rc;
7651 }
7652
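/* Query the function capabilities: translate the advertised flag bits
 * into bp->flags / bp->fw_cap, record the hardware resource maximums,
 * and capture the PF (or VF) identity such as FID, port id, MAC address
 * and SR-IOV related limits.
 */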
7653 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7654 {
7655 struct hwrm_func_qcaps_output *resp;
7656 struct hwrm_func_qcaps_input *req;
7657 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7658 u32 flags, flags_ext, flags_ext2;
7659 int rc;
7660
7661 rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
7662 if (rc)
7663 return rc;
7664
7665 req->fid = cpu_to_le16(0xffff);
7666 resp = hwrm_req_hold(bp, req);
7667 rc = hwrm_req_send(bp, req);
7668 if (rc)
7669 goto hwrm_func_qcaps_exit;
7670
7671 flags = le32_to_cpu(resp->flags);
7672 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7673 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7674 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7675 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7676 if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7677 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7678 if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7679 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7680 if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7681 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7682 if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7683 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7684 if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7685 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7686 if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7687 bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7688 if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
7689 bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
7690
7691 flags_ext = le32_to_cpu(resp->flags_ext);
7692 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7693 bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7694 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
7695 bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
7696 if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
7697 bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
7698 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
7699 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
7700 if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
7701 bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
7702
7703 flags_ext2 = le32_to_cpu(resp->flags_ext2);
7704 if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
7705 bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
7706
7707 bp->tx_push_thresh = 0;
7708 if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7709 BNXT_FW_MAJ(bp) > 217)
7710 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7711
7712 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7713 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7714 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7715 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7716 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7717 if (!hw_resc->max_hw_ring_grps)
7718 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7719 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7720 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7721 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7722
7723 if (BNXT_PF(bp)) {
7724 struct bnxt_pf_info *pf = &bp->pf;
7725
7726 pf->fw_fid = le16_to_cpu(resp->fid);
7727 pf->port_id = le16_to_cpu(resp->port_id);
7728 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7729 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7730 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7731 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7732 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7733 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7734 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7735 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7736 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7737 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7738 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7739 bp->flags |= BNXT_FLAG_WOL_CAP;
7740 if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
7741 bp->fw_cap |= BNXT_FW_CAP_PTP;
7742 } else {
7743 bnxt_ptp_clear(bp);
7744 kfree(bp->ptp_cfg);
7745 bp->ptp_cfg = NULL;
7746 }
7747 } else {
7748 #ifdef CONFIG_BNXT_SRIOV
7749 struct bnxt_vf_info *vf = &bp->vf;
7750
7751 vf->fw_fid = le16_to_cpu(resp->fid);
7752 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7753 #endif
7754 }
7755
7756 hwrm_func_qcaps_exit:
7757 hwrm_req_drop(bp, req);
7758 return rc;
7759 }
7760
7761 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
7762 {
7763 struct hwrm_dbg_qcaps_output *resp;
7764 struct hwrm_dbg_qcaps_input *req;
7765 int rc;
7766
7767 bp->fw_dbg_cap = 0;
7768 if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
7769 return;
7770
7771 rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
7772 if (rc)
7773 return;
7774
7775 req->fid = cpu_to_le16(0xffff);
7776 resp = hwrm_req_hold(bp, req);
7777 rc = hwrm_req_send(bp, req);
7778 if (rc)
7779 goto hwrm_dbg_qcaps_exit;
7780
7781 bp->fw_dbg_cap = le32_to_cpu(resp->flags);
7782
7783 hwrm_dbg_qcaps_exit:
7784 hwrm_req_drop(bp, req);
7785 }
7786
7787 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7788
7789 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7790 {
7791 int rc;
7792
7793 rc = __bnxt_hwrm_func_qcaps(bp);
7794 if (rc)
7795 return rc;
7796
7797 bnxt_hwrm_dbg_qcaps(bp);
7798
7799 rc = bnxt_hwrm_queue_qportcfg(bp);
7800 if (rc) {
7801 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7802 return rc;
7803 }
7804 if (bp->hwrm_spec_code >= 0x10803) {
7805 rc = bnxt_alloc_ctx_mem(bp);
7806 if (rc)
7807 return rc;
7808 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7809 if (!rc)
7810 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7811 }
7812 return 0;
7813 }
7814
7815 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7816 {
7817 struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7818 struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
7819 u32 flags;
7820 int rc;
7821
7822 if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7823 return 0;
7824
7825 rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
7826 if (rc)
7827 return rc;
7828
7829 resp = hwrm_req_hold(bp, req);
7830 rc = hwrm_req_send(bp, req);
7831 if (rc)
7832 goto hwrm_cfa_adv_qcaps_exit;
7833
7834 flags = le32_to_cpu(resp->flags);
7835 if (flags &
7836 CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7837 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7838
7839 hwrm_cfa_adv_qcaps_exit:
7840 hwrm_req_drop(bp, req);
7841 return rc;
7842 }
7843
7844 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7845 {
7846 if (bp->fw_health)
7847 return 0;
7848
7849 bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7850 if (!bp->fw_health)
7851 return -ENOMEM;
7852
7853 mutex_init(&bp->fw_health->lock);
7854 return 0;
7855 }
7856
7857 static int bnxt_alloc_fw_health(struct bnxt *bp)
7858 {
7859 int rc;
7860
7861 if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7862 !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7863 return 0;
7864
7865 rc = __bnxt_alloc_fw_health(bp);
7866 if (rc) {
7867 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7868 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7869 return rc;
7870 }
7871
7872 return 0;
7873 }
7874
7875 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7876 {
7877 writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7878 BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7879 BNXT_FW_HEALTH_WIN_MAP_OFF);
7880 }
7881
7882 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7883 {
7884 struct bnxt_fw_health *fw_health = bp->fw_health;
7885 u32 reg_type;
7886
7887 if (!fw_health)
7888 return;
7889
7890 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7891 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7892 fw_health->status_reliable = false;
7893
7894 reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
7895 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7896 fw_health->resets_reliable = false;
7897 }
7898
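/* Locate the firmware health status register.  The hcomm status
 * structure is mapped through a GRC window and checked for its
 * signature; if it is absent, P5 chips fall back to a fixed BAR0 status
 * location.  The discovered register is recorded (and pre-mapped when it
 * is a GRC address) so that firmware liveness can be polled later.
 */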
7899 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7900 {
7901 void __iomem *hs;
7902 u32 status_loc;
7903 u32 reg_type;
7904 u32 sig;
7905
7906 if (bp->fw_health)
7907 bp->fw_health->status_reliable = false;
7908
7909 __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7910 hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7911
7912 sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7913 if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
7914 if (!bp->chip_num) {
7915 __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7916 bp->chip_num = readl(bp->bar0 +
7917 BNXT_FW_HEALTH_WIN_BASE +
7918 BNXT_GRC_REG_CHIP_NUM);
7919 }
7920 if (!BNXT_CHIP_P5(bp))
7921 return;
7922
7923 status_loc = BNXT_GRC_REG_STATUS_P5 |
7924 BNXT_FW_HEALTH_REG_TYPE_BAR0;
7925 } else {
7926 status_loc = readl(hs + offsetof(struct hcomm_status,
7927 fw_status_loc));
7928 }
7929
7930 if (__bnxt_alloc_fw_health(bp)) {
7931 netdev_warn(bp->dev, "no memory for firmware status checks\n");
7932 return;
7933 }
7934
7935 bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7936 reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7937 if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7938 __bnxt_map_fw_health_reg(bp, status_loc);
7939 bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7940 BNXT_FW_HEALTH_WIN_OFF(status_loc);
7941 }
7942
7943 bp->fw_health->status_reliable = true;
7944 }
7945
7946 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7947 {
7948 struct bnxt_fw_health *fw_health = bp->fw_health;
7949 u32 reg_base = 0xffffffff;
7950 int i;
7951
7952 bp->fw_health->status_reliable = false;
7953 bp->fw_health->resets_reliable = false;
7954 /* Only pre-map the monitoring GRC registers using window 3 */
7955 for (i = 0; i < 4; i++) {
7956 u32 reg = fw_health->regs[i];
7957
7958 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7959 continue;
7960 if (reg_base == 0xffffffff)
7961 reg_base = reg & BNXT_GRC_BASE_MASK;
7962 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7963 return -ERANGE;
7964 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
7965 }
7966 bp->fw_health->status_reliable = true;
7967 bp->fw_health->resets_reliable = true;
7968 if (reg_base == 0xffffffff)
7969 return 0;
7970
7971 __bnxt_map_fw_health_reg(bp, reg_base);
7972 return 0;
7973 }
7974
7975 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
7976 {
7977 if (!bp->fw_health)
7978 return;
7979
7980 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
7981 bp->fw_health->status_reliable = true;
7982 bp->fw_health->resets_reliable = true;
7983 } else {
7984 bnxt_try_map_fw_health_reg(bp);
7985 }
7986 }
7987
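/* Fetch the firmware error recovery parameters: polling frequency, the
 * wait periods around a reset, the health/heartbeat/reset-counter
 * register locations and the register write sequence used to trigger a
 * reset.  The monitored GRC registers are then pre-mapped; any failure
 * clears the error recovery capability.
 */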
7988 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7989 {
7990 struct bnxt_fw_health *fw_health = bp->fw_health;
7991 struct hwrm_error_recovery_qcfg_output *resp;
7992 struct hwrm_error_recovery_qcfg_input *req;
7993 int rc, i;
7994
7995 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7996 return 0;
7997
7998 rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
7999 if (rc)
8000 return rc;
8001
8002 resp = hwrm_req_hold(bp, req);
8003 rc = hwrm_req_send(bp, req);
8004 if (rc)
8005 goto err_recovery_out;
8006 fw_health->flags = le32_to_cpu(resp->flags);
8007 if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
8008 !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
8009 rc = -EINVAL;
8010 goto err_recovery_out;
8011 }
8012 fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
8013 fw_health->master_func_wait_dsecs =
8014 le32_to_cpu(resp->master_func_wait_period);
8015 fw_health->normal_func_wait_dsecs =
8016 le32_to_cpu(resp->normal_func_wait_period);
8017 fw_health->post_reset_wait_dsecs =
8018 le32_to_cpu(resp->master_func_wait_period_after_reset);
8019 fw_health->post_reset_max_wait_dsecs =
8020 le32_to_cpu(resp->max_bailout_time_after_reset);
8021 fw_health->regs[BNXT_FW_HEALTH_REG] =
8022 le32_to_cpu(resp->fw_health_status_reg);
8023 fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
8024 le32_to_cpu(resp->fw_heartbeat_reg);
8025 fw_health->regs[BNXT_FW_RESET_CNT_REG] =
8026 le32_to_cpu(resp->fw_reset_cnt_reg);
8027 fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
8028 le32_to_cpu(resp->reset_inprogress_reg);
8029 fw_health->fw_reset_inprog_reg_mask =
8030 le32_to_cpu(resp->reset_inprogress_reg_mask);
8031 fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
8032 if (fw_health->fw_reset_seq_cnt >= 16) {
8033 rc = -EINVAL;
8034 goto err_recovery_out;
8035 }
8036 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
8037 fw_health->fw_reset_seq_regs[i] =
8038 le32_to_cpu(resp->reset_reg[i]);
8039 fw_health->fw_reset_seq_vals[i] =
8040 le32_to_cpu(resp->reset_reg_val[i]);
8041 fw_health->fw_reset_seq_delay_msec[i] =
8042 resp->delay_after_reset[i];
8043 }
8044 err_recovery_out:
8045 hwrm_req_drop(bp, req);
8046 if (!rc)
8047 rc = bnxt_map_fw_health_regs(bp);
8048 if (rc)
8049 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
8050 return rc;
8051 }
8052
8053 static int bnxt_hwrm_func_reset(struct bnxt *bp)
8054 {
8055 struct hwrm_func_reset_input *req;
8056 int rc;
8057
8058 rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
8059 if (rc)
8060 return rc;
8061
8062 req->enables = 0;
8063 hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
8064 return hwrm_req_send(bp, req);
8065 }
8066
8067 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
8068 {
8069 struct hwrm_nvm_get_dev_info_output nvm_info;
8070
8071 if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
8072 snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
8073 nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
8074 nvm_info.nvm_cfg_ver_upd);
8075 }
8076
8077 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
8078 {
8079 struct hwrm_queue_qportcfg_output *resp;
8080 struct hwrm_queue_qportcfg_input *req;
8081 u8 i, j, *qptr;
8082 bool no_rdma;
8083 int rc = 0;
8084
8085 rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
8086 if (rc)
8087 return rc;
8088
8089 resp = hwrm_req_hold(bp, req);
8090 rc = hwrm_req_send(bp, req);
8091 if (rc)
8092 goto qportcfg_exit;
8093
8094 if (!resp->max_configurable_queues) {
8095 rc = -EINVAL;
8096 goto qportcfg_exit;
8097 }
8098 bp->max_tc = resp->max_configurable_queues;
8099 bp->max_lltc = resp->max_configurable_lossless_queues;
8100 if (bp->max_tc > BNXT_MAX_QUEUE)
8101 bp->max_tc = BNXT_MAX_QUEUE;
8102
8103 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
8104 qptr = &resp->queue_id0;
8105 for (i = 0, j = 0; i < bp->max_tc; i++) {
8106 bp->q_info[j].queue_id = *qptr;
8107 bp->q_ids[i] = *qptr++;
8108 bp->q_info[j].queue_profile = *qptr++;
8109 bp->tc_to_qidx[j] = j;
8110 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
8111 (no_rdma && BNXT_PF(bp)))
8112 j++;
8113 }
8114 bp->max_q = bp->max_tc;
8115 bp->max_tc = max_t(u8, j, 1);
8116
8117 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
8118 bp->max_tc = 1;
8119
8120 if (bp->max_lltc > bp->max_tc)
8121 bp->max_lltc = bp->max_tc;
8122
8123 qportcfg_exit:
8124 hwrm_req_drop(bp, req);
8125 return rc;
8126 }
8127
8128 static int bnxt_hwrm_poll(struct bnxt *bp)
8129 {
8130 struct hwrm_ver_get_input *req;
8131 int rc;
8132
8133 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
8134 if (rc)
8135 return rc;
8136
8137 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
8138 req->hwrm_intf_min = HWRM_VERSION_MINOR;
8139 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
8140
8141 hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
8142 rc = hwrm_req_send(bp, req);
8143 return rc;
8144 }
8145
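/* Exchange version information with the firmware: record the HWRM spec
 * level, build the firmware/package version strings, pick up the command
 * timeouts and maximum request lengths, note the chip number/revision
 * and translate the dev_caps_cfg bits into bp->fw_cap.
 */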
8146 static int bnxt_hwrm_ver_get(struct bnxt *bp)
8147 {
8148 struct hwrm_ver_get_output *resp;
8149 struct hwrm_ver_get_input *req;
8150 u16 fw_maj, fw_min, fw_bld, fw_rsv;
8151 u32 dev_caps_cfg, hwrm_ver;
8152 int rc, len;
8153
8154 rc = hwrm_req_init(bp, req, HWRM_VER_GET);
8155 if (rc)
8156 return rc;
8157
8158 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
8159 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
8160 req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
8161 req->hwrm_intf_min = HWRM_VERSION_MINOR;
8162 req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
8163
8164 resp = hwrm_req_hold(bp, req);
8165 rc = hwrm_req_send(bp, req);
8166 if (rc)
8167 goto hwrm_ver_get_exit;
8168
8169 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
8170
8171 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
8172 resp->hwrm_intf_min_8b << 8 |
8173 resp->hwrm_intf_upd_8b;
8174 if (resp->hwrm_intf_maj_8b < 1) {
8175 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
8176 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8177 resp->hwrm_intf_upd_8b);
8178 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
8179 }
8180
8181 hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
8182 HWRM_VERSION_UPDATE;
8183
8184 if (bp->hwrm_spec_code > hwrm_ver)
8185 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8186 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
8187 HWRM_VERSION_UPDATE);
8188 else
8189 snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8190 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8191 resp->hwrm_intf_upd_8b);
8192
8193 fw_maj = le16_to_cpu(resp->hwrm_fw_major);
8194 if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
8195 fw_min = le16_to_cpu(resp->hwrm_fw_minor);
8196 fw_bld = le16_to_cpu(resp->hwrm_fw_build);
8197 fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
8198 len = FW_VER_STR_LEN;
8199 } else {
8200 fw_maj = resp->hwrm_fw_maj_8b;
8201 fw_min = resp->hwrm_fw_min_8b;
8202 fw_bld = resp->hwrm_fw_bld_8b;
8203 fw_rsv = resp->hwrm_fw_rsvd_8b;
8204 len = BC_HWRM_STR_LEN;
8205 }
8206 bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
8207 snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
8208 fw_rsv);
8209
8210 if (strlen(resp->active_pkg_name)) {
8211 int fw_ver_len = strlen(bp->fw_ver_str);
8212
8213 snprintf(bp->fw_ver_str + fw_ver_len,
8214 FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
8215 resp->active_pkg_name);
8216 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
8217 }
8218
8219 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
8220 if (!bp->hwrm_cmd_timeout)
8221 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
8222 bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
8223 if (!bp->hwrm_cmd_max_timeout)
8224 bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
8225 else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
8226 netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
8227 bp->hwrm_cmd_max_timeout / 1000);
8228
8229 if (resp->hwrm_intf_maj_8b >= 1) {
8230 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
8231 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
8232 }
8233 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
8234 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
8235
8236 bp->chip_num = le16_to_cpu(resp->chip_num);
8237 bp->chip_rev = resp->chip_rev;
8238 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
8239 !resp->chip_metal)
8240 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
8241
8242 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
8243 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
8244 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
8245 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
8246
8247 if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
8248 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8249
8250 if (dev_caps_cfg &
8251 VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
8252 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8253
8254 if (dev_caps_cfg &
8255 VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
8256 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8257
8258 if (dev_caps_cfg &
8259 VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
8260 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8261
8262 hwrm_ver_get_exit:
8263 hwrm_req_drop(bp, req);
8264 return rc;
8265 }
8266
8267 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8268 {
8269 struct hwrm_fw_set_time_input *req;
8270 struct tm tm;
8271 time64_t now = ktime_get_real_seconds();
8272 int rc;
8273
8274 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8275 bp->hwrm_spec_code < 0x10400)
8276 return -EOPNOTSUPP;
8277
8278 time64_to_tm(now, 0, &tm);
8279 rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
8280 if (rc)
8281 return rc;
8282
8283 req->year = cpu_to_le16(1900 + tm.tm_year);
8284 req->month = 1 + tm.tm_mon;
8285 req->day = tm.tm_mday;
8286 req->hour = tm.tm_hour;
8287 req->minute = tm.tm_min;
8288 req->second = tm.tm_sec;
8289 return hwrm_req_send(bp, req);
8290 }
8291
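/* Fold a hardware counter that is only as wide as @mask into the 64-bit
 * software counter, accounting for wrap-around.  For example, with a
 * 48-bit counter (mask = 0xffff_ffff_ffff), *sw = 0x1_ffff_ffff_fff0 and
 * a new hardware reading of 0x10, the result is 0x2_0000_0000_0010.
 */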
8292 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8293 {
8294 u64 sw_tmp;
8295
8296 hw &= mask;
8297 sw_tmp = (*sw & ~mask) | hw;
8298 if (hw < (*sw & mask))
8299 sw_tmp += mask + 1;
8300 WRITE_ONCE(*sw, sw_tmp);
8301 }
8302
8303 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8304 int count, bool ignore_zero)
8305 {
8306 int i;
8307
8308 for (i = 0; i < count; i++) {
8309 u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8310
8311 if (ignore_zero && !hw)
8312 continue;
8313
8314 if (masks[i] == -1ULL)
8315 sw_stats[i] = hw;
8316 else
8317 bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8318 }
8319 }
8320
8321 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8322 {
8323 if (!stats->hw_stats)
8324 return;
8325
8326 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8327 stats->hw_masks, stats->len / 8, false);
8328 }
8329
8330 static void bnxt_accumulate_all_stats(struct bnxt *bp)
8331 {
8332 struct bnxt_stats_mem *ring0_stats;
8333 bool ignore_zero = false;
8334 int i;
8335
8336 /* Chip bug. Counter intermittently becomes 0. */
8337 if (bp->flags & BNXT_FLAG_CHIP_P5)
8338 ignore_zero = true;
8339
8340 for (i = 0; i < bp->cp_nr_rings; i++) {
8341 struct bnxt_napi *bnapi = bp->bnapi[i];
8342 struct bnxt_cp_ring_info *cpr;
8343 struct bnxt_stats_mem *stats;
8344
8345 cpr = &bnapi->cp_ring;
8346 stats = &cpr->stats;
8347 if (!i)
8348 ring0_stats = stats;
8349 __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8350 ring0_stats->hw_masks,
8351 ring0_stats->len / 8, ignore_zero);
8352 }
8353 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8354 struct bnxt_stats_mem *stats = &bp->port_stats;
8355 __le64 *hw_stats = stats->hw_stats;
8356 u64 *sw_stats = stats->sw_stats;
8357 u64 *masks = stats->hw_masks;
8358 int cnt;
8359
8360 cnt = sizeof(struct rx_port_stats) / 8;
8361 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8362
8363 hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8364 sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8365 masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8366 cnt = sizeof(struct tx_port_stats) / 8;
8367 __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8368 }
8369 if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8370 bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8371 bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8372 }
8373 }
8374
8375 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
8376 {
8377 struct hwrm_port_qstats_input *req;
8378 struct bnxt_pf_info *pf = &bp->pf;
8379 int rc;
8380
8381 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8382 return 0;
8383
8384 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8385 return -EOPNOTSUPP;
8386
8387 rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
8388 if (rc)
8389 return rc;
8390
8391 req->flags = flags;
8392 req->port_id = cpu_to_le16(pf->port_id);
8393 req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8394 BNXT_TX_PORT_STATS_BYTE_OFFSET);
8395 req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8396 return hwrm_req_send(bp, req);
8397 }
8398
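/* Collect the extended RX/TX port statistics into their DMA buffers.
 * On a plain query (flags == 0) the priority-to-CoS queue mapping is
 * also read so that per-priority counters can be reported.
 */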
8399 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
8400 {
8401 struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
8402 struct hwrm_queue_pri2cos_qcfg_input *req_qc;
8403 struct hwrm_port_qstats_ext_output *resp_qs;
8404 struct hwrm_port_qstats_ext_input *req_qs;
8405 struct bnxt_pf_info *pf = &bp->pf;
8406 u32 tx_stat_size;
8407 int rc;
8408
8409 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8410 return 0;
8411
8412 if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8413 return -EOPNOTSUPP;
8414
8415 rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
8416 if (rc)
8417 return rc;
8418
8419 req_qs->flags = flags;
8420 req_qs->port_id = cpu_to_le16(pf->port_id);
8421 req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8422 req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8423 tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8424 sizeof(struct tx_port_stats_ext) : 0;
8425 req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
8426 req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8427 resp_qs = hwrm_req_hold(bp, req_qs);
8428 rc = hwrm_req_send(bp, req_qs);
8429 if (!rc) {
8430 bp->fw_rx_stats_ext_size =
8431 le16_to_cpu(resp_qs->rx_stat_size) / 8;
8432 if (BNXT_FW_MAJ(bp) < 220 &&
8433 bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
8434 bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
8435
8436 bp->fw_tx_stats_ext_size = tx_stat_size ?
8437 le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
8438 } else {
8439 bp->fw_rx_stats_ext_size = 0;
8440 bp->fw_tx_stats_ext_size = 0;
8441 }
8442 hwrm_req_drop(bp, req_qs);
8443
8444 if (flags)
8445 return rc;
8446
8447 if (bp->fw_tx_stats_ext_size <=
8448 offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
8449 bp->pri2cos_valid = 0;
8450 return rc;
8451 }
8452
8453 rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
8454 if (rc)
8455 return rc;
8456
8457 req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8458
8459 resp_qc = hwrm_req_hold(bp, req_qc);
8460 rc = hwrm_req_send(bp, req_qc);
8461 if (!rc) {
8462 u8 *pri2cos;
8463 int i, j;
8464
8465 pri2cos = &resp_qc->pri0_cos_queue_id;
8466 for (i = 0; i < 8; i++) {
8467 u8 queue_id = pri2cos[i];
8468 u8 queue_idx;
8469
8470 /* Per port queue IDs start from 0, 10, 20, etc */
8471 queue_idx = queue_id % 10;
8472 if (queue_idx > BNXT_MAX_QUEUE) {
8473 bp->pri2cos_valid = false;
8474 hwrm_req_drop(bp, req_qc);
8475 return rc;
8476 }
8477 for (j = 0; j < bp->max_q; j++) {
8478 if (bp->q_ids[j] == queue_id)
8479 bp->pri2cos_idx[i] = queue_idx;
8480 }
8481 }
8482 bp->pri2cos_valid = true;
8483 }
8484 hwrm_req_drop(bp, req_qc);
8485
8486 return rc;
8487 }
8488
8489 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8490 {
8491 bnxt_hwrm_tunnel_dst_port_free(bp,
8492 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8493 bnxt_hwrm_tunnel_dst_port_free(bp,
8494 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8495 }
8496
8497 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8498 {
8499 int rc, i;
8500 u32 tpa_flags = 0;
8501
8502 if (set_tpa)
8503 tpa_flags = bp->flags & BNXT_FLAG_TPA;
8504 else if (BNXT_NO_FW_ACCESS(bp))
8505 return 0;
8506 for (i = 0; i < bp->nr_vnics; i++) {
8507 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8508 if (rc) {
8509 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
8510 i, rc);
8511 return rc;
8512 }
8513 }
8514 return 0;
8515 }
8516
8517 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8518 {
8519 int i;
8520
8521 for (i = 0; i < bp->nr_vnics; i++)
8522 bnxt_hwrm_vnic_set_rss(bp, i, false);
8523 }
8524
8525 static void bnxt_clear_vnic(struct bnxt *bp)
8526 {
8527 if (!bp->vnic_info)
8528 return;
8529
8530 bnxt_hwrm_clear_vnic_filter(bp);
8531 if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8532 		/* clear all RSS settings before freeing the VNIC context */
8533 bnxt_hwrm_clear_vnic_rss(bp);
8534 bnxt_hwrm_vnic_ctx_free(bp);
8535 }
8536 	/* before freeing the VNIC, undo the VNIC TPA settings */
8537 if (bp->flags & BNXT_FLAG_TPA)
8538 bnxt_set_tpa(bp, false);
8539 bnxt_hwrm_vnic_free(bp);
8540 if (bp->flags & BNXT_FLAG_CHIP_P5)
8541 bnxt_hwrm_vnic_ctx_free(bp);
8542 }
8543
8544 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8545 bool irq_re_init)
8546 {
8547 bnxt_clear_vnic(bp);
8548 bnxt_hwrm_ring_free(bp, close_path);
8549 bnxt_hwrm_ring_grp_free(bp);
8550 if (irq_re_init) {
8551 bnxt_hwrm_stat_ctx_free(bp);
8552 bnxt_hwrm_free_tunnel_ports(bp);
8553 }
8554 }
8555
8556 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8557 {
8558 struct hwrm_func_cfg_input *req;
8559 u8 evb_mode;
8560 int rc;
8561
8562 if (br_mode == BRIDGE_MODE_VEB)
8563 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8564 else if (br_mode == BRIDGE_MODE_VEPA)
8565 evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8566 else
8567 return -EINVAL;
8568
8569 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8570 if (rc)
8571 return rc;
8572
8573 req->fid = cpu_to_le16(0xffff);
8574 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8575 req->evb_mode = evb_mode;
8576 return hwrm_req_send(bp, req);
8577 }
8578
8579 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8580 {
8581 struct hwrm_func_cfg_input *req;
8582 int rc;
8583
8584 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8585 return 0;
8586
8587 rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8588 if (rc)
8589 return rc;
8590
8591 req->fid = cpu_to_le16(0xffff);
8592 req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8593 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
8594 if (size == 128)
8595 req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
8596
8597 return hwrm_req_send(bp, req);
8598 }
8599
8600 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8601 {
8602 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8603 int rc;
8604
8605 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8606 goto skip_rss_ctx;
8607
8608 /* allocate context for vnic */
8609 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8610 if (rc) {
8611 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8612 vnic_id, rc);
8613 goto vnic_setup_err;
8614 }
8615 bp->rsscos_nr_ctxs++;
8616
8617 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8618 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8619 if (rc) {
8620 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8621 vnic_id, rc);
8622 goto vnic_setup_err;
8623 }
8624 bp->rsscos_nr_ctxs++;
8625 }
8626
8627 skip_rss_ctx:
8628 /* configure default vnic, ring grp */
8629 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8630 if (rc) {
8631 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8632 vnic_id, rc);
8633 goto vnic_setup_err;
8634 }
8635
8636 /* Enable RSS hashing on vnic */
8637 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8638 if (rc) {
8639 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8640 vnic_id, rc);
8641 goto vnic_setup_err;
8642 }
8643
8644 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8645 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8646 if (rc) {
8647 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8648 vnic_id, rc);
8649 }
8650 }
8651
8652 vnic_setup_err:
8653 return rc;
8654 }
8655
8656 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8657 {
8658 int rc, i, nr_ctxs;
8659
8660 nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8661 for (i = 0; i < nr_ctxs; i++) {
8662 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8663 if (rc) {
8664 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8665 vnic_id, i, rc);
8666 break;
8667 }
8668 bp->rsscos_nr_ctxs++;
8669 }
8670 if (i < nr_ctxs)
8671 return -ENOMEM;
8672
8673 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8674 if (rc) {
8675 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8676 vnic_id, rc);
8677 return rc;
8678 }
8679 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8680 if (rc) {
8681 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8682 vnic_id, rc);
8683 return rc;
8684 }
8685 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8686 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8687 if (rc) {
8688 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8689 vnic_id, rc);
8690 }
8691 }
8692 return rc;
8693 }
8694
8695 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8696 {
8697 if (bp->flags & BNXT_FLAG_CHIP_P5)
8698 return __bnxt_setup_vnic_p5(bp, vnic_id);
8699 else
8700 return __bnxt_setup_vnic(bp, vnic_id);
8701 }
8702
8703 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8704 {
8705 #ifdef CONFIG_RFS_ACCEL
8706 int i, rc = 0;
8707
8708 if (bp->flags & BNXT_FLAG_CHIP_P5)
8709 return 0;
8710
8711 for (i = 0; i < bp->rx_nr_rings; i++) {
8712 struct bnxt_vnic_info *vnic;
8713 u16 vnic_id = i + 1;
8714 u16 ring_id = i;
8715
8716 if (vnic_id >= bp->nr_vnics)
8717 break;
8718
8719 vnic = &bp->vnic_info[vnic_id];
8720 vnic->flags |= BNXT_VNIC_RFS_FLAG;
8721 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8722 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8723 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8724 if (rc) {
8725 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8726 vnic_id, rc);
8727 break;
8728 }
8729 rc = bnxt_setup_vnic(bp, vnic_id);
8730 if (rc)
8731 break;
8732 }
8733 return rc;
8734 #else
8735 return 0;
8736 #endif
8737 }
8738
8739 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
8740 static bool bnxt_promisc_ok(struct bnxt *bp)
8741 {
8742 #ifdef CONFIG_BNXT_SRIOV
8743 if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
8744 return false;
8745 #endif
8746 return true;
8747 }
8748
8749 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8750 {
8751 	int rc = 0;
8752
8753 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8754 if (rc) {
8755 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8756 rc);
8757 return rc;
8758 }
8759
8760 rc = bnxt_hwrm_vnic_cfg(bp, 1);
8761 if (rc) {
8762 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8763 rc);
8764 return rc;
8765 }
8766 return rc;
8767 }
8768
8769 static int bnxt_cfg_rx_mode(struct bnxt *);
8770 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8771
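/* Bring up the device data path: allocate stat contexts, rings, ring
 * groups and the default VNIC, program RSS/TPA/HDS and the L2 filter for
 * the default VNIC, then set the RX mask and interrupt coalescing.  Any
 * failure unwinds through bnxt_hwrm_resource_free().
 */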
8772 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8773 {
8774 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8775 int rc = 0;
8776 unsigned int rx_nr_rings = bp->rx_nr_rings;
8777
8778 if (irq_re_init) {
8779 rc = bnxt_hwrm_stat_ctx_alloc(bp);
8780 if (rc) {
8781 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8782 rc);
8783 goto err_out;
8784 }
8785 }
8786
8787 rc = bnxt_hwrm_ring_alloc(bp);
8788 if (rc) {
8789 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8790 goto err_out;
8791 }
8792
8793 rc = bnxt_hwrm_ring_grp_alloc(bp);
8794 if (rc) {
8795 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8796 goto err_out;
8797 }
8798
8799 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8800 rx_nr_rings--;
8801
8802 /* default vnic 0 */
8803 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8804 if (rc) {
8805 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8806 goto err_out;
8807 }
8808
8809 if (BNXT_VF(bp))
8810 bnxt_hwrm_func_qcfg(bp);
8811
8812 rc = bnxt_setup_vnic(bp, 0);
8813 if (rc)
8814 goto err_out;
8815 if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
8816 bnxt_hwrm_update_rss_hash_cfg(bp);
8817
8818 if (bp->flags & BNXT_FLAG_RFS) {
8819 rc = bnxt_alloc_rfs_vnics(bp);
8820 if (rc)
8821 goto err_out;
8822 }
8823
8824 if (bp->flags & BNXT_FLAG_TPA) {
8825 rc = bnxt_set_tpa(bp, true);
8826 if (rc)
8827 goto err_out;
8828 }
8829
8830 if (BNXT_VF(bp))
8831 bnxt_update_vf_mac(bp);
8832
8833 /* Filter for default vnic 0 */
8834 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8835 if (rc) {
8836 if (BNXT_VF(bp) && rc == -ENODEV)
8837 netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
8838 else
8839 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8840 goto err_out;
8841 }
8842 vnic->uc_filter_count = 1;
8843
8844 vnic->rx_mask = 0;
8845 if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
8846 goto skip_rx_mask;
8847
8848 if (bp->dev->flags & IFF_BROADCAST)
8849 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8850
8851 if (bp->dev->flags & IFF_PROMISC)
8852 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8853
8854 if (bp->dev->flags & IFF_ALLMULTI) {
8855 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8856 vnic->mc_list_count = 0;
8857 } else if (bp->dev->flags & IFF_MULTICAST) {
8858 u32 mask = 0;
8859
8860 bnxt_mc_list_updated(bp, &mask);
8861 vnic->rx_mask |= mask;
8862 }
8863
8864 rc = bnxt_cfg_rx_mode(bp);
8865 if (rc)
8866 goto err_out;
8867
8868 skip_rx_mask:
8869 rc = bnxt_hwrm_set_coal(bp);
8870 if (rc)
8871 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8872 rc);
8873
8874 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8875 rc = bnxt_setup_nitroa0_vnic(bp);
8876 if (rc)
8877 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8878 rc);
8879 }
8880
8881 if (BNXT_VF(bp)) {
8882 bnxt_hwrm_func_qcfg(bp);
8883 netdev_update_features(bp->dev);
8884 }
8885
8886 return 0;
8887
8888 err_out:
8889 bnxt_hwrm_resource_free(bp, 0, true);
8890
8891 return rc;
8892 }
8893
8894 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8895 {
8896 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8897 return 0;
8898 }
8899
8900 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8901 {
8902 bnxt_init_cp_rings(bp);
8903 bnxt_init_rx_rings(bp);
8904 bnxt_init_tx_rings(bp);
8905 bnxt_init_ring_grps(bp, irq_re_init);
8906 bnxt_init_vnics(bp);
8907
8908 return bnxt_init_chip(bp, irq_re_init);
8909 }
8910
8911 static int bnxt_set_real_num_queues(struct bnxt *bp)
8912 {
8913 int rc;
8914 struct net_device *dev = bp->dev;
8915
8916 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8917 bp->tx_nr_rings_xdp);
8918 if (rc)
8919 return rc;
8920
8921 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8922 if (rc)
8923 return rc;
8924
8925 #ifdef CONFIG_RFS_ACCEL
8926 if (bp->flags & BNXT_FLAG_RFS)
8927 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8928 #endif
8929
8930 return rc;
8931 }
8932
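/* Trim the requested RX and TX ring counts so they fit within @max.
 * With shared rings each count is simply capped at @max; otherwise the
 * larger of the two is decremented until rx + tx <= max.
 */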
8933 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8934 bool shared)
8935 {
8936 int _rx = *rx, _tx = *tx;
8937
8938 if (shared) {
8939 *rx = min_t(int, _rx, max);
8940 *tx = min_t(int, _tx, max);
8941 } else {
8942 if (max < 2)
8943 return -ENOMEM;
8944
8945 while (_rx + _tx > max) {
8946 if (_rx > _tx && _rx > 1)
8947 _rx--;
8948 else if (_tx > 1)
8949 _tx--;
8950 }
8951 *rx = _rx;
8952 *tx = _tx;
8953 }
8954 return 0;
8955 }
8956
8957 static void bnxt_setup_msix(struct bnxt *bp)
8958 {
8959 const int len = sizeof(bp->irq_tbl[0].name);
8960 struct net_device *dev = bp->dev;
8961 int tcs, i;
8962
8963 tcs = netdev_get_num_tc(dev);
8964 if (tcs) {
8965 int i, off, count;
8966
8967 for (i = 0; i < tcs; i++) {
8968 count = bp->tx_nr_rings_per_tc;
8969 off = i * count;
8970 netdev_set_tc_queue(dev, i, count, off);
8971 }
8972 }
8973
8974 for (i = 0; i < bp->cp_nr_rings; i++) {
8975 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8976 char *attr;
8977
8978 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8979 attr = "TxRx";
8980 else if (i < bp->rx_nr_rings)
8981 attr = "rx";
8982 else
8983 attr = "tx";
8984
8985 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8986 attr, i);
8987 bp->irq_tbl[map_idx].handler = bnxt_msix;
8988 }
8989 }
8990
8991 static void bnxt_setup_inta(struct bnxt *bp)
8992 {
8993 const int len = sizeof(bp->irq_tbl[0].name);
8994
8995 if (netdev_get_num_tc(bp->dev))
8996 netdev_reset_tc(bp->dev);
8997
8998 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8999 0);
9000 bp->irq_tbl[0].handler = bnxt_inta;
9001 }
9002
9003 static int bnxt_init_int_mode(struct bnxt *bp);
9004
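/* Initialize the IRQ table if it does not exist yet, name the MSI-X or
 * INTx vectors, and set the real number of TX/RX queues on the netdev.
 */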
9005 static int bnxt_setup_int_mode(struct bnxt *bp)
9006 {
9007 int rc;
9008
9009 if (!bp->irq_tbl) {
9010 rc = bnxt_init_int_mode(bp);
9011 if (rc || !bp->irq_tbl)
9012 return rc ?: -ENODEV;
9013 }
9014
9015 if (bp->flags & BNXT_FLAG_USING_MSIX)
9016 bnxt_setup_msix(bp);
9017 else
9018 bnxt_setup_inta(bp);
9019
9020 rc = bnxt_set_real_num_queues(bp);
9021 return rc;
9022 }
9023
9024 #ifdef CONFIG_RFS_ACCEL
9025 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
9026 {
9027 return bp->hw_resc.max_rsscos_ctxs;
9028 }
9029
9030 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
9031 {
9032 return bp->hw_resc.max_vnics;
9033 }
9034 #endif
9035
9036 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
9037 {
9038 return bp->hw_resc.max_stat_ctxs;
9039 }
9040
9041 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
9042 {
9043 return bp->hw_resc.max_cp_rings;
9044 }
9045
9046 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
9047 {
9048 unsigned int cp = bp->hw_resc.max_cp_rings;
9049
9050 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9051 cp -= bnxt_get_ulp_msix_num(bp);
9052
9053 return cp;
9054 }
9055
9056 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
9057 {
9058 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9059
9060 if (bp->flags & BNXT_FLAG_CHIP_P5)
9061 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
9062
9063 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
9064 }
9065
9066 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
9067 {
9068 bp->hw_resc.max_irqs = max_irqs;
9069 }
9070
9071 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
9072 {
9073 unsigned int cp;
9074
9075 cp = bnxt_get_max_func_cp_rings_for_en(bp);
9076 if (bp->flags & BNXT_FLAG_CHIP_P5)
9077 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
9078 else
9079 return cp - bp->cp_nr_rings;
9080 }
9081
9082 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
9083 {
9084 return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
9085 }
9086
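/* Return how many additional MSI-X vectors (up to @num) are available
 * beyond the completion rings currently in use, bounded by the number
 * of allocated IRQs and the function's maximum IRQ count.
 */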
9087 int bnxt_get_avail_msix(struct bnxt *bp, int num)
9088 {
9089 int max_cp = bnxt_get_max_func_cp_rings(bp);
9090 int max_irq = bnxt_get_max_func_irqs(bp);
9091 int total_req = bp->cp_nr_rings + num;
9092 int max_idx, avail_msix;
9093
9094 max_idx = bp->total_irqs;
9095 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9096 max_idx = min_t(int, bp->total_irqs, max_cp);
9097 avail_msix = max_idx - bp->cp_nr_rings;
9098 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
9099 return avail_msix;
9100
9101 if (max_irq < total_req) {
9102 num = max_irq - bp->cp_nr_rings;
9103 if (num <= 0)
9104 return 0;
9105 }
9106 return num;
9107 }
9108
9109 static int bnxt_get_num_msix(struct bnxt *bp)
9110 {
9111 if (!BNXT_NEW_RM(bp))
9112 return bnxt_get_max_func_irqs(bp);
9113
9114 return bnxt_nq_rings_in_use(bp);
9115 }
9116
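/* Enable MSI-X: allocate the vector table, request vectors from the
 * PCI core, trim the ring counts to the vectors granted, and populate
 * bp->irq_tbl.
 */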
9117 static int bnxt_init_msix(struct bnxt *bp)
9118 {
9119 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
9120 struct msix_entry *msix_ent;
9121
9122 total_vecs = bnxt_get_num_msix(bp);
9123 max = bnxt_get_max_func_irqs(bp);
9124 if (total_vecs > max)
9125 total_vecs = max;
9126
9127 if (!total_vecs)
9128 return 0;
9129
9130 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
9131 if (!msix_ent)
9132 return -ENOMEM;
9133
9134 for (i = 0; i < total_vecs; i++) {
9135 msix_ent[i].entry = i;
9136 msix_ent[i].vector = 0;
9137 }
9138
9139 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
9140 min = 2;
9141
9142 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
9143 ulp_msix = bnxt_get_ulp_msix_num(bp);
9144 if (total_vecs < 0 || total_vecs < ulp_msix) {
9145 rc = -ENODEV;
9146 goto msix_setup_exit;
9147 }
9148
9149 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
9150 if (bp->irq_tbl) {
9151 for (i = 0; i < total_vecs; i++)
9152 bp->irq_tbl[i].vector = msix_ent[i].vector;
9153
9154 bp->total_irqs = total_vecs;
9155 /* Trim rings based upon num of vectors allocated */
9156 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
9157 total_vecs - ulp_msix, min == 1);
9158 if (rc)
9159 goto msix_setup_exit;
9160
9161 bp->cp_nr_rings = (min == 1) ?
9162 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
9163 bp->tx_nr_rings + bp->rx_nr_rings;
9164
9165 } else {
9166 rc = -ENOMEM;
9167 goto msix_setup_exit;
9168 }
9169 bp->flags |= BNXT_FLAG_USING_MSIX;
9170 kfree(msix_ent);
9171 return 0;
9172
9173 msix_setup_exit:
9174 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
9175 kfree(bp->irq_tbl);
9176 bp->irq_tbl = NULL;
9177 pci_disable_msix(bp->pdev);
9178 kfree(msix_ent);
9179 return rc;
9180 }
9181
9182 static int bnxt_init_inta(struct bnxt *bp)
9183 {
9184 bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
9185 if (!bp->irq_tbl)
9186 return -ENOMEM;
9187
9188 bp->total_irqs = 1;
9189 bp->rx_nr_rings = 1;
9190 bp->tx_nr_rings = 1;
9191 bp->cp_nr_rings = 1;
9192 bp->flags |= BNXT_FLAG_SHARED_RINGS;
9193 bp->irq_tbl[0].vector = bp->pdev->irq;
9194 return 0;
9195 }
9196
9197 static int bnxt_init_int_mode(struct bnxt *bp)
9198 {
9199 int rc = -ENODEV;
9200
9201 if (bp->flags & BNXT_FLAG_MSIX_CAP)
9202 rc = bnxt_init_msix(bp);
9203
9204 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
9205 /* fallback to INTA */
9206 rc = bnxt_init_inta(bp);
9207 }
9208 return rc;
9209 }
9210
9211 static void bnxt_clear_int_mode(struct bnxt *bp)
9212 {
9213 if (bp->flags & BNXT_FLAG_USING_MSIX)
9214 pci_disable_msix(bp->pdev);
9215
9216 kfree(bp->irq_tbl);
9217 bp->irq_tbl = NULL;
9218 bp->flags &= ~BNXT_FLAG_USING_MSIX;
9219 }
9220
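/* Reserve ring resources if needed.  Under the new resource manager,
 * the IRQs are cleared and re-initialized first when the required
 * MSI-X vector count has changed.
 */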
9221 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
9222 {
9223 int tcs = netdev_get_num_tc(bp->dev);
9224 bool irq_cleared = false;
9225 int rc;
9226
9227 if (!bnxt_need_reserve_rings(bp))
9228 return 0;
9229
9230 if (irq_re_init && BNXT_NEW_RM(bp) &&
9231 bnxt_get_num_msix(bp) != bp->total_irqs) {
9232 bnxt_ulp_irq_stop(bp);
9233 bnxt_clear_int_mode(bp);
9234 irq_cleared = true;
9235 }
9236 rc = __bnxt_reserve_rings(bp);
9237 if (irq_cleared) {
9238 if (!rc)
9239 rc = bnxt_init_int_mode(bp);
9240 bnxt_ulp_irq_restart(bp, rc);
9241 }
9242 if (rc) {
9243 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
9244 return rc;
9245 }
9246 if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
9247 bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
9248 netdev_err(bp->dev, "tx ring reservation failure\n");
9249 netdev_reset_tc(bp->dev);
9250 if (bp->tx_nr_rings_xdp)
9251 bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
9252 else
9253 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
9254 return -ENOMEM;
9255 }
9256 return 0;
9257 }
9258
9259 static void bnxt_free_irq(struct bnxt *bp)
9260 {
9261 struct bnxt_irq *irq;
9262 int i;
9263
9264 #ifdef CONFIG_RFS_ACCEL
9265 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
9266 bp->dev->rx_cpu_rmap = NULL;
9267 #endif
9268 if (!bp->irq_tbl || !bp->bnapi)
9269 return;
9270
9271 for (i = 0; i < bp->cp_nr_rings; i++) {
9272 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9273
9274 irq = &bp->irq_tbl[map_idx];
9275 if (irq->requested) {
9276 if (irq->have_cpumask) {
9277 irq_set_affinity_hint(irq->vector, NULL);
9278 free_cpumask_var(irq->cpu_mask);
9279 irq->have_cpumask = 0;
9280 }
9281 free_irq(irq->vector, bp->bnapi[i]);
9282 }
9283
9284 irq->requested = 0;
9285 }
9286 }
9287
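/* Set up the interrupt mode, then request an IRQ for each completion
 * ring and give it a NUMA-local CPU affinity hint.
 */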
9288 static int bnxt_request_irq(struct bnxt *bp)
9289 {
9290 int i, j, rc = 0;
9291 unsigned long flags = 0;
9292 #ifdef CONFIG_RFS_ACCEL
9293 struct cpu_rmap *rmap;
9294 #endif
9295
9296 rc = bnxt_setup_int_mode(bp);
9297 if (rc) {
9298 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9299 rc);
9300 return rc;
9301 }
9302 #ifdef CONFIG_RFS_ACCEL
9303 rmap = bp->dev->rx_cpu_rmap;
9304 #endif
9305 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9306 flags = IRQF_SHARED;
9307
9308 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
9309 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9310 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9311
9312 #ifdef CONFIG_RFS_ACCEL
9313 if (rmap && bp->bnapi[i]->rx_ring) {
9314 rc = irq_cpu_rmap_add(rmap, irq->vector);
9315 if (rc)
9316 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
9317 j);
9318 j++;
9319 }
9320 #endif
9321 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9322 bp->bnapi[i]);
9323 if (rc)
9324 break;
9325
9326 irq->requested = 1;
9327
9328 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9329 int numa_node = dev_to_node(&bp->pdev->dev);
9330
9331 irq->have_cpumask = 1;
9332 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9333 irq->cpu_mask);
9334 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9335 if (rc) {
9336 netdev_warn(bp->dev,
9337 "Set affinity failed, IRQ = %d\n",
9338 irq->vector);
9339 break;
9340 }
9341 }
9342 }
9343 return rc;
9344 }
9345
9346 static void bnxt_del_napi(struct bnxt *bp)
9347 {
9348 int i;
9349
9350 if (!bp->bnapi)
9351 return;
9352
9353 for (i = 0; i < bp->cp_nr_rings; i++) {
9354 struct bnxt_napi *bnapi = bp->bnapi[i];
9355
9356 __netif_napi_del(&bnapi->napi);
9357 }
9358 /* Since we called __netif_napi_del(), we need
9359 * to respect an RCU grace period before freeing the napi structures.
9360 */
9361 synchronize_net();
9362 }
9363
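/* Register a NAPI instance for each completion ring, selecting the
 * poll function based on the chip type and interrupt mode.
 */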
9364 static void bnxt_init_napi(struct bnxt *bp)
9365 {
9366 int i;
9367 unsigned int cp_nr_rings = bp->cp_nr_rings;
9368 struct bnxt_napi *bnapi;
9369
9370 if (bp->flags & BNXT_FLAG_USING_MSIX) {
9371 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9372
9373 if (bp->flags & BNXT_FLAG_CHIP_P5)
9374 poll_fn = bnxt_poll_p5;
9375 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9376 cp_nr_rings--;
9377 for (i = 0; i < cp_nr_rings; i++) {
9378 bnapi = bp->bnapi[i];
9379 netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
9380 }
9381 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9382 bnapi = bp->bnapi[cp_nr_rings];
9383 netif_napi_add(bp->dev, &bnapi->napi,
9384 bnxt_poll_nitroa0);
9385 }
9386 } else {
9387 bnapi = bp->bnapi[0];
9388 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
9389 }
9390 }
9391
9392 static void bnxt_disable_napi(struct bnxt *bp)
9393 {
9394 int i;
9395
9396 if (!bp->bnapi ||
9397 test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9398 return;
9399
9400 for (i = 0; i < bp->cp_nr_rings; i++) {
9401 struct bnxt_napi *bnapi = bp->bnapi[i];
9402 struct bnxt_cp_ring_info *cpr;
9403
9404 cpr = &bnapi->cp_ring;
9405 if (bnapi->tx_fault)
9406 cpr->sw_stats.tx.tx_resets++;
9407 if (bnapi->in_reset)
9408 cpr->sw_stats.rx.rx_resets++;
9409 napi_disable(&bnapi->napi);
9410 if (bnapi->rx_ring)
9411 cancel_work_sync(&cpr->dim.work);
9412 }
9413 }
9414
9415 static void bnxt_enable_napi(struct bnxt *bp)
9416 {
9417 int i;
9418
9419 clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9420 for (i = 0; i < bp->cp_nr_rings; i++) {
9421 struct bnxt_napi *bnapi = bp->bnapi[i];
9422 struct bnxt_cp_ring_info *cpr;
9423
9424 bnapi->tx_fault = 0;
9425
9426 cpr = &bnapi->cp_ring;
9427 bnapi->in_reset = false;
9428
9429 bnapi->tx_pkts = 0;
9430
9431 if (bnapi->rx_ring) {
9432 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
9433 cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
9434 }
9435 napi_enable(&bnapi->napi);
9436 }
9437 }
9438
9439 void bnxt_tx_disable(struct bnxt *bp)
9440 {
9441 int i;
9442 struct bnxt_tx_ring_info *txr;
9443
9444 if (bp->tx_ring) {
9445 for (i = 0; i < bp->tx_nr_rings; i++) {
9446 txr = &bp->tx_ring[i];
9447 WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
9448 }
9449 }
9450 /* Make sure napi polls see @dev_state change */
9451 synchronize_net();
9452 /* Drop carrier first to prevent TX timeout */
9453 netif_carrier_off(bp->dev);
9454 /* Stop all TX queues */
9455 netif_tx_disable(bp->dev);
9456 }
9457
9458 void bnxt_tx_enable(struct bnxt *bp)
9459 {
9460 int i;
9461 struct bnxt_tx_ring_info *txr;
9462
9463 for (i = 0; i < bp->tx_nr_rings; i++) {
9464 txr = &bp->tx_ring[i];
9465 WRITE_ONCE(txr->dev_state, 0);
9466 }
9467 /* Make sure napi polls see @dev_state change */
9468 synchronize_net();
9469 netif_tx_wake_all_queues(bp->dev);
9470 if (BNXT_LINK_IS_UP(bp))
9471 netif_carrier_on(bp->dev);
9472 }
9473
9474 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9475 {
9476 u8 active_fec = link_info->active_fec_sig_mode &
9477 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9478
9479 switch (active_fec) {
9480 default:
9481 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9482 return "None";
9483 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9484 return "Clause 74 BaseR";
9485 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9486 return "Clause 91 RS(528,514)";
9487 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9488 return "Clause 91 RS544_1XN";
9489 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9490 return "Clause 91 RS(544,514)";
9491 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9492 return "Clause 91 RS272_1XN";
9493 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9494 return "Clause 91 RS(272,257)";
9495 }
9496 }
9497
9498 void bnxt_report_link(struct bnxt *bp)
9499 {
9500 if (BNXT_LINK_IS_UP(bp)) {
9501 const char *signal = "";
9502 const char *flow_ctrl;
9503 const char *duplex;
9504 u32 speed;
9505 u16 fec;
9506
9507 netif_carrier_on(bp->dev);
9508 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9509 if (speed == SPEED_UNKNOWN) {
9510 netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9511 return;
9512 }
9513 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9514 duplex = "full";
9515 else
9516 duplex = "half";
9517 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9518 flow_ctrl = "ON - receive & transmit";
9519 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9520 flow_ctrl = "ON - transmit";
9521 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9522 flow_ctrl = "ON - receive";
9523 else
9524 flow_ctrl = "none";
9525 if (bp->link_info.phy_qcfg_resp.option_flags &
9526 PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9527 u8 sig_mode = bp->link_info.active_fec_sig_mode &
9528 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9529 switch (sig_mode) {
9530 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9531 signal = "(NRZ) ";
9532 break;
9533 case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9534 signal = "(PAM4) ";
9535 break;
9536 default:
9537 break;
9538 }
9539 }
9540 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9541 speed, signal, duplex, flow_ctrl);
9542 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9543 netdev_info(bp->dev, "EEE is %s\n",
9544 bp->eee.eee_active ? "active" :
9545 "not active");
9546 fec = bp->link_info.fec_cfg;
9547 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9548 netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9549 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9550 bnxt_report_fec(&bp->link_info));
9551 } else {
9552 netif_carrier_off(bp->dev);
9553 netdev_err(bp->dev, "NIC Link is Down\n");
9554 }
9555 }
9556
9557 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9558 {
9559 if (!resp->supported_speeds_auto_mode &&
9560 !resp->supported_speeds_force_mode &&
9561 !resp->supported_pam4_speeds_auto_mode &&
9562 !resp->supported_pam4_speeds_force_mode)
9563 return true;
9564 return false;
9565 }
9566
9567 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9568 {
9569 struct bnxt_link_info *link_info = &bp->link_info;
9570 struct hwrm_port_phy_qcaps_output *resp;
9571 struct hwrm_port_phy_qcaps_input *req;
9572 int rc = 0;
9573
9574 if (bp->hwrm_spec_code < 0x10201)
9575 return 0;
9576
9577 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
9578 if (rc)
9579 return rc;
9580
9581 resp = hwrm_req_hold(bp, req);
9582 rc = hwrm_req_send(bp, req);
9583 if (rc)
9584 goto hwrm_phy_qcaps_exit;
9585
9586 bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
9587 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9588 struct ethtool_eee *eee = &bp->eee;
9589 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9590
9591 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9592 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9593 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9594 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9595 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9596 }
9597
9598 if (bp->hwrm_spec_code >= 0x10a01) {
9599 if (bnxt_phy_qcaps_no_speed(resp)) {
9600 link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9601 netdev_warn(bp->dev, "Ethernet link disabled\n");
9602 } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9603 link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9604 netdev_info(bp->dev, "Ethernet link enabled\n");
9605 /* Phy re-enabled, reprobe the speeds */
9606 link_info->support_auto_speeds = 0;
9607 link_info->support_pam4_auto_speeds = 0;
9608 }
9609 }
9610 if (resp->supported_speeds_auto_mode)
9611 link_info->support_auto_speeds =
9612 le16_to_cpu(resp->supported_speeds_auto_mode);
9613 if (resp->supported_pam4_speeds_auto_mode)
9614 link_info->support_pam4_auto_speeds =
9615 le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9616
9617 bp->port_count = resp->port_cnt;
9618
9619 hwrm_phy_qcaps_exit:
9620 hwrm_req_drop(bp, req);
9621 return rc;
9622 }
9623
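/* Return true if @advertising contains any bit that is no longer
 * present in @supported.
 */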
9624 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9625 {
9626 u16 diff = advertising ^ supported;
9627
9628 return ((supported | diff) != supported);
9629 }
9630
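/* Query PORT_PHY_QCFG and refresh the cached link_info.  When
 * @chng_link_state is set, update the link state and report any
 * change; also re-sync the advertised speeds if the supported
 * speeds have shrunk.
 */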
9631 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9632 {
9633 struct bnxt_link_info *link_info = &bp->link_info;
9634 struct hwrm_port_phy_qcfg_output *resp;
9635 struct hwrm_port_phy_qcfg_input *req;
9636 u8 link_state = link_info->link_state;
9637 bool support_changed = false;
9638 int rc;
9639
9640 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
9641 if (rc)
9642 return rc;
9643
9644 resp = hwrm_req_hold(bp, req);
9645 rc = hwrm_req_send(bp, req);
9646 if (rc) {
9647 hwrm_req_drop(bp, req);
9648 if (BNXT_VF(bp) && rc == -ENODEV) {
9649 netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
9650 rc = 0;
9651 }
9652 return rc;
9653 }
9654
9655 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9656 link_info->phy_link_status = resp->link;
9657 link_info->duplex = resp->duplex_cfg;
9658 if (bp->hwrm_spec_code >= 0x10800)
9659 link_info->duplex = resp->duplex_state;
9660 link_info->pause = resp->pause;
9661 link_info->auto_mode = resp->auto_mode;
9662 link_info->auto_pause_setting = resp->auto_pause;
9663 link_info->lp_pause = resp->link_partner_adv_pause;
9664 link_info->force_pause_setting = resp->force_pause;
9665 link_info->duplex_setting = resp->duplex_cfg;
9666 if (link_info->phy_link_status == BNXT_LINK_LINK)
9667 link_info->link_speed = le16_to_cpu(resp->link_speed);
9668 else
9669 link_info->link_speed = 0;
9670 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9671 link_info->force_pam4_link_speed =
9672 le16_to_cpu(resp->force_pam4_link_speed);
9673 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9674 link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9675 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9676 link_info->auto_pam4_link_speeds =
9677 le16_to_cpu(resp->auto_pam4_link_speed_mask);
9678 link_info->lp_auto_link_speeds =
9679 le16_to_cpu(resp->link_partner_adv_speeds);
9680 link_info->lp_auto_pam4_link_speeds =
9681 resp->link_partner_pam4_adv_speeds;
9682 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9683 link_info->phy_ver[0] = resp->phy_maj;
9684 link_info->phy_ver[1] = resp->phy_min;
9685 link_info->phy_ver[2] = resp->phy_bld;
9686 link_info->media_type = resp->media_type;
9687 link_info->phy_type = resp->phy_type;
9688 link_info->transceiver = resp->xcvr_pkg_type;
9689 link_info->phy_addr = resp->eee_config_phy_addr &
9690 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9691 link_info->module_status = resp->module_status;
9692
9693 if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
9694 struct ethtool_eee *eee = &bp->eee;
9695 u16 fw_speeds;
9696
9697 eee->eee_active = 0;
9698 if (resp->eee_config_phy_addr &
9699 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9700 eee->eee_active = 1;
9701 fw_speeds = le16_to_cpu(
9702 resp->link_partner_adv_eee_link_speed_mask);
9703 eee->lp_advertised =
9704 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9705 }
9706
9707 /* Pull initial EEE config */
9708 if (!chng_link_state) {
9709 if (resp->eee_config_phy_addr &
9710 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9711 eee->eee_enabled = 1;
9712
9713 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9714 eee->advertised =
9715 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9716
9717 if (resp->eee_config_phy_addr &
9718 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9719 __le32 tmr;
9720
9721 eee->tx_lpi_enabled = 1;
9722 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9723 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9724 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9725 }
9726 }
9727 }
9728
9729 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9730 if (bp->hwrm_spec_code >= 0x10504) {
9731 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9732 link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9733 }
9734 /* TODO: need to add more logic to report VF link */
9735 if (chng_link_state) {
9736 if (link_info->phy_link_status == BNXT_LINK_LINK)
9737 link_info->link_state = BNXT_LINK_STATE_UP;
9738 else
9739 link_info->link_state = BNXT_LINK_STATE_DOWN;
9740 if (link_state != link_info->link_state)
9741 bnxt_report_link(bp);
9742 } else {
9743 /* always report link down if not required to update the link state */
9744 link_info->link_state = BNXT_LINK_STATE_DOWN;
9745 }
9746 hwrm_req_drop(bp, req);
9747
9748 if (!BNXT_PHY_CFG_ABLE(bp))
9749 return 0;
9750
9751 /* Check if any advertised speeds are no longer supported. The caller
9752 * holds the link_lock mutex, so we can modify link_info settings.
9753 */
9754 if (bnxt_support_dropped(link_info->advertising,
9755 link_info->support_auto_speeds)) {
9756 link_info->advertising = link_info->support_auto_speeds;
9757 support_changed = true;
9758 }
9759 if (bnxt_support_dropped(link_info->advertising_pam4,
9760 link_info->support_pam4_auto_speeds)) {
9761 link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9762 support_changed = true;
9763 }
9764 if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9765 bnxt_hwrm_set_link_setting(bp, true, false);
9766 return 0;
9767 }
9768
9769 static void bnxt_get_port_module_status(struct bnxt *bp)
9770 {
9771 struct bnxt_link_info *link_info = &bp->link_info;
9772 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9773 u8 module_status;
9774
9775 if (bnxt_update_link(bp, true))
9776 return;
9777
9778 module_status = link_info->module_status;
9779 switch (module_status) {
9780 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9781 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9782 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9783 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9784 bp->pf.port_id);
9785 if (bp->hwrm_spec_code >= 0x10201) {
9786 netdev_warn(bp->dev, "Module part number %s\n",
9787 resp->phy_vendor_partnumber);
9788 }
9789 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9790 netdev_warn(bp->dev, "TX is disabled\n");
9791 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9792 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9793 }
9794 }
9795
9796 static void
9797 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9798 {
9799 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9800 if (bp->hwrm_spec_code >= 0x10201)
9801 req->auto_pause =
9802 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9803 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9804 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9805 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9806 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9807 req->enables |=
9808 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9809 } else {
9810 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9811 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9812 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9813 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9814 req->enables |=
9815 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9816 if (bp->hwrm_spec_code >= 0x10201) {
9817 req->auto_pause = req->force_pause;
9818 req->enables |= cpu_to_le32(
9819 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9820 }
9821 }
9822 }
9823
9824 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9825 {
9826 if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9827 req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9828 if (bp->link_info.advertising) {
9829 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9830 req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9831 }
9832 if (bp->link_info.advertising_pam4) {
9833 req->enables |=
9834 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9835 req->auto_link_pam4_speed_mask =
9836 cpu_to_le16(bp->link_info.advertising_pam4);
9837 }
9838 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9839 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9840 } else {
9841 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9842 if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9843 req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9844 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9845 } else {
9846 req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9847 }
9848 }
9849
9850 /* tell chimp that the setting takes effect immediately */
9851 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9852 }
9853
9854 int bnxt_hwrm_set_pause(struct bnxt *bp)
9855 {
9856 struct hwrm_port_phy_cfg_input *req;
9857 int rc;
9858
9859 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9860 if (rc)
9861 return rc;
9862
9863 bnxt_hwrm_set_pause_common(bp, req);
9864
9865 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9866 bp->link_info.force_link_chng)
9867 bnxt_hwrm_set_link_common(bp, req);
9868
9869 rc = hwrm_req_send(bp, req);
9870 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9871 /* since changing the pause setting doesn't trigger any link
9872 * change event, the driver needs to update the current pause
9873 * result upon successful return of the phy_cfg command
9874 */
9875 bp->link_info.pause =
9876 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9877 bp->link_info.auto_pause_setting = 0;
9878 if (!bp->link_info.force_link_chng)
9879 bnxt_report_link(bp);
9880 }
9881 bp->link_info.force_link_chng = false;
9882 return rc;
9883 }
9884
9885 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9886 struct hwrm_port_phy_cfg_input *req)
9887 {
9888 struct ethtool_eee *eee = &bp->eee;
9889
9890 if (eee->eee_enabled) {
9891 u16 eee_speeds;
9892 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9893
9894 if (eee->tx_lpi_enabled)
9895 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9896 else
9897 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9898
9899 req->flags |= cpu_to_le32(flags);
9900 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9901 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9902 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9903 } else {
9904 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9905 }
9906 }
9907
9908 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9909 {
9910 struct hwrm_port_phy_cfg_input *req;
9911 int rc;
9912
9913 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9914 if (rc)
9915 return rc;
9916
9917 if (set_pause)
9918 bnxt_hwrm_set_pause_common(bp, req);
9919
9920 bnxt_hwrm_set_link_common(bp, req);
9921
9922 if (set_eee)
9923 bnxt_hwrm_set_eee(bp, req);
9924 return hwrm_req_send(bp, req);
9925 }
9926
9927 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9928 {
9929 struct hwrm_port_phy_cfg_input *req;
9930 int rc;
9931
9932 if (!BNXT_SINGLE_PF(bp))
9933 return 0;
9934
9935 if (pci_num_vf(bp->pdev) &&
9936 !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
9937 return 0;
9938
9939 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9940 if (rc)
9941 return rc;
9942
9943 req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9944 rc = hwrm_req_send(bp, req);
9945 if (!rc) {
9946 mutex_lock(&bp->link_lock);
9947 /* Device is not obliged to bring the link down in certain scenarios, even
9948 * when forced. Setting the state unknown is consistent with
9949 * driver startup and will force link state to be reported
9950 * during subsequent open based on PORT_PHY_QCFG.
9951 */
9952 bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
9953 mutex_unlock(&bp->link_lock);
9954 }
9955 return rc;
9956 }
9957
9958 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9959 {
9960 #ifdef CONFIG_TEE_BNXT_FW
9961 int rc = tee_bnxt_fw_load();
9962
9963 if (rc)
9964 netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9965
9966 return rc;
9967 #else
9968 netdev_err(bp->dev, "OP-TEE not supported\n");
9969 return -ENODEV;
9970 #endif
9971 }
9972
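/* Poll the firmware health status while the firmware is booting or
 * recovering.  If the firmware has crashed with no master function,
 * attempt recovery via OP-TEE.
 */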
9973 static int bnxt_try_recover_fw(struct bnxt *bp)
9974 {
9975 if (bp->fw_health && bp->fw_health->status_reliable) {
9976 int retry = 0, rc;
9977 u32 sts;
9978
9979 do {
9980 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
9981 rc = bnxt_hwrm_poll(bp);
9982 if (!BNXT_FW_IS_BOOTING(sts) &&
9983 !BNXT_FW_IS_RECOVERING(sts))
9984 break;
9985 retry++;
9986 } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9987
9988 if (!BNXT_FW_IS_HEALTHY(sts)) {
9989 netdev_err(bp->dev,
9990 "Firmware not responding, status: 0x%x\n",
9991 sts);
9992 rc = -ENODEV;
9993 }
9994 if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9995 netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
9996 return bnxt_fw_reset_via_optee(bp);
9997 }
9998 return rc;
9999 }
10000
10001 return -ENODEV;
10002 }
10003
10004 static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset)
10005 {
10006 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
10007
10008 if (!BNXT_NEW_RM(bp))
10009 return; /* no resource reservations required */
10010
10011 hw_resc->resv_cp_rings = 0;
10012 hw_resc->resv_stat_ctxs = 0;
10013 hw_resc->resv_irqs = 0;
10014 hw_resc->resv_tx_rings = 0;
10015 hw_resc->resv_rx_rings = 0;
10016 hw_resc->resv_hw_ring_grps = 0;
10017 hw_resc->resv_vnics = 0;
10018 if (!fw_reset) {
10019 bp->tx_nr_rings = 0;
10020 bp->rx_nr_rings = 0;
10021 }
10022 }
10023
10024 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
10025 {
10026 int rc;
10027
10028 if (!BNXT_NEW_RM(bp))
10029 return 0; /* no resource reservations required */
10030
10031 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
10032 if (rc)
10033 netdev_err(bp->dev, "resc_qcaps failed\n");
10034
10035 bnxt_clear_reservations(bp, fw_reset);
10036
10037 return rc;
10038 }
10039
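/* Notify the firmware that the interface is going up or down using
 * FUNC_DRV_IF_CHANGE.  On the up path, handle indications that the
 * resources have changed or that a hot firmware reset has completed.
 */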
10040 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
10041 {
10042 struct hwrm_func_drv_if_change_output *resp;
10043 struct hwrm_func_drv_if_change_input *req;
10044 bool fw_reset = !bp->irq_tbl;
10045 bool resc_reinit = false;
10046 int rc, retry = 0;
10047 u32 flags = 0;
10048
10049 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
10050 return 0;
10051
10052 rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
10053 if (rc)
10054 return rc;
10055
10056 if (up)
10057 req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
10058 resp = hwrm_req_hold(bp, req);
10059
10060 hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10061 while (retry < BNXT_FW_IF_RETRY) {
10062 rc = hwrm_req_send(bp, req);
10063 if (rc != -EAGAIN)
10064 break;
10065
10066 msleep(50);
10067 retry++;
10068 }
10069
10070 if (rc == -EAGAIN) {
10071 hwrm_req_drop(bp, req);
10072 return rc;
10073 } else if (!rc) {
10074 flags = le32_to_cpu(resp->flags);
10075 } else if (up) {
10076 rc = bnxt_try_recover_fw(bp);
10077 fw_reset = true;
10078 }
10079 hwrm_req_drop(bp, req);
10080 if (rc)
10081 return rc;
10082
10083 if (!up) {
10084 bnxt_inv_fw_health_reg(bp);
10085 return 0;
10086 }
10087
10088 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
10089 resc_reinit = true;
10090 if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
10091 test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
10092 fw_reset = true;
10093 else
10094 bnxt_remap_fw_health_regs(bp);
10095
10096 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
10097 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
10098 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10099 return -ENODEV;
10100 }
10101 if (resc_reinit || fw_reset) {
10102 if (fw_reset) {
10103 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10104 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10105 bnxt_ulp_stop(bp);
10106 bnxt_free_ctx_mem(bp);
10107 kfree(bp->ctx);
10108 bp->ctx = NULL;
10109 bnxt_dcb_free(bp);
10110 rc = bnxt_fw_init_one(bp);
10111 if (rc) {
10112 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10113 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10114 return rc;
10115 }
10116 bnxt_clear_int_mode(bp);
10117 rc = bnxt_init_int_mode(bp);
10118 if (rc) {
10119 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10120 netdev_err(bp->dev, "init int mode failed\n");
10121 return rc;
10122 }
10123 }
10124 rc = bnxt_cancel_reservations(bp, fw_reset);
10125 }
10126 return rc;
10127 }
10128
10129 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
10130 {
10131 struct hwrm_port_led_qcaps_output *resp;
10132 struct hwrm_port_led_qcaps_input *req;
10133 struct bnxt_pf_info *pf = &bp->pf;
10134 int rc;
10135
10136 bp->num_leds = 0;
10137 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
10138 return 0;
10139
10140 rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
10141 if (rc)
10142 return rc;
10143
10144 req->port_id = cpu_to_le16(pf->port_id);
10145 resp = hwrm_req_hold(bp, req);
10146 rc = hwrm_req_send(bp, req);
10147 if (rc) {
10148 hwrm_req_drop(bp, req);
10149 return rc;
10150 }
10151 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
10152 int i;
10153
10154 bp->num_leds = resp->num_leds;
10155 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
10156 bp->num_leds);
10157 for (i = 0; i < bp->num_leds; i++) {
10158 struct bnxt_led_info *led = &bp->leds[i];
10159 __le16 caps = led->led_state_caps;
10160
10161 if (!led->led_group_id ||
10162 !BNXT_LED_ALT_BLINK_CAP(caps)) {
10163 bp->num_leds = 0;
10164 break;
10165 }
10166 }
10167 }
10168 hwrm_req_drop(bp, req);
10169 return 0;
10170 }
10171
10172 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
10173 {
10174 struct hwrm_wol_filter_alloc_output *resp;
10175 struct hwrm_wol_filter_alloc_input *req;
10176 int rc;
10177
10178 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
10179 if (rc)
10180 return rc;
10181
10182 req->port_id = cpu_to_le16(bp->pf.port_id);
10183 req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
10184 req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
10185 memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
10186
10187 resp = hwrm_req_hold(bp, req);
10188 rc = hwrm_req_send(bp, req);
10189 if (!rc)
10190 bp->wol_filter_id = resp->wol_filter_id;
10191 hwrm_req_drop(bp, req);
10192 return rc;
10193 }
10194
10195 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
10196 {
10197 struct hwrm_wol_filter_free_input *req;
10198 int rc;
10199
10200 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
10201 if (rc)
10202 return rc;
10203
10204 req->port_id = cpu_to_le16(bp->pf.port_id);
10205 req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
10206 req->wol_filter_id = bp->wol_filter_id;
10207
10208 return hwrm_req_send(bp, req);
10209 }
10210
10211 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
10212 {
10213 struct hwrm_wol_filter_qcfg_output *resp;
10214 struct hwrm_wol_filter_qcfg_input *req;
10215 u16 next_handle = 0;
10216 int rc;
10217
10218 rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
10219 if (rc)
10220 return rc;
10221
10222 req->port_id = cpu_to_le16(bp->pf.port_id);
10223 req->handle = cpu_to_le16(handle);
10224 resp = hwrm_req_hold(bp, req);
10225 rc = hwrm_req_send(bp, req);
10226 if (!rc) {
10227 next_handle = le16_to_cpu(resp->next_handle);
10228 if (next_handle != 0) {
10229 if (resp->wol_type ==
10230 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
10231 bp->wol = 1;
10232 bp->wol_filter_id = resp->wol_filter_id;
10233 }
10234 }
10235 }
10236 hwrm_req_drop(bp, req);
10237 return next_handle;
10238 }
10239
10240 static void bnxt_get_wol_settings(struct bnxt *bp)
10241 {
10242 u16 handle = 0;
10243
10244 bp->wol = 0;
10245 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
10246 return;
10247
10248 do {
10249 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
10250 } while (handle && handle != 0xffff);
10251 }
10252
10253 #ifdef CONFIG_BNXT_HWMON
10254 static ssize_t bnxt_show_temp(struct device *dev,
10255 struct device_attribute *devattr, char *buf)
10256 {
10257 struct hwrm_temp_monitor_query_output *resp;
10258 struct hwrm_temp_monitor_query_input *req;
10259 struct bnxt *bp = dev_get_drvdata(dev);
10260 u32 len = 0;
10261 int rc;
10262
10263 rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10264 if (rc)
10265 return rc;
10266 resp = hwrm_req_hold(bp, req);
10267 rc = hwrm_req_send(bp, req);
10268 if (!rc)
10269 len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
10270 hwrm_req_drop(bp, req);
10271 if (rc)
10272 return rc;
10273 return len;
10274 }
10275 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
10276
10277 static struct attribute *bnxt_attrs[] = {
10278 &sensor_dev_attr_temp1_input.dev_attr.attr,
10279 NULL
10280 };
10281 ATTRIBUTE_GROUPS(bnxt);
10282
10283 static void bnxt_hwmon_close(struct bnxt *bp)
10284 {
10285 if (bp->hwmon_dev) {
10286 hwmon_device_unregister(bp->hwmon_dev);
10287 bp->hwmon_dev = NULL;
10288 }
10289 }
10290
10291 static void bnxt_hwmon_open(struct bnxt *bp)
10292 {
10293 struct hwrm_temp_monitor_query_input *req;
10294 struct pci_dev *pdev = bp->pdev;
10295 int rc;
10296
10297 rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10298 if (!rc)
10299 rc = hwrm_req_send_silent(bp, req);
10300 if (rc == -EACCES || rc == -EOPNOTSUPP) {
10301 bnxt_hwmon_close(bp);
10302 return;
10303 }
10304
10305 if (bp->hwmon_dev)
10306 return;
10307
10308 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
10309 DRV_MODULE_NAME, bp,
10310 bnxt_groups);
10311 if (IS_ERR(bp->hwmon_dev)) {
10312 bp->hwmon_dev = NULL;
10313 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
10314 }
10315 }
10316 #else
10317 static void bnxt_hwmon_close(struct bnxt *bp)
10318 {
10319 }
10320
10321 static void bnxt_hwmon_open(struct bnxt *bp)
10322 {
10323 }
10324 #endif
10325
10326 static bool bnxt_eee_config_ok(struct bnxt *bp)
10327 {
10328 struct ethtool_eee *eee = &bp->eee;
10329 struct bnxt_link_info *link_info = &bp->link_info;
10330
10331 if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
10332 return true;
10333
10334 if (eee->eee_enabled) {
10335 u32 advertising =
10336 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
10337
10338 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10339 eee->eee_enabled = 0;
10340 return false;
10341 }
10342 if (eee->advertised & ~advertising) {
10343 eee->advertised = advertising & eee->supported;
10344 return false;
10345 }
10346 }
10347 return true;
10348 }
10349
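/* Re-read the link state from firmware and, on a single-function PF,
 * reapply any pause, speed or EEE settings that no longer match the
 * requested configuration.
 */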
10350 static int bnxt_update_phy_setting(struct bnxt *bp)
10351 {
10352 int rc;
10353 bool update_link = false;
10354 bool update_pause = false;
10355 bool update_eee = false;
10356 struct bnxt_link_info *link_info = &bp->link_info;
10357
10358 rc = bnxt_update_link(bp, true);
10359 if (rc) {
10360 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10361 rc);
10362 return rc;
10363 }
10364 if (!BNXT_SINGLE_PF(bp))
10365 return 0;
10366
10367 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10368 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10369 link_info->req_flow_ctrl)
10370 update_pause = true;
10371 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10372 link_info->force_pause_setting != link_info->req_flow_ctrl)
10373 update_pause = true;
10374 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10375 if (BNXT_AUTO_MODE(link_info->auto_mode))
10376 update_link = true;
10377 if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10378 link_info->req_link_speed != link_info->force_link_speed)
10379 update_link = true;
10380 else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10381 link_info->req_link_speed != link_info->force_pam4_link_speed)
10382 update_link = true;
10383 if (link_info->req_duplex != link_info->duplex_setting)
10384 update_link = true;
10385 } else {
10386 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10387 update_link = true;
10388 if (link_info->advertising != link_info->auto_link_speeds ||
10389 link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
10390 update_link = true;
10391 }
10392
10393 /* The last close may have shut down the link, so we need to call
10394 * PHY_CFG to bring it back up.
10395 */
10396 if (!BNXT_LINK_IS_UP(bp))
10397 update_link = true;
10398
10399 if (!bnxt_eee_config_ok(bp))
10400 update_eee = true;
10401
10402 if (update_link)
10403 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
10404 else if (update_pause)
10405 rc = bnxt_hwrm_set_pause(bp);
10406 if (rc) {
10407 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10408 rc);
10409 return rc;
10410 }
10411
10412 return rc;
10413 }
10414
10415 /* Common routine to pre-map certain register blocks to different GRC windows.
10416 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
10417 * in the PF and 3 windows in the VF can be customized to map different
10418 * register blocks.
10419 */
10420 static void bnxt_preset_reg_win(struct bnxt *bp)
10421 {
10422 if (BNXT_PF(bp)) {
10423 /* CAG registers map to GRC window #4 */
10424 writel(BNXT_CAG_REG_BASE,
10425 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10426 }
10427 }
10428
10429 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10430
10431 static int bnxt_reinit_after_abort(struct bnxt *bp)
10432 {
10433 int rc;
10434
10435 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10436 return -EBUSY;
10437
10438 if (bp->dev->reg_state == NETREG_UNREGISTERED)
10439 return -ENODEV;
10440
10441 rc = bnxt_fw_init_one(bp);
10442 if (!rc) {
10443 bnxt_clear_int_mode(bp);
10444 rc = bnxt_init_int_mode(bp);
10445 if (!rc) {
10446 clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10447 set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10448 }
10449 }
10450 return rc;
10451 }
10452
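/* Core open path: reserve rings, allocate memory, set up NAPI and
 * IRQs, initialize the NIC, bring up the PHY, and start the TX queues
 * and the periodic timer.
 */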
10453 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10454 {
10455 int rc = 0;
10456
10457 bnxt_preset_reg_win(bp);
10458 netif_carrier_off(bp->dev);
10459 if (irq_re_init) {
10460 /* Reserve rings now if none were reserved at driver probe. */
10461 rc = bnxt_init_dflt_ring_mode(bp);
10462 if (rc) {
10463 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10464 return rc;
10465 }
10466 }
10467 rc = bnxt_reserve_rings(bp, irq_re_init);
10468 if (rc)
10469 return rc;
10470 if ((bp->flags & BNXT_FLAG_RFS) &&
10471 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10472 /* disable RFS if falling back to INTA */
10473 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10474 bp->flags &= ~BNXT_FLAG_RFS;
10475 }
10476
10477 rc = bnxt_alloc_mem(bp, irq_re_init);
10478 if (rc) {
10479 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10480 goto open_err_free_mem;
10481 }
10482
10483 if (irq_re_init) {
10484 bnxt_init_napi(bp);
10485 rc = bnxt_request_irq(bp);
10486 if (rc) {
10487 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
10488 goto open_err_irq;
10489 }
10490 }
10491
10492 rc = bnxt_init_nic(bp, irq_re_init);
10493 if (rc) {
10494 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10495 goto open_err_irq;
10496 }
10497
10498 bnxt_enable_napi(bp);
10499 bnxt_debug_dev_init(bp);
10500
10501 if (link_re_init) {
10502 mutex_lock(&bp->link_lock);
10503 rc = bnxt_update_phy_setting(bp);
10504 mutex_unlock(&bp->link_lock);
10505 if (rc) {
10506 netdev_warn(bp->dev, "failed to update phy settings\n");
10507 if (BNXT_SINGLE_PF(bp)) {
10508 bp->link_info.phy_retry = true;
10509 bp->link_info.phy_retry_expires =
10510 jiffies + 5 * HZ;
10511 }
10512 }
10513 }
10514
10515 if (irq_re_init)
10516 udp_tunnel_nic_reset_ntf(bp->dev);
10517
10518 if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
10519 if (!static_key_enabled(&bnxt_xdp_locking_key))
10520 static_branch_enable(&bnxt_xdp_locking_key);
10521 } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
10522 static_branch_disable(&bnxt_xdp_locking_key);
10523 }
10524 set_bit(BNXT_STATE_OPEN, &bp->state);
10525 bnxt_enable_int(bp);
10526 /* Enable TX queues */
10527 bnxt_tx_enable(bp);
10528 mod_timer(&bp->timer, jiffies + bp->current_interval);
10529 /* Poll link status and check for SFP+ module status */
10530 mutex_lock(&bp->link_lock);
10531 bnxt_get_port_module_status(bp);
10532 mutex_unlock(&bp->link_lock);
10533
10534 /* VF-reps may need to be re-opened after the PF is re-opened */
10535 if (BNXT_PF(bp))
10536 bnxt_vf_reps_open(bp);
10537 bnxt_ptp_init_rtc(bp, true);
10538 bnxt_ptp_cfg_tstamp_filters(bp);
10539 return 0;
10540
10541 open_err_irq:
10542 bnxt_del_napi(bp);
10543
10544 open_err_free_mem:
10545 bnxt_free_skbs(bp);
10546 bnxt_free_irq(bp);
10547 bnxt_free_mem(bp, true);
10548 return rc;
10549 }
10550
10551 /* rtnl_lock held */
10552 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10553 {
10554 int rc = 0;
10555
10556 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10557 rc = -EIO;
10558 if (!rc)
10559 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
10560 if (rc) {
10561 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10562 dev_close(bp->dev);
10563 }
10564 return rc;
10565 }
10566
10567 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
10568 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
10569 * self tests.
10570 */
10571 int bnxt_half_open_nic(struct bnxt *bp)
10572 {
10573 int rc = 0;
10574
10575 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10576 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10577 rc = -ENODEV;
10578 goto half_open_err;
10579 }
10580
10581 rc = bnxt_alloc_mem(bp, true);
10582 if (rc) {
10583 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10584 goto half_open_err;
10585 }
10586 set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10587 rc = bnxt_init_nic(bp, true);
10588 if (rc) {
10589 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10590 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10591 goto half_open_err;
10592 }
10593 return 0;
10594
10595 half_open_err:
10596 bnxt_free_skbs(bp);
10597 bnxt_free_mem(bp, true);
10598 dev_close(bp->dev);
10599 return rc;
10600 }
10601
10602 /* rtnl_lock held, this call can only be made after a previous successful
10603 * call to bnxt_half_open_nic().
10604 */
10605 void bnxt_half_close_nic(struct bnxt *bp)
10606 {
10607 bnxt_hwrm_resource_free(bp, false, true);
10608 bnxt_free_skbs(bp);
10609 bnxt_free_mem(bp, true);
10610 clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10611 }
10612
10613 void bnxt_reenable_sriov(struct bnxt *bp)
10614 {
10615 if (BNXT_PF(bp)) {
10616 struct bnxt_pf_info *pf = &bp->pf;
10617 int n = pf->active_vfs;
10618
10619 if (n)
10620 bnxt_cfg_hw_sriov(bp, &n, true);
10621 }
10622 }
10623
10624 static int bnxt_open(struct net_device *dev)
10625 {
10626 struct bnxt *bp = netdev_priv(dev);
10627 int rc;
10628
10629 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10630 rc = bnxt_reinit_after_abort(bp);
10631 if (rc) {
10632 if (rc == -EBUSY)
10633 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10634 else
10635 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10636 return -ENODEV;
10637 }
10638 }
10639
10640 rc = bnxt_hwrm_if_change(bp, true);
10641 if (rc)
10642 return rc;
10643
10644 rc = __bnxt_open_nic(bp, true, true);
10645 if (rc) {
10646 bnxt_hwrm_if_change(bp, false);
10647 } else {
10648 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10649 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10650 bnxt_ulp_start(bp, 0);
10651 bnxt_reenable_sriov(bp);
10652 }
10653 }
10654 bnxt_hwmon_open(bp);
10655 }
10656
10657 return rc;
10658 }
10659
10660 static bool bnxt_drv_busy(struct bnxt *bp)
10661 {
10662 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10663 test_bit(BNXT_STATE_READ_STATS, &bp->state));
10664 }
10665
10666 static void bnxt_get_ring_stats(struct bnxt *bp,
10667 struct rtnl_link_stats64 *stats);
10668
10669 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10670 bool link_re_init)
10671 {
10672 /* Close the VF-reps before closing PF */
10673 if (BNXT_PF(bp))
10674 bnxt_vf_reps_close(bp);
10675
10676 /* Change device state to avoid TX queue wake up's */
10677 bnxt_tx_disable(bp);
10678
10679 clear_bit(BNXT_STATE_OPEN, &bp->state);
10680 smp_mb__after_atomic();
10681 while (bnxt_drv_busy(bp))
10682 msleep(20);
10683
10684 /* Flush rings and disable interrupts */
10685 bnxt_shutdown_nic(bp, irq_re_init);
10686
10687 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10688
10689 bnxt_debug_dev_exit(bp);
10690 bnxt_disable_napi(bp);
10691 del_timer_sync(&bp->timer);
10692 bnxt_free_skbs(bp);
10693
10694 /* Save ring stats before shutdown */
10695 if (bp->bnapi && irq_re_init) {
10696 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10697 bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev);
10698 }
10699 if (irq_re_init) {
10700 bnxt_free_irq(bp);
10701 bnxt_del_napi(bp);
10702 }
10703 bnxt_free_mem(bp, irq_re_init);
10704 }
10705
10706 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10707 {
10708 int rc = 0;
10709
10710 if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10711 /* If we get here, it means firmware reset is in progress
10712 * while we are trying to close. We can safely proceed with
10713 * the close because we are holding rtnl_lock(). Some firmware
10714 * messages may fail as we proceed to close. We set the
10715 * ABORT_ERR flag here so that the FW reset thread will later
10716 * abort when it gets the rtnl_lock() and sees the flag.
10717 */
10718 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10719 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10720 }
10721
10722 #ifdef CONFIG_BNXT_SRIOV
10723 if (bp->sriov_cfg) {
10724 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10725 !bp->sriov_cfg,
10726 BNXT_SRIOV_CFG_WAIT_TMO);
10727 if (rc)
10728 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10729 }
10730 #endif
10731 __bnxt_close_nic(bp, irq_re_init, link_re_init);
10732 return rc;
10733 }
10734
10735 static int bnxt_close(struct net_device *dev)
10736 {
10737 struct bnxt *bp = netdev_priv(dev);
10738
10739 bnxt_hwmon_close(bp);
10740 bnxt_close_nic(bp, true, true);
10741 bnxt_hwrm_shutdown_link(bp);
10742 bnxt_hwrm_if_change(bp, false);
10743 return 0;
10744 }
10745
10746 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10747 u16 *val)
10748 {
10749 struct hwrm_port_phy_mdio_read_output *resp;
10750 struct hwrm_port_phy_mdio_read_input *req;
10751 int rc;
10752
10753 if (bp->hwrm_spec_code < 0x10a00)
10754 return -EOPNOTSUPP;
10755
10756 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
10757 if (rc)
10758 return rc;
10759
10760 req->port_id = cpu_to_le16(bp->pf.port_id);
10761 req->phy_addr = phy_addr;
10762 req->reg_addr = cpu_to_le16(reg & 0x1f);
10763 if (mdio_phy_id_is_c45(phy_addr)) {
10764 req->cl45_mdio = 1;
10765 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10766 req->dev_addr = mdio_phy_id_devad(phy_addr);
10767 req->reg_addr = cpu_to_le16(reg);
10768 }
10769
10770 resp = hwrm_req_hold(bp, req);
10771 rc = hwrm_req_send(bp, req);
10772 if (!rc)
10773 *val = le16_to_cpu(resp->reg_data);
10774 hwrm_req_drop(bp, req);
10775 return rc;
10776 }
10777
10778 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10779 u16 val)
10780 {
10781 struct hwrm_port_phy_mdio_write_input *req;
10782 int rc;
10783
10784 if (bp->hwrm_spec_code < 0x10a00)
10785 return -EOPNOTSUPP;
10786
10787 rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
10788 if (rc)
10789 return rc;
10790
10791 req->port_id = cpu_to_le16(bp->pf.port_id);
10792 req->phy_addr = phy_addr;
10793 req->reg_addr = cpu_to_le16(reg & 0x1f);
10794 if (mdio_phy_id_is_c45(phy_addr)) {
10795 req->cl45_mdio = 1;
10796 req->phy_addr = mdio_phy_id_prtad(phy_addr);
10797 req->dev_addr = mdio_phy_id_devad(phy_addr);
10798 req->reg_addr = cpu_to_le16(reg);
10799 }
10800 req->reg_data = cpu_to_le16(val);
10801
10802 return hwrm_req_send(bp, req);
10803 }
10804
10805 /* rtnl_lock held */
10806 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10807 {
10808 struct mii_ioctl_data *mdio = if_mii(ifr);
10809 struct bnxt *bp = netdev_priv(dev);
10810 int rc;
10811
10812 switch (cmd) {
10813 case SIOCGMIIPHY:
10814 mdio->phy_id = bp->link_info.phy_addr;
10815
10816 fallthrough;
10817 case SIOCGMIIREG: {
10818 u16 mii_regval = 0;
10819
10820 if (!netif_running(dev))
10821 return -EAGAIN;
10822
10823 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10824 &mii_regval);
10825 mdio->val_out = mii_regval;
10826 return rc;
10827 }
10828
10829 case SIOCSMIIREG:
10830 if (!netif_running(dev))
10831 return -EAGAIN;
10832
10833 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10834 mdio->val_in);
10835
10836 case SIOCSHWTSTAMP:
10837 return bnxt_hwtstamp_set(dev, ifr);
10838
10839 case SIOCGHWTSTAMP:
10840 return bnxt_hwtstamp_get(dev, ifr);
10841
10842 default:
10843 /* do nothing */
10844 break;
10845 }
10846 return -EOPNOTSUPP;
10847 }
10848
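/* Sum the per-completion-ring hardware counters into the standard
 * rtnl_link_stats64 fields (packets, bytes, multicast, drops).
 */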
10849 static void bnxt_get_ring_stats(struct bnxt *bp,
10850 struct rtnl_link_stats64 *stats)
10851 {
10852 int i;
10853
10854 for (i = 0; i < bp->cp_nr_rings; i++) {
10855 struct bnxt_napi *bnapi = bp->bnapi[i];
10856 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10857 u64 *sw = cpr->stats.sw_stats;
10858
10859 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10860 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10861 stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10862
10863 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10864 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10865 stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10866
10867 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10868 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10869 stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10870
10871 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10872 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10873 stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10874
10875 stats->rx_missed_errors +=
10876 BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10877
10878 stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10879
10880 stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10881
10882 stats->rx_dropped +=
10883 cpr->sw_stats.rx.rx_netpoll_discards +
10884 cpr->sw_stats.rx.rx_oom_discards;
10885 }
10886 }
10887
10888 static void bnxt_add_prev_stats(struct bnxt *bp,
10889 struct rtnl_link_stats64 *stats)
10890 {
10891 struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10892
10893 stats->rx_packets += prev_stats->rx_packets;
10894 stats->tx_packets += prev_stats->tx_packets;
10895 stats->rx_bytes += prev_stats->rx_bytes;
10896 stats->tx_bytes += prev_stats->tx_bytes;
10897 stats->rx_missed_errors += prev_stats->rx_missed_errors;
10898 stats->multicast += prev_stats->multicast;
10899 stats->rx_dropped += prev_stats->rx_dropped;
10900 stats->tx_dropped += prev_stats->tx_dropped;
10901 }
10902
10903 static void
10904 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10905 {
10906 struct bnxt *bp = netdev_priv(dev);
10907
10908 set_bit(BNXT_STATE_READ_STATS, &bp->state);
10909 /* Make sure bnxt_close_nic() sees that we are reading stats before
10910 * we check the BNXT_STATE_OPEN flag.
10911 */
10912 smp_mb__after_atomic();
10913 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10914 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10915 *stats = bp->net_stats_prev;
10916 return;
10917 }
10918
10919 bnxt_get_ring_stats(bp, stats);
10920 bnxt_add_prev_stats(bp, stats);
10921
10922 if (bp->flags & BNXT_FLAG_PORT_STATS) {
10923 u64 *rx = bp->port_stats.sw_stats;
10924 u64 *tx = bp->port_stats.sw_stats +
10925 BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10926
10927 stats->rx_crc_errors =
10928 BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10929 stats->rx_frame_errors =
10930 BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10931 stats->rx_length_errors =
10932 BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10933 BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10934 BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10935 stats->rx_errors =
10936 BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10937 BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10938 stats->collisions =
10939 BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10940 stats->tx_fifo_errors =
10941 BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10942 stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10943 }
10944 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10945 }
10946
10947 static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
10948 struct bnxt_total_ring_err_stats *stats,
10949 struct bnxt_cp_ring_info *cpr)
10950 {
10951 struct bnxt_sw_stats *sw_stats = &cpr->sw_stats;
10952 u64 *hw_stats = cpr->stats.sw_stats;
10953
10954 stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
10955 stats->rx_total_resets += sw_stats->rx.rx_resets;
10956 stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors;
10957 stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards;
10958 stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards;
10959 stats->rx_total_ring_discards +=
10960 BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts);
10961 stats->tx_total_resets += sw_stats->tx.tx_resets;
10962 stats->tx_total_ring_discards +=
10963 BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts);
10964 stats->total_missed_irqs += sw_stats->cmn.missed_irqs;
10965 }
10966
10967 void bnxt_get_ring_err_stats(struct bnxt *bp,
10968 struct bnxt_total_ring_err_stats *stats)
10969 {
10970 int i;
10971
10972 for (i = 0; i < bp->cp_nr_rings; i++)
10973 bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring);
10974 }
10975
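/* Copy the netdev multicast list into the default VNIC and report whether
 * it changed.  If the list exceeds BNXT_MAX_MC_ADDRS, request ALL_MCAST and
 * clear the VNIC multicast count instead.
 */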
10976 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10977 {
10978 struct net_device *dev = bp->dev;
10979 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10980 struct netdev_hw_addr *ha;
10981 u8 *haddr;
10982 int mc_count = 0;
10983 bool update = false;
10984 int off = 0;
10985
10986 netdev_for_each_mc_addr(ha, dev) {
10987 if (mc_count >= BNXT_MAX_MC_ADDRS) {
10988 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10989 vnic->mc_list_count = 0;
10990 return false;
10991 }
10992 haddr = ha->addr;
10993 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10994 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10995 update = true;
10996 }
10997 off += ETH_ALEN;
10998 mc_count++;
10999 }
11000 if (mc_count)
11001 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
11002
11003 if (mc_count != vnic->mc_list_count) {
11004 vnic->mc_list_count = mc_count;
11005 update = true;
11006 }
11007 return update;
11008 }
11009
11010 static bool bnxt_uc_list_updated(struct bnxt *bp)
11011 {
11012 struct net_device *dev = bp->dev;
11013 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11014 struct netdev_hw_addr *ha;
11015 int off = 0;
11016
11017 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
11018 return true;
11019
11020 netdev_for_each_uc_addr(ha, dev) {
11021 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
11022 return true;
11023
11024 off += ETH_ALEN;
11025 }
11026 return false;
11027 }
11028
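/* Recompute the default VNIC rx_mask from the netdev flags and address
 * lists; any needed firmware update is deferred to the slow path task via
 * BNXT_RX_MASK_SP_EVENT.
 */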
11029 static void bnxt_set_rx_mode(struct net_device *dev)
11030 {
11031 struct bnxt *bp = netdev_priv(dev);
11032 struct bnxt_vnic_info *vnic;
11033 bool mc_update = false;
11034 bool uc_update;
11035 u32 mask;
11036
11037 if (!test_bit(BNXT_STATE_OPEN, &bp->state))
11038 return;
11039
11040 vnic = &bp->vnic_info[0];
11041 mask = vnic->rx_mask;
11042 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
11043 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
11044 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
11045 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
11046
11047 if (dev->flags & IFF_PROMISC)
11048 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11049
11050 uc_update = bnxt_uc_list_updated(bp);
11051
11052 if (dev->flags & IFF_BROADCAST)
11053 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
11054 if (dev->flags & IFF_ALLMULTI) {
11055 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11056 vnic->mc_list_count = 0;
11057 } else if (dev->flags & IFF_MULTICAST) {
11058 mc_update = bnxt_mc_list_updated(bp, &mask);
11059 }
11060
11061 if (mask != vnic->rx_mask || uc_update || mc_update) {
11062 vnic->rx_mask = mask;
11063
11064 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
11065 }
11066 }
11067
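/* Apply the rx mode computed above: free the stale unicast L2 filters,
 * reprogram the unicast list (falling back to promiscuous mode if it
 * overflows), then send the updated rx_mask to firmware.
 */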
11068 static int bnxt_cfg_rx_mode(struct bnxt *bp)
11069 {
11070 struct net_device *dev = bp->dev;
11071 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11072 struct hwrm_cfa_l2_filter_free_input *req;
11073 struct netdev_hw_addr *ha;
11074 int i, off = 0, rc;
11075 bool uc_update;
11076
11077 netif_addr_lock_bh(dev);
11078 uc_update = bnxt_uc_list_updated(bp);
11079 netif_addr_unlock_bh(dev);
11080
11081 if (!uc_update)
11082 goto skip_uc;
11083
11084 rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
11085 if (rc)
11086 return rc;
11087 hwrm_req_hold(bp, req);
11088 for (i = 1; i < vnic->uc_filter_count; i++) {
11089 req->l2_filter_id = vnic->fw_l2_filter_id[i];
11090
11091 rc = hwrm_req_send(bp, req);
11092 }
11093 hwrm_req_drop(bp, req);
11094
11095 vnic->uc_filter_count = 1;
11096
11097 netif_addr_lock_bh(dev);
11098 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
11099 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11100 } else {
11101 netdev_for_each_uc_addr(ha, dev) {
11102 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
11103 off += ETH_ALEN;
11104 vnic->uc_filter_count++;
11105 }
11106 }
11107 netif_addr_unlock_bh(dev);
11108
11109 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
11110 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
11111 if (rc) {
11112 if (BNXT_VF(bp) && rc == -ENODEV) {
11113 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
11114 netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
11115 else
11116 netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
11117 rc = 0;
11118 } else {
11119 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
11120 }
11121 vnic->uc_filter_count = i;
11122 return rc;
11123 }
11124 }
11125 if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
11126 netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
11127
11128 skip_uc:
11129 if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
11130 !bnxt_promisc_ok(bp))
11131 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11132 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
11133 if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
11134 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
11135 rc);
11136 vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
11137 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11138 vnic->mc_list_count = 0;
11139 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
11140 }
11141 if (rc)
11142 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
11143 rc);
11144
11145 return rc;
11146 }
11147
11148 static bool bnxt_can_reserve_rings(struct bnxt *bp)
11149 {
11150 #ifdef CONFIG_BNXT_SRIOV
11151 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
11152 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11153
11154 /* No minimum rings were provisioned by the PF. Don't
11155 * reserve rings by default when device is down.
11156 */
11157 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
11158 return true;
11159
11160 if (!netif_running(bp->dev))
11161 return false;
11162 }
11163 #endif
11164 return true;
11165 }
11166
11167 /* If the chip and firmware support RFS */
11168 static bool bnxt_rfs_supported(struct bnxt *bp)
11169 {
11170 if (bp->flags & BNXT_FLAG_CHIP_P5) {
11171 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
11172 return true;
11173 return false;
11174 }
11175 /* 212 firmware is broken for aRFS */
11176 if (BNXT_FW_MAJ(bp) == 212)
11177 return false;
11178 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
11179 return true;
11180 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
11181 return true;
11182 return false;
11183 }
11184
11185 /* If runtime conditions support RFS */
11186 static bool bnxt_rfs_capable(struct bnxt *bp)
11187 {
11188 #ifdef CONFIG_RFS_ACCEL
11189 int vnics, max_vnics, max_rss_ctxs;
11190
11191 if (bp->flags & BNXT_FLAG_CHIP_P5)
11192 return bnxt_rfs_supported(bp);
11193 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
11194 return false;
11195
11196 vnics = 1 + bp->rx_nr_rings;
11197 max_vnics = bnxt_get_max_func_vnics(bp);
11198 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
11199
11200 /* RSS contexts not a limiting factor */
11201 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
11202 max_rss_ctxs = max_vnics;
11203 if (vnics > max_vnics || vnics > max_rss_ctxs) {
11204 if (bp->rx_nr_rings > 1)
11205 netdev_warn(bp->dev,
11206 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
11207 min(max_rss_ctxs - 1, max_vnics - 1));
11208 return false;
11209 }
11210
11211 if (!BNXT_NEW_RM(bp))
11212 return true;
11213
11214 if (vnics == bp->hw_resc.resv_vnics)
11215 return true;
11216
11217 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
11218 if (vnics <= bp->hw_resc.resv_vnics)
11219 return true;
11220
11221 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
11222 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
11223 return false;
11224 #else
11225 return false;
11226 #endif
11227 }
11228
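/* Reconcile requested features: drop NTUPLE when aRFS is not currently
 * possible, drop LRO/GRO_HW when aggregation rings are unavailable or XDP
 * is attached, and keep CTAG/STAG RX VLAN acceleration toggled together.
 */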
11229 static netdev_features_t bnxt_fix_features(struct net_device *dev,
11230 netdev_features_t features)
11231 {
11232 struct bnxt *bp = netdev_priv(dev);
11233 netdev_features_t vlan_features;
11234
11235 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
11236 features &= ~NETIF_F_NTUPLE;
11237
11238 if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
11239 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11240
11241 if (!(features & NETIF_F_GRO))
11242 features &= ~NETIF_F_GRO_HW;
11243
11244 if (features & NETIF_F_GRO_HW)
11245 features &= ~NETIF_F_LRO;
11246
11247 /* Both CTAG and STAG VLAN acceleration on the RX side have to be
11248 * turned on or off together.
11249 */
11250 vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
11251 if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
11252 if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
11253 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
11254 else if (vlan_features)
11255 features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
11256 }
11257 #ifdef CONFIG_BNXT_SRIOV
11258 if (BNXT_VF(bp) && bp->vf.vlan)
11259 features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
11260 #endif
11261 return features;
11262 }
11263
11264 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
11265 {
11266 struct bnxt *bp = netdev_priv(dev);
11267 u32 flags = bp->flags;
11268 u32 changes;
11269 int rc = 0;
11270 bool re_init = false;
11271 bool update_tpa = false;
11272
11273 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
11274 if (features & NETIF_F_GRO_HW)
11275 flags |= BNXT_FLAG_GRO;
11276 else if (features & NETIF_F_LRO)
11277 flags |= BNXT_FLAG_LRO;
11278
11279 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
11280 flags &= ~BNXT_FLAG_TPA;
11281
11282 if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
11283 flags |= BNXT_FLAG_STRIP_VLAN;
11284
11285 if (features & NETIF_F_NTUPLE)
11286 flags |= BNXT_FLAG_RFS;
11287
11288 changes = flags ^ bp->flags;
11289 if (changes & BNXT_FLAG_TPA) {
11290 update_tpa = true;
11291 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
11292 (flags & BNXT_FLAG_TPA) == 0 ||
11293 (bp->flags & BNXT_FLAG_CHIP_P5))
11294 re_init = true;
11295 }
11296
11297 if (changes & ~BNXT_FLAG_TPA)
11298 re_init = true;
11299
11300 if (flags != bp->flags) {
11301 u32 old_flags = bp->flags;
11302
11303 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11304 bp->flags = flags;
11305 if (update_tpa)
11306 bnxt_set_ring_params(bp);
11307 return rc;
11308 }
11309
11310 if (re_init) {
11311 bnxt_close_nic(bp, false, false);
11312 bp->flags = flags;
11313 if (update_tpa)
11314 bnxt_set_ring_params(bp);
11315
11316 return bnxt_open_nic(bp, false, false);
11317 }
11318 if (update_tpa) {
11319 bp->flags = flags;
11320 rc = bnxt_set_tpa(bp,
11321 (flags & BNXT_FLAG_TPA) ?
11322 true : false);
11323 if (rc)
11324 bp->flags = old_flags;
11325 }
11326 }
11327 return rc;
11328 }
11329
11330 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
11331 u8 **nextp)
11332 {
11333 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
11334 struct hop_jumbo_hdr *jhdr;
11335 int hdr_count = 0;
11336 u8 *nexthdr;
11337 int start;
11338
11339 /* Check that there are at most 2 IPv6 extension headers, no
11340 * fragment header, and each is <= 64 bytes.
11341 */
11342 start = nw_off + sizeof(*ip6h);
11343 nexthdr = &ip6h->nexthdr;
11344 while (ipv6_ext_hdr(*nexthdr)) {
11345 struct ipv6_opt_hdr *hp;
11346 int hdrlen;
11347
11348 if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
11349 *nexthdr == NEXTHDR_FRAGMENT)
11350 return false;
11351 hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
11352 skb_headlen(skb), NULL);
11353 if (!hp)
11354 return false;
11355 if (*nexthdr == NEXTHDR_AUTH)
11356 hdrlen = ipv6_authlen(hp);
11357 else
11358 hdrlen = ipv6_optlen(hp);
11359
11360 if (hdrlen > 64)
11361 return false;
11362
11363 /* The ext header may be a hop-by-hop header inserted for
11364 * big TCP purposes. This will be removed before sending
11365 * from the NIC, so do not count it.
11366 */
11367 if (*nexthdr == NEXTHDR_HOP) {
11368 if (likely(skb->len <= GRO_LEGACY_MAX_SIZE))
11369 goto increment_hdr;
11370
11371 jhdr = (struct hop_jumbo_hdr *)hp;
11372 if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 ||
11373 jhdr->nexthdr != IPPROTO_TCP)
11374 goto increment_hdr;
11375
11376 goto next_hdr;
11377 }
11378 increment_hdr:
11379 hdr_count++;
11380 next_hdr:
11381 nexthdr = &hp->nexthdr;
11382 start += hdrlen;
11383 }
11384 if (nextp) {
11385 /* Caller will check inner protocol */
11386 if (skb->encapsulation) {
11387 *nextp = nexthdr;
11388 return true;
11389 }
11390 *nextp = NULL;
11391 }
11392 /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
11393 return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
11394 }
11395
11396 /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
11397 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
11398 {
11399 struct udphdr *uh = udp_hdr(skb);
11400 __be16 udp_port = uh->dest;
11401
11402 if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
11403 return false;
11404 if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
11405 struct ethhdr *eh = inner_eth_hdr(skb);
11406
11407 switch (eh->h_proto) {
11408 case htons(ETH_P_IP):
11409 return true;
11410 case htons(ETH_P_IPV6):
11411 return bnxt_exthdr_check(bp, skb,
11412 skb_inner_network_offset(skb),
11413 NULL);
11414 }
11415 }
11416 return false;
11417 }
11418
11419 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11420 {
11421 switch (l4_proto) {
11422 case IPPROTO_UDP:
11423 return bnxt_udp_tunl_check(bp, skb);
11424 case IPPROTO_IPIP:
11425 return true;
11426 case IPPROTO_GRE: {
11427 switch (skb->inner_protocol) {
11428 default:
11429 return false;
11430 case htons(ETH_P_IP):
11431 return true;
11432 case htons(ETH_P_IPV6):
11433 fallthrough;
11434 }
11435 }
11436 case IPPROTO_IPV6:
11437 /* Check ext headers of inner ipv6 */
11438 return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11439 NULL);
11440 }
11441 return false;
11442 }
11443
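/* Per-skb TX feature check: keep checksum/GSO offload only for tunnel
 * encapsulations the hardware can parse (VXLAN/Geneve on the configured
 * ports, GRE, IPIP) and for IPv6 headers that pass bnxt_exthdr_check();
 * anything else falls back to software.
 */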
11444 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11445 struct net_device *dev,
11446 netdev_features_t features)
11447 {
11448 struct bnxt *bp = netdev_priv(dev);
11449 u8 *l4_proto;
11450
11451 features = vlan_features_check(skb, features);
11452 switch (vlan_get_protocol(skb)) {
11453 case htons(ETH_P_IP):
11454 if (!skb->encapsulation)
11455 return features;
11456 l4_proto = &ip_hdr(skb)->protocol;
11457 if (bnxt_tunl_check(bp, skb, *l4_proto))
11458 return features;
11459 break;
11460 case htons(ETH_P_IPV6):
11461 if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11462 &l4_proto))
11463 break;
11464 if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11465 return features;
11466 break;
11467 }
11468 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11469 }
11470
11471 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11472 u32 *reg_buf)
11473 {
11474 struct hwrm_dbg_read_direct_output *resp;
11475 struct hwrm_dbg_read_direct_input *req;
11476 __le32 *dbg_reg_buf;
11477 dma_addr_t mapping;
11478 int rc, i;
11479
11480 rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
11481 if (rc)
11482 return rc;
11483
11484 dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
11485 &mapping);
11486 if (!dbg_reg_buf) {
11487 rc = -ENOMEM;
11488 goto dbg_rd_reg_exit;
11489 }
11490
11491 req->host_dest_addr = cpu_to_le64(mapping);
11492
11493 resp = hwrm_req_hold(bp, req);
11494 req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11495 req->read_len32 = cpu_to_le32(num_words);
11496
11497 rc = hwrm_req_send(bp, req);
11498 if (rc || resp->error_code) {
11499 rc = -EIO;
11500 goto dbg_rd_reg_exit;
11501 }
11502 for (i = 0; i < num_words; i++)
11503 reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11504
11505 dbg_rd_reg_exit:
11506 hwrm_req_drop(bp, req);
11507 return rc;
11508 }
11509
11510 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11511 u32 ring_id, u32 *prod, u32 *cons)
11512 {
11513 struct hwrm_dbg_ring_info_get_output *resp;
11514 struct hwrm_dbg_ring_info_get_input *req;
11515 int rc;
11516
11517 rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
11518 if (rc)
11519 return rc;
11520
11521 req->ring_type = ring_type;
11522 req->fw_ring_id = cpu_to_le32(ring_id);
11523 resp = hwrm_req_hold(bp, req);
11524 rc = hwrm_req_send(bp, req);
11525 if (!rc) {
11526 *prod = le32_to_cpu(resp->producer_index);
11527 *cons = le32_to_cpu(resp->consumer_index);
11528 }
11529 hwrm_req_drop(bp, req);
11530 return rc;
11531 }
11532
11533 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11534 {
11535 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
11536 int i = bnapi->index;
11537
11538 if (!txr)
11539 return;
11540
11541 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11542 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11543 txr->tx_cons);
11544 }
11545
11546 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11547 {
11548 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
11549 int i = bnapi->index;
11550
11551 if (!rxr)
11552 return;
11553
11554 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11555 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11556 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11557 rxr->rx_sw_agg_prod);
11558 }
11559
11560 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11561 {
11562 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11563 int i = bnapi->index;
11564
11565 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11566 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11567 }
11568
11569 static void bnxt_dbg_dump_states(struct bnxt *bp)
11570 {
11571 int i;
11572 struct bnxt_napi *bnapi;
11573
11574 for (i = 0; i < bp->cp_nr_rings; i++) {
11575 bnapi = bp->bnapi[i];
11576 if (netif_msg_drv(bp)) {
11577 bnxt_dump_tx_sw_state(bnapi);
11578 bnxt_dump_rx_sw_state(bnapi);
11579 bnxt_dump_cp_sw_state(bnapi);
11580 }
11581 }
11582 }
11583
11584 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11585 {
11586 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11587 struct hwrm_ring_reset_input *req;
11588 struct bnxt_napi *bnapi = rxr->bnapi;
11589 struct bnxt_cp_ring_info *cpr;
11590 u16 cp_ring_id;
11591 int rc;
11592
11593 rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
11594 if (rc)
11595 return rc;
11596
11597 cpr = &bnapi->cp_ring;
11598 cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
11599 req->cmpl_ring = cpu_to_le16(cp_ring_id);
11600 req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11601 req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11602 return hwrm_req_send_silent(bp, req);
11603 }
11604
11605 static void bnxt_reset_task(struct bnxt *bp, bool silent)
11606 {
11607 if (!silent)
11608 bnxt_dbg_dump_states(bp);
11609 if (netif_running(bp->dev)) {
11610 int rc;
11611
11612 if (silent) {
11613 bnxt_close_nic(bp, false, false);
11614 bnxt_open_nic(bp, false, false);
11615 } else {
11616 bnxt_ulp_stop(bp);
11617 bnxt_close_nic(bp, true, false);
11618 rc = bnxt_open_nic(bp, true, false);
11619 bnxt_ulp_start(bp, rc);
11620 }
11621 }
11622 }
11623
11624 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
11625 {
11626 struct bnxt *bp = netdev_priv(dev);
11627
11628 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11629 bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT);
11630 }
11631
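/* Periodic firmware health poll driven from bnxt_timer().  A stalled
 * heartbeat counter or an unexpected change in the reset counter schedules
 * BNXT_FW_EXCEPTION_SP_EVENT to start error recovery.
 */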
11632 static void bnxt_fw_health_check(struct bnxt *bp)
11633 {
11634 struct bnxt_fw_health *fw_health = bp->fw_health;
11635 struct pci_dev *pdev = bp->pdev;
11636 u32 val;
11637
11638 if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11639 return;
11640
11641 /* Make sure it is enabled before checking the tmr_counter. */
11642 smp_rmb();
11643 if (fw_health->tmr_counter) {
11644 fw_health->tmr_counter--;
11645 return;
11646 }
11647
11648 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11649 if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
11650 fw_health->arrests++;
11651 goto fw_reset;
11652 }
11653
11654 fw_health->last_fw_heartbeat = val;
11655
11656 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11657 if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
11658 fw_health->discoveries++;
11659 goto fw_reset;
11660 }
11661
11662 fw_health->tmr_counter = fw_health->tmr_multiplier;
11663 return;
11664
11665 fw_reset:
11666 bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT);
11667 }
11668
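/* Per-device timer.  While the device is open it queues the periodic slow
 * path work (stats, firmware health check, PHY setting retries, flow and
 * ntuple filter maintenance) and then re-arms itself.
 */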
11669 static void bnxt_timer(struct timer_list *t)
11670 {
11671 struct bnxt *bp = from_timer(bp, t, timer);
11672 struct net_device *dev = bp->dev;
11673
11674 if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
11675 return;
11676
11677 if (atomic_read(&bp->intr_sem) != 0)
11678 goto bnxt_restart_timer;
11679
11680 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11681 bnxt_fw_health_check(bp);
11682
11683 if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks)
11684 bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT);
11685
11686 if (bnxt_tc_flower_enabled(bp))
11687 bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT);
11688
11689 #ifdef CONFIG_RFS_ACCEL
11690 if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count)
11691 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
11692 #endif /*CONFIG_RFS_ACCEL*/
11693
11694 if (bp->link_info.phy_retry) {
11695 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11696 bp->link_info.phy_retry = false;
11697 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11698 } else {
11699 bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT);
11700 }
11701 }
11702
11703 if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
11704 bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT);
11705
11706 if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11707 netif_carrier_ok(dev))
11708 bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT);
11709
11710 bnxt_restart_timer:
11711 mod_timer(&bp->timer, jiffies + bp->current_interval);
11712 }
11713
11714 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
11715 {
11716 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11717 * set. If the device is being closed, bnxt_close() may be holding
11718 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
11719 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
11720 */
11721 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11722 rtnl_lock();
11723 }
11724
11725 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11726 {
11727 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11728 rtnl_unlock();
11729 }
11730
11731 /* Only called from bnxt_sp_task() */
11732 static void bnxt_reset(struct bnxt *bp, bool silent)
11733 {
11734 bnxt_rtnl_lock_sp(bp);
11735 if (test_bit(BNXT_STATE_OPEN, &bp->state))
11736 bnxt_reset_task(bp, silent);
11737 bnxt_rtnl_unlock_sp(bp);
11738 }
11739
11740 /* Only called from bnxt_sp_task() */
11741 static void bnxt_rx_ring_reset(struct bnxt *bp)
11742 {
11743 int i;
11744
11745 bnxt_rtnl_lock_sp(bp);
11746 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11747 bnxt_rtnl_unlock_sp(bp);
11748 return;
11749 }
11750 /* Disable and flush TPA before resetting the RX ring */
11751 if (bp->flags & BNXT_FLAG_TPA)
11752 bnxt_set_tpa(bp, false);
11753 for (i = 0; i < bp->rx_nr_rings; i++) {
11754 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11755 struct bnxt_cp_ring_info *cpr;
11756 int rc;
11757
11758 if (!rxr->bnapi->in_reset)
11759 continue;
11760
11761 rc = bnxt_hwrm_rx_ring_reset(bp, i);
11762 if (rc) {
11763 if (rc == -EINVAL || rc == -EOPNOTSUPP)
11764 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11765 else
11766 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11767 rc);
11768 bnxt_reset_task(bp, true);
11769 break;
11770 }
11771 bnxt_free_one_rx_ring_skbs(bp, i);
11772 rxr->rx_prod = 0;
11773 rxr->rx_agg_prod = 0;
11774 rxr->rx_sw_agg_prod = 0;
11775 rxr->rx_next_cons = 0;
11776 rxr->bnapi->in_reset = false;
11777 bnxt_alloc_one_rx_ring(bp, i);
11778 cpr = &rxr->bnapi->cp_ring;
11779 cpr->sw_stats.rx.rx_resets++;
11780 if (bp->flags & BNXT_FLAG_AGG_RINGS)
11781 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11782 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11783 }
11784 if (bp->flags & BNXT_FLAG_TPA)
11785 bnxt_set_tpa(bp, true);
11786 bnxt_rtnl_unlock_sp(bp);
11787 }
11788
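/* Tear the device down in preparation for a firmware reset.  In the fatal
 * case the device is quiesced and bus mastering is disabled first, then
 * rings, IRQs and context memory are freed and the driver is unregistered
 * from the firmware.
 */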
11789 static void bnxt_fw_reset_close(struct bnxt *bp)
11790 {
11791 bnxt_ulp_stop(bp);
11792 /* When firmware is in fatal state, quiesce device and disable
11793 * bus master to prevent any potential bad DMAs before freeing
11794 * kernel memory.
11795 */
11796 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11797 u16 val = 0;
11798
11799 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11800 if (val == 0xffff)
11801 bp->fw_reset_min_dsecs = 0;
11802 bnxt_tx_disable(bp);
11803 bnxt_disable_napi(bp);
11804 bnxt_disable_int_sync(bp);
11805 bnxt_free_irq(bp);
11806 bnxt_clear_int_mode(bp);
11807 pci_disable_device(bp->pdev);
11808 }
11809 __bnxt_close_nic(bp, true, false);
11810 bnxt_vf_reps_free(bp);
11811 bnxt_clear_int_mode(bp);
11812 bnxt_hwrm_func_drv_unrgtr(bp);
11813 if (pci_is_enabled(bp->pdev))
11814 pci_disable_device(bp->pdev);
11815 bnxt_free_ctx_mem(bp);
11816 kfree(bp->ctx);
11817 bp->ctx = NULL;
11818 }
11819
11820 static bool is_bnxt_fw_ok(struct bnxt *bp)
11821 {
11822 struct bnxt_fw_health *fw_health = bp->fw_health;
11823 bool no_heartbeat = false, has_reset = false;
11824 u32 val;
11825
11826 val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11827 if (val == fw_health->last_fw_heartbeat)
11828 no_heartbeat = true;
11829
11830 val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11831 if (val != fw_health->last_fw_reset_cnt)
11832 has_reset = true;
11833
11834 if (!no_heartbeat && has_reset)
11835 return true;
11836
11837 return false;
11838 }
11839
11840 /* rtnl_lock is acquired before calling this function */
11841 static void bnxt_force_fw_reset(struct bnxt *bp)
11842 {
11843 struct bnxt_fw_health *fw_health = bp->fw_health;
11844 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11845 u32 wait_dsecs;
11846
11847 if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11848 test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11849 return;
11850
11851 if (ptp) {
11852 spin_lock_bh(&ptp->ptp_lock);
11853 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11854 spin_unlock_bh(&ptp->ptp_lock);
11855 } else {
11856 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11857 }
11858 bnxt_fw_reset_close(bp);
11859 wait_dsecs = fw_health->master_func_wait_dsecs;
11860 if (fw_health->primary) {
11861 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11862 wait_dsecs = 0;
11863 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11864 } else {
11865 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11866 wait_dsecs = fw_health->normal_func_wait_dsecs;
11867 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11868 }
11869
11870 bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
11871 bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11872 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11873 }
11874
11875 void bnxt_fw_exception(struct bnxt *bp)
11876 {
11877 netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
11878 set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11879 bnxt_rtnl_lock_sp(bp);
11880 bnxt_force_fw_reset(bp);
11881 bnxt_rtnl_unlock_sp(bp);
11882 }
11883
11884 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
11885 * < 0 on error.
11886 */
11887 static int bnxt_get_registered_vfs(struct bnxt *bp)
11888 {
11889 #ifdef CONFIG_BNXT_SRIOV
11890 int rc;
11891
11892 if (!BNXT_PF(bp))
11893 return 0;
11894
11895 rc = bnxt_hwrm_func_qcfg(bp);
11896 if (rc) {
11897 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11898 return rc;
11899 }
11900 if (bp->pf.registered_vfs)
11901 return bp->pf.registered_vfs;
11902 if (bp->sriov_cfg)
11903 return 1;
11904 #endif
11905 return 0;
11906 }
11907
11908 void bnxt_fw_reset(struct bnxt *bp)
11909 {
11910 bnxt_rtnl_lock_sp(bp);
11911 if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11912 !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11913 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11914 int n = 0, tmo;
11915
11916 if (ptp) {
11917 spin_lock_bh(&ptp->ptp_lock);
11918 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11919 spin_unlock_bh(&ptp->ptp_lock);
11920 } else {
11921 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11922 }
11923 if (bp->pf.active_vfs &&
11924 !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11925 n = bnxt_get_registered_vfs(bp);
11926 if (n < 0) {
11927 netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11928 n);
11929 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11930 dev_close(bp->dev);
11931 goto fw_reset_exit;
11932 } else if (n > 0) {
11933 u16 vf_tmo_dsecs = n * 10;
11934
11935 if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11936 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11937 bp->fw_reset_state =
11938 BNXT_FW_RESET_STATE_POLL_VF;
11939 bnxt_queue_fw_reset_work(bp, HZ / 10);
11940 goto fw_reset_exit;
11941 }
11942 bnxt_fw_reset_close(bp);
11943 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11944 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11945 tmo = HZ / 10;
11946 } else {
11947 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11948 tmo = bp->fw_reset_min_dsecs * HZ / 10;
11949 }
11950 bnxt_queue_fw_reset_work(bp, tmo);
11951 }
11952 fw_reset_exit:
11953 bnxt_rtnl_unlock_sp(bp);
11954 }
11955
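/* P5 chips only: look for completion rings that have work pending but whose
 * consumer index has not advanced since the last check, which indicates a
 * missed interrupt.  The ring state is queried from firmware for diagnostics
 * and the event is counted in missed_irqs.
 */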
11956 static void bnxt_chk_missed_irq(struct bnxt *bp)
11957 {
11958 int i;
11959
11960 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11961 return;
11962
11963 for (i = 0; i < bp->cp_nr_rings; i++) {
11964 struct bnxt_napi *bnapi = bp->bnapi[i];
11965 struct bnxt_cp_ring_info *cpr;
11966 u32 fw_ring_id;
11967 int j;
11968
11969 if (!bnapi)
11970 continue;
11971
11972 cpr = &bnapi->cp_ring;
11973 for (j = 0; j < 2; j++) {
11974 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11975 u32 val[2];
11976
11977 if (!cpr2 || cpr2->has_more_work ||
11978 !bnxt_has_work(bp, cpr2))
11979 continue;
11980
11981 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11982 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11983 continue;
11984 }
11985 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11986 bnxt_dbg_hwrm_ring_info_get(bp,
11987 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11988 fw_ring_id, &val[0], &val[1]);
11989 cpr->sw_stats.cmn.missed_irqs++;
11990 }
11991 }
11992 }
11993
11994 static void bnxt_cfg_ntp_filters(struct bnxt *);
11995
11996 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11997 {
11998 struct bnxt_link_info *link_info = &bp->link_info;
11999
12000 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
12001 link_info->autoneg = BNXT_AUTONEG_SPEED;
12002 if (bp->hwrm_spec_code >= 0x10201) {
12003 if (link_info->auto_pause_setting &
12004 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
12005 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
12006 } else {
12007 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
12008 }
12009 link_info->advertising = link_info->auto_link_speeds;
12010 link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
12011 } else {
12012 link_info->req_link_speed = link_info->force_link_speed;
12013 link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
12014 if (link_info->force_pam4_link_speed) {
12015 link_info->req_link_speed =
12016 link_info->force_pam4_link_speed;
12017 link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
12018 }
12019 link_info->req_duplex = link_info->duplex_setting;
12020 }
12021 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
12022 link_info->req_flow_ctrl =
12023 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
12024 else
12025 link_info->req_flow_ctrl = link_info->force_pause_setting;
12026 }
12027
12028 static void bnxt_fw_echo_reply(struct bnxt *bp)
12029 {
12030 struct bnxt_fw_health *fw_health = bp->fw_health;
12031 struct hwrm_func_echo_response_input *req;
12032 int rc;
12033
12034 rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
12035 if (rc)
12036 return;
12037 req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
12038 req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
12039 hwrm_req_send(bp, req);
12040 }
12041
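/* Slow path workqueue handler.  Runs with BNXT_STATE_IN_SP_TASK set and
 * services the queued sp_event bits: rx mode and ntuple filter updates,
 * stats, link changes, echo replies, and the various reset events.
 */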
12042 static void bnxt_sp_task(struct work_struct *work)
12043 {
12044 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
12045
12046 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12047 smp_mb__after_atomic();
12048 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
12049 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12050 return;
12051 }
12052
12053 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
12054 bnxt_cfg_rx_mode(bp);
12055
12056 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
12057 bnxt_cfg_ntp_filters(bp);
12058 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
12059 bnxt_hwrm_exec_fwd_req(bp);
12060 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
12061 bnxt_hwrm_port_qstats(bp, 0);
12062 bnxt_hwrm_port_qstats_ext(bp, 0);
12063 bnxt_accumulate_all_stats(bp);
12064 }
12065
12066 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
12067 int rc;
12068
12069 mutex_lock(&bp->link_lock);
12070 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
12071 &bp->sp_event))
12072 bnxt_hwrm_phy_qcaps(bp);
12073
12074 rc = bnxt_update_link(bp, true);
12075 if (rc)
12076 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
12077 rc);
12078
12079 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
12080 &bp->sp_event))
12081 bnxt_init_ethtool_link_settings(bp);
12082 mutex_unlock(&bp->link_lock);
12083 }
12084 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
12085 int rc;
12086
12087 mutex_lock(&bp->link_lock);
12088 rc = bnxt_update_phy_setting(bp);
12089 mutex_unlock(&bp->link_lock);
12090 if (rc) {
12091 netdev_warn(bp->dev, "update phy settings retry failed\n");
12092 } else {
12093 bp->link_info.phy_retry = false;
12094 netdev_info(bp->dev, "update phy settings retry succeeded\n");
12095 }
12096 }
12097 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
12098 mutex_lock(&bp->link_lock);
12099 bnxt_get_port_module_status(bp);
12100 mutex_unlock(&bp->link_lock);
12101 }
12102
12103 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
12104 bnxt_tc_flow_stats_work(bp);
12105
12106 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
12107 bnxt_chk_missed_irq(bp);
12108
12109 if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
12110 bnxt_fw_echo_reply(bp);
12111
12112 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
12113 * must be the last functions to be called before exiting.
12114 */
12115 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
12116 bnxt_reset(bp, false);
12117
12118 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
12119 bnxt_reset(bp, true);
12120
12121 if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
12122 bnxt_rx_ring_reset(bp);
12123
12124 if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
12125 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
12126 test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
12127 bnxt_devlink_health_fw_report(bp);
12128 else
12129 bnxt_fw_reset(bp);
12130 }
12131
12132 if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
12133 if (!is_bnxt_fw_ok(bp))
12134 bnxt_devlink_health_fw_report(bp);
12135 }
12136
12137 smp_mb__before_atomic();
12138 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12139 }
12140
12141 /* Under rtnl_lock */
12142 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
12143 int tx_xdp)
12144 {
12145 int max_rx, max_tx, tx_sets = 1;
12146 int tx_rings_needed, stats;
12147 int rx_rings = rx;
12148 int cp, vnics, rc;
12149
12150 if (tcs)
12151 tx_sets = tcs;
12152
12153 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
12154 if (rc)
12155 return rc;
12156
12157 if (max_rx < rx)
12158 return -ENOMEM;
12159
12160 tx_rings_needed = tx * tx_sets + tx_xdp;
12161 if (max_tx < tx_rings_needed)
12162 return -ENOMEM;
12163
12164 vnics = 1;
12165 if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
12166 vnics += rx_rings;
12167
12168 if (bp->flags & BNXT_FLAG_AGG_RINGS)
12169 rx_rings <<= 1;
12170 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
12171 stats = cp;
12172 if (BNXT_NEW_RM(bp)) {
12173 cp += bnxt_get_ulp_msix_num(bp);
12174 stats += bnxt_get_ulp_stat_ctxs(bp);
12175 }
12176 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
12177 stats, vnics);
12178 }
12179
12180 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
12181 {
12182 if (bp->bar2) {
12183 pci_iounmap(pdev, bp->bar2);
12184 bp->bar2 = NULL;
12185 }
12186
12187 if (bp->bar1) {
12188 pci_iounmap(pdev, bp->bar1);
12189 bp->bar1 = NULL;
12190 }
12191
12192 if (bp->bar0) {
12193 pci_iounmap(pdev, bp->bar0);
12194 bp->bar0 = NULL;
12195 }
12196 }
12197
12198 static void bnxt_cleanup_pci(struct bnxt *bp)
12199 {
12200 bnxt_unmap_bars(bp, bp->pdev);
12201 pci_release_regions(bp->pdev);
12202 if (pci_is_enabled(bp->pdev))
12203 pci_disable_device(bp->pdev);
12204 }
12205
12206 static void bnxt_init_dflt_coal(struct bnxt *bp)
12207 {
12208 struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
12209 struct bnxt_coal *coal;
12210 u16 flags = 0;
12211
12212 if (coal_cap->cmpl_params &
12213 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
12214 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
12215
12216 /* Tick values in microseconds.
12217 * 1 coal_buf x bufs_per_record = 1 completion record.
12218 */
12219 coal = &bp->rx_coal;
12220 coal->coal_ticks = 10;
12221 coal->coal_bufs = 30;
12222 coal->coal_ticks_irq = 1;
12223 coal->coal_bufs_irq = 2;
12224 coal->idle_thresh = 50;
12225 coal->bufs_per_record = 2;
12226 coal->budget = 64; /* NAPI budget */
12227 coal->flags = flags;
12228
12229 coal = &bp->tx_coal;
12230 coal->coal_ticks = 28;
12231 coal->coal_bufs = 30;
12232 coal->coal_ticks_irq = 2;
12233 coal->coal_bufs_irq = 2;
12234 coal->bufs_per_record = 1;
12235 coal->flags = flags;
12236
12237 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
12238 }
12239
12240 static int bnxt_fw_init_one_p1(struct bnxt *bp)
12241 {
12242 int rc;
12243
12244 bp->fw_cap = 0;
12245 rc = bnxt_hwrm_ver_get(bp);
12246 bnxt_try_map_fw_health_reg(bp);
12247 if (rc) {
12248 rc = bnxt_try_recover_fw(bp);
12249 if (rc)
12250 return rc;
12251 rc = bnxt_hwrm_ver_get(bp);
12252 if (rc)
12253 return rc;
12254 }
12255
12256 bnxt_nvm_cfg_ver_get(bp);
12257
12258 rc = bnxt_hwrm_func_reset(bp);
12259 if (rc)
12260 return -ENODEV;
12261
12262 bnxt_hwrm_fw_set_time(bp);
12263 return 0;
12264 }
12265
12266 static int bnxt_fw_init_one_p2(struct bnxt *bp)
12267 {
12268 int rc;
12269
12270 /* Get the MAX capabilities for this function */
12271 rc = bnxt_hwrm_func_qcaps(bp);
12272 if (rc) {
12273 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
12274 rc);
12275 return -ENODEV;
12276 }
12277
12278 rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
12279 if (rc)
12280 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
12281 rc);
12282
12283 if (bnxt_alloc_fw_health(bp)) {
12284 netdev_warn(bp->dev, "no memory for firmware error recovery\n");
12285 } else {
12286 rc = bnxt_hwrm_error_recovery_qcfg(bp);
12287 if (rc)
12288 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
12289 rc);
12290 }
12291
12292 rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
12293 if (rc)
12294 return -ENODEV;
12295
12296 bnxt_hwrm_func_qcfg(bp);
12297 bnxt_hwrm_vnic_qcaps(bp);
12298 bnxt_hwrm_port_led_qcaps(bp);
12299 bnxt_ethtool_init(bp);
12300 if (bp->fw_cap & BNXT_FW_CAP_PTP)
12301 __bnxt_hwrm_ptp_qcfg(bp);
12302 bnxt_dcb_init(bp);
12303 return 0;
12304 }
12305
12306 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
12307 {
12308 bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
12309 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
12310 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
12311 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
12312 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
12313 if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA)
12314 bp->rss_hash_delta = bp->rss_hash_cfg;
12315 if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
12316 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
12317 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
12318 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
12319 }
12320 }
12321
12322 static void bnxt_set_dflt_rfs(struct bnxt *bp)
12323 {
12324 struct net_device *dev = bp->dev;
12325
12326 dev->hw_features &= ~NETIF_F_NTUPLE;
12327 dev->features &= ~NETIF_F_NTUPLE;
12328 bp->flags &= ~BNXT_FLAG_RFS;
12329 if (bnxt_rfs_supported(bp)) {
12330 dev->hw_features |= NETIF_F_NTUPLE;
12331 if (bnxt_rfs_capable(bp)) {
12332 bp->flags |= BNXT_FLAG_RFS;
12333 dev->features |= NETIF_F_NTUPLE;
12334 }
12335 }
12336 }
12337
12338 static void bnxt_fw_init_one_p3(struct bnxt *bp)
12339 {
12340 struct pci_dev *pdev = bp->pdev;
12341
12342 bnxt_set_dflt_rss_hash_type(bp);
12343 bnxt_set_dflt_rfs(bp);
12344
12345 bnxt_get_wol_settings(bp);
12346 if (bp->flags & BNXT_FLAG_WOL_CAP)
12347 device_set_wakeup_enable(&pdev->dev, bp->wol);
12348 else
12349 device_set_wakeup_capable(&pdev->dev, false);
12350
12351 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
12352 bnxt_hwrm_coal_params_qcaps(bp);
12353 }
12354
12355 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
12356
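/* Full firmware init sequence: query (or recover) the firmware in phase 1,
 * query capabilities and register the driver in phase 2, probe the PHY and
 * approve the MAC address, then apply default settings in phase 3.
 */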
12357 int bnxt_fw_init_one(struct bnxt *bp)
12358 {
12359 int rc;
12360
12361 rc = bnxt_fw_init_one_p1(bp);
12362 if (rc) {
12363 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
12364 return rc;
12365 }
12366 rc = bnxt_fw_init_one_p2(bp);
12367 if (rc) {
12368 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
12369 return rc;
12370 }
12371 rc = bnxt_probe_phy(bp, false);
12372 if (rc)
12373 return rc;
12374 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
12375 if (rc)
12376 return rc;
12377
12378 bnxt_fw_init_one_p3(bp);
12379 return 0;
12380 }
12381
12382 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
12383 {
12384 struct bnxt_fw_health *fw_health = bp->fw_health;
12385 u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
12386 u32 val = fw_health->fw_reset_seq_vals[reg_idx];
12387 u32 reg_type, reg_off, delay_msecs;
12388
12389 delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
12390 reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
12391 reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
12392 switch (reg_type) {
12393 case BNXT_FW_HEALTH_REG_TYPE_CFG:
12394 pci_write_config_dword(bp->pdev, reg_off, val);
12395 break;
12396 case BNXT_FW_HEALTH_REG_TYPE_GRC:
12397 writel(reg_off & BNXT_GRC_BASE_MASK,
12398 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
12399 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
12400 fallthrough;
12401 case BNXT_FW_HEALTH_REG_TYPE_BAR0:
12402 writel(val, bp->bar0 + reg_off);
12403 break;
12404 case BNXT_FW_HEALTH_REG_TYPE_BAR1:
12405 writel(val, bp->bar1 + reg_off);
12406 break;
12407 }
12408 if (delay_msecs) {
12409 pci_read_config_dword(bp->pdev, 0, &val);
12410 msleep(delay_msecs);
12411 }
12412 }
12413
12414 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
12415 {
12416 struct hwrm_func_qcfg_output *resp;
12417 struct hwrm_func_qcfg_input *req;
12418 bool result = true; /* firmware will enforce if unknown */
12419
12420 if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
12421 return result;
12422
12423 if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
12424 return result;
12425
12426 req->fid = cpu_to_le16(0xffff);
12427 resp = hwrm_req_hold(bp, req);
12428 if (!hwrm_req_send(bp, req))
12429 result = !!(le16_to_cpu(resp->flags) &
12430 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
12431 hwrm_req_drop(bp, req);
12432 return result;
12433 }
12434
12435 static void bnxt_reset_all(struct bnxt *bp)
12436 {
12437 struct bnxt_fw_health *fw_health = bp->fw_health;
12438 int i, rc;
12439
12440 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12441 bnxt_fw_reset_via_optee(bp);
12442 bp->fw_reset_timestamp = jiffies;
12443 return;
12444 }
12445
12446 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
12447 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
12448 bnxt_fw_reset_writel(bp, i);
12449 } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
12450 struct hwrm_fw_reset_input *req;
12451
12452 rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
12453 if (!rc) {
12454 req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
12455 req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
12456 req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
12457 req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
12458 rc = hwrm_req_send(bp, req);
12459 }
12460 if (rc != -ENODEV)
12461 netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
12462 }
12463 bp->fw_reset_timestamp = jiffies;
12464 }
12465
12466 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
12467 {
12468 return time_after(jiffies, bp->fw_reset_timestamp +
12469 (bp->fw_reset_max_dsecs * HZ / 10));
12470 }
12471
12472 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12473 {
12474 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12475 if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12476 bnxt_ulp_start(bp, rc);
12477 bnxt_dl_health_fw_status_update(bp, false);
12478 }
12479 bp->fw_reset_state = 0;
12480 dev_close(bp->dev);
12481 }
12482
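/* Delayed work that drives the firmware reset state machine: wait for VFs
 * to unregister, poll the firmware through shutdown and reboot, re-enable
 * the PCI device, and finally reopen the netdev.  Each state re-queues the
 * work until it completes or times out and aborts.
 */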
12483 static void bnxt_fw_reset_task(struct work_struct *work)
12484 {
12485 struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
12486 int rc = 0;
12487
12488 if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12489 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12490 return;
12491 }
12492
12493 switch (bp->fw_reset_state) {
12494 case BNXT_FW_RESET_STATE_POLL_VF: {
12495 int n = bnxt_get_registered_vfs(bp);
12496 int tmo;
12497
12498 if (n < 0) {
12499 netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
12500 n, jiffies_to_msecs(jiffies -
12501 bp->fw_reset_timestamp));
12502 goto fw_reset_abort;
12503 } else if (n > 0) {
12504 if (bnxt_fw_reset_timeout(bp)) {
12505 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12506 bp->fw_reset_state = 0;
12507 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12508 n);
12509 return;
12510 }
12511 bnxt_queue_fw_reset_work(bp, HZ / 10);
12512 return;
12513 }
12514 bp->fw_reset_timestamp = jiffies;
12515 rtnl_lock();
12516 if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12517 bnxt_fw_reset_abort(bp, rc);
12518 rtnl_unlock();
12519 return;
12520 }
12521 bnxt_fw_reset_close(bp);
12522 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12523 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12524 tmo = HZ / 10;
12525 } else {
12526 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12527 tmo = bp->fw_reset_min_dsecs * HZ / 10;
12528 }
12529 rtnl_unlock();
12530 bnxt_queue_fw_reset_work(bp, tmo);
12531 return;
12532 }
12533 case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12534 u32 val;
12535
12536 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12537 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
12538 !bnxt_fw_reset_timeout(bp)) {
12539 bnxt_queue_fw_reset_work(bp, HZ / 5);
12540 return;
12541 }
12542
12543 if (!bp->fw_health->primary) {
12544 u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12545
12546 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12547 bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12548 return;
12549 }
12550 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12551 }
12552 fallthrough;
12553 case BNXT_FW_RESET_STATE_RESET_FW:
12554 bnxt_reset_all(bp);
12555 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12556 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
12557 return;
12558 case BNXT_FW_RESET_STATE_ENABLE_DEV:
12559 bnxt_inv_fw_health_reg(bp);
12560 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12561 !bp->fw_reset_min_dsecs) {
12562 u16 val;
12563
12564 pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12565 if (val == 0xffff) {
12566 if (bnxt_fw_reset_timeout(bp)) {
12567 netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
12568 rc = -ETIMEDOUT;
12569 goto fw_reset_abort;
12570 }
12571 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12572 return;
12573 }
12574 }
12575 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
12576 clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
12577 if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
12578 !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
12579 bnxt_dl_remote_reload(bp);
12580 if (pci_enable_device(bp->pdev)) {
12581 netdev_err(bp->dev, "Cannot re-enable PCI device\n");
12582 rc = -ENODEV;
12583 goto fw_reset_abort;
12584 }
12585 pci_set_master(bp->pdev);
12586 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
12587 fallthrough;
12588 case BNXT_FW_RESET_STATE_POLL_FW:
12589 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
12590 rc = bnxt_hwrm_poll(bp);
12591 if (rc) {
12592 if (bnxt_fw_reset_timeout(bp)) {
12593 netdev_err(bp->dev, "Firmware reset aborted\n");
12594 goto fw_reset_abort_status;
12595 }
12596 bnxt_queue_fw_reset_work(bp, HZ / 5);
12597 return;
12598 }
12599 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12600 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
12601 fallthrough;
12602 case BNXT_FW_RESET_STATE_OPENING:
12603 while (!rtnl_trylock()) {
12604 bnxt_queue_fw_reset_work(bp, HZ / 10);
12605 return;
12606 }
12607 rc = bnxt_open(bp->dev);
12608 if (rc) {
12609 netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12610 bnxt_fw_reset_abort(bp, rc);
12611 rtnl_unlock();
12612 return;
12613 }
12614
12615 if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
12616 bp->fw_health->enabled) {
12617 bp->fw_health->last_fw_reset_cnt =
12618 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
12619 }
12620 bp->fw_reset_state = 0;
12621 /* Make sure fw_reset_state is 0 before clearing the flag */
12622 smp_mb__before_atomic();
12623 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12624 bnxt_ulp_start(bp, 0);
12625 bnxt_reenable_sriov(bp);
12626 bnxt_vf_reps_alloc(bp);
12627 bnxt_vf_reps_open(bp);
12628 bnxt_ptp_reapply_pps(bp);
12629 clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
12630 if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
12631 bnxt_dl_health_fw_recovery_done(bp);
12632 bnxt_dl_health_fw_status_update(bp, true);
12633 }
12634 rtnl_unlock();
12635 break;
12636 }
12637 return;
12638
12639 fw_reset_abort_status:
12640 if (bp->fw_health->status_reliable ||
12641 (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12642 u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12643
12644 netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12645 }
12646 fw_reset_abort:
12647 rtnl_lock();
12648 bnxt_fw_reset_abort(bp, rc);
12649 rtnl_unlock();
12650 }
12651
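/* Basic PCI bring-up for the device: enable it, claim its regions, set the
 * DMA mask, map BAR 0 and BAR 4, and initialize the software state (work
 * items, locks, timer, default ring sizes).
 */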
12652 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12653 {
12654 int rc;
12655 struct bnxt *bp = netdev_priv(dev);
12656
12657 SET_NETDEV_DEV(dev, &pdev->dev);
12658
12659 /* enable device (incl. PCI PM wakeup), and bus-mastering */
12660 rc = pci_enable_device(pdev);
12661 if (rc) {
12662 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12663 goto init_err;
12664 }
12665
12666 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12667 dev_err(&pdev->dev,
12668 "Cannot find PCI device base address, aborting\n");
12669 rc = -ENODEV;
12670 goto init_err_disable;
12671 }
12672
12673 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12674 if (rc) {
12675 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12676 goto init_err_disable;
12677 }
12678
12679 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12680 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12681 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
12682 rc = -EIO;
12683 goto init_err_release;
12684 }
12685
12686 pci_set_master(pdev);
12687
12688 bp->dev = dev;
12689 bp->pdev = pdev;
12690
12691 /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12692 * determines the BAR size.
12693 */
12694 bp->bar0 = pci_ioremap_bar(pdev, 0);
12695 if (!bp->bar0) {
12696 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12697 rc = -ENOMEM;
12698 goto init_err_release;
12699 }
12700
12701 bp->bar2 = pci_ioremap_bar(pdev, 4);
12702 if (!bp->bar2) {
12703 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12704 rc = -ENOMEM;
12705 goto init_err_release;
12706 }
12707
12708 INIT_WORK(&bp->sp_task, bnxt_sp_task);
12709 INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
12710
12711 spin_lock_init(&bp->ntp_fltr_lock);
12712 #if BITS_PER_LONG == 32
12713 spin_lock_init(&bp->db_lock);
12714 #endif
12715
12716 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12717 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12718
12719 timer_setup(&bp->timer, bnxt_timer, 0);
12720 bp->current_interval = BNXT_TIMER_INTERVAL;
12721
12722 bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12723 bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12724
12725 clear_bit(BNXT_STATE_OPEN, &bp->state);
12726 return 0;
12727
12728 init_err_release:
12729 bnxt_unmap_bars(bp, pdev);
12730 pci_release_regions(pdev);
12731
12732 init_err_disable:
12733 pci_disable_device(pdev);
12734
12735 init_err:
12736 return rc;
12737 }
12738
12739 /* rtnl_lock held */
12740 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12741 {
12742 struct sockaddr *addr = p;
12743 struct bnxt *bp = netdev_priv(dev);
12744 int rc = 0;
12745
12746 if (!is_valid_ether_addr(addr->sa_data))
12747 return -EADDRNOTAVAIL;
12748
12749 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12750 return 0;
12751
12752 rc = bnxt_approve_mac(bp, addr->sa_data, true);
12753 if (rc)
12754 return rc;
12755
12756 eth_hw_addr_set(dev, addr->sa_data);
12757 if (netif_running(dev)) {
12758 bnxt_close_nic(bp, false, false);
12759 rc = bnxt_open_nic(bp, false, false);
12760 }
12761
12762 return rc;
12763 }
12764
12765 /* rtnl_lock held */
12766 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12767 {
12768 struct bnxt *bp = netdev_priv(dev);
12769
12770 if (netif_running(dev))
12771 bnxt_close_nic(bp, true, false);
12772
12773 dev->mtu = new_mtu;
12774 bnxt_set_ring_params(bp);
12775
12776 if (netif_running(dev))
12777 return bnxt_open_nic(bp, true, false);
12778
12779 return 0;
12780 }
12781
12782 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
12783 {
12784 struct bnxt *bp = netdev_priv(dev);
12785 bool sh = false;
12786 int rc;
12787
12788 if (tc > bp->max_tc) {
12789 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
12790 tc, bp->max_tc);
12791 return -EINVAL;
12792 }
12793
12794 if (netdev_get_num_tc(dev) == tc)
12795 return 0;
12796
12797 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12798 sh = true;
12799
12800 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12801 sh, tc, bp->tx_nr_rings_xdp);
12802 if (rc)
12803 return rc;
12804
12805 /* Need to close the device and redo hw resource allocations */
12806 if (netif_running(bp->dev))
12807 bnxt_close_nic(bp, true, false);
12808
12809 if (tc) {
12810 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12811 netdev_set_num_tc(dev, tc);
12812 } else {
12813 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12814 netdev_reset_tc(dev);
12815 }
12816 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12817 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12818 bp->tx_nr_rings + bp->rx_nr_rings;
12819
12820 if (netif_running(bp->dev))
12821 return bnxt_open_nic(bp, true, false);
12822
12823 return 0;
12824 }
12825
12826 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12827 void *cb_priv)
12828 {
12829 struct bnxt *bp = cb_priv;
12830
12831 if (!bnxt_tc_flower_enabled(bp) ||
12832 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
12833 return -EOPNOTSUPP;
12834
12835 switch (type) {
12836 case TC_SETUP_CLSFLOWER:
12837 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12838 default:
12839 return -EOPNOTSUPP;
12840 }
12841 }
12842
12843 LIST_HEAD(bnxt_block_cb_list);
12844
12845 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12846 void *type_data)
12847 {
12848 struct bnxt *bp = netdev_priv(dev);
12849
12850 switch (type) {
12851 case TC_SETUP_BLOCK:
12852 return flow_block_cb_setup_simple(type_data,
12853 &bnxt_block_cb_list,
12854 bnxt_setup_tc_block_cb,
12855 bp, bp, true);
12856 case TC_SETUP_QDISC_MQPRIO: {
12857 struct tc_mqprio_qopt *mqprio = type_data;
12858
12859 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
12860
12861 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12862 }
12863 default:
12864 return -EOPNOTSUPP;
12865 }
12866 }
12867
12868 #ifdef CONFIG_RFS_ACCEL
12869 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12870 struct bnxt_ntuple_filter *f2)
12871 {
12872 struct flow_keys *keys1 = &f1->fkeys;
12873 struct flow_keys *keys2 = &f2->fkeys;
12874
12875 if (keys1->basic.n_proto != keys2->basic.n_proto ||
12876 keys1->basic.ip_proto != keys2->basic.ip_proto)
12877 return false;
12878
12879 if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12880 if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12881 keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12882 return false;
12883 } else {
12884 if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12885 sizeof(keys1->addrs.v6addrs.src)) ||
12886 memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12887 sizeof(keys1->addrs.v6addrs.dst)))
12888 return false;
12889 }
12890
12891 if (keys1->ports.ports == keys2->ports.ports &&
12892 keys1->control.flags == keys2->control.flags &&
12893 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12894 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
12895 return true;
12896
12897 return false;
12898 }
12899
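/* aRFS .ndo_rx_flow_steer handler: dissect the skb into flow keys, return
 * the existing filter ID if an identical filter is already installed,
 * otherwise allocate a new ntuple filter and schedule sp_task work to
 * program it into hardware.
 */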
12900 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12901 u16 rxq_index, u32 flow_id)
12902 {
12903 struct bnxt *bp = netdev_priv(dev);
12904 struct bnxt_ntuple_filter *fltr, *new_fltr;
12905 struct flow_keys *fkeys;
12906 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
12907 int rc = 0, idx, bit_id, l2_idx = 0;
12908 struct hlist_head *head;
12909 u32 flags;
12910
12911 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12912 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12913 int off = 0, j;
12914
12915 netif_addr_lock_bh(dev);
12916 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12917 if (ether_addr_equal(eth->h_dest,
12918 vnic->uc_list + off)) {
12919 l2_idx = j + 1;
12920 break;
12921 }
12922 }
12923 netif_addr_unlock_bh(dev);
12924 if (!l2_idx)
12925 return -EINVAL;
12926 }
12927 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12928 if (!new_fltr)
12929 return -ENOMEM;
12930
12931 fkeys = &new_fltr->fkeys;
12932 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12933 rc = -EPROTONOSUPPORT;
12934 goto err_free;
12935 }
12936
12937 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12938 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
12939 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12940 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12941 rc = -EPROTONOSUPPORT;
12942 goto err_free;
12943 }
12944 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12945 bp->hwrm_spec_code < 0x10601) {
12946 rc = -EPROTONOSUPPORT;
12947 goto err_free;
12948 }
12949 flags = fkeys->control.flags;
12950 if (((flags & FLOW_DIS_ENCAPSULATION) &&
12951 bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12952 rc = -EPROTONOSUPPORT;
12953 goto err_free;
12954 }
12955
12956 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
12957 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12958
12959 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12960 head = &bp->ntp_fltr_hash_tbl[idx];
12961 rcu_read_lock();
12962 hlist_for_each_entry_rcu(fltr, head, hash) {
12963 if (bnxt_fltr_match(fltr, new_fltr)) {
12964 rc = fltr->sw_id;
12965 rcu_read_unlock();
12966 goto err_free;
12967 }
12968 }
12969 rcu_read_unlock();
12970
12971 spin_lock_bh(&bp->ntp_fltr_lock);
12972 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12973 BNXT_NTP_FLTR_MAX_FLTR, 0);
12974 if (bit_id < 0) {
12975 spin_unlock_bh(&bp->ntp_fltr_lock);
12976 rc = -ENOMEM;
12977 goto err_free;
12978 }
12979
12980 new_fltr->sw_id = (u16)bit_id;
12981 new_fltr->flow_id = flow_id;
12982 new_fltr->l2_fltr_idx = l2_idx;
12983 new_fltr->rxq = rxq_index;
12984 hlist_add_head_rcu(&new_fltr->hash, head);
12985 bp->ntp_fltr_count++;
12986 spin_unlock_bh(&bp->ntp_fltr_lock);
12987
12988 bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT);
12989
12990 return new_fltr->sw_id;
12991
12992 err_free:
12993 kfree(new_fltr);
12994 return rc;
12995 }
12996
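/* Walk the ntuple filter hash table: free filters that the RPS core says
 * have expired and program filters that have not yet been set up in
 * hardware.
 */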
12997 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12998 {
12999 int i;
13000
13001 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
13002 struct hlist_head *head;
13003 struct hlist_node *tmp;
13004 struct bnxt_ntuple_filter *fltr;
13005 int rc;
13006
13007 head = &bp->ntp_fltr_hash_tbl[i];
13008 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
13009 bool del = false;
13010
13011 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
13012 if (rps_may_expire_flow(bp->dev, fltr->rxq,
13013 fltr->flow_id,
13014 fltr->sw_id)) {
13015 bnxt_hwrm_cfa_ntuple_filter_free(bp,
13016 fltr);
13017 del = true;
13018 }
13019 } else {
13020 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
13021 fltr);
13022 if (rc)
13023 del = true;
13024 else
13025 set_bit(BNXT_FLTR_VALID, &fltr->state);
13026 }
13027
13028 if (del) {
13029 spin_lock_bh(&bp->ntp_fltr_lock);
13030 hlist_del_rcu(&fltr->hash);
13031 bp->ntp_fltr_count--;
13032 spin_unlock_bh(&bp->ntp_fltr_lock);
13033 synchronize_rcu();
13034 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
13035 kfree(fltr);
13036 }
13037 }
13038 }
13039 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
13040 netdev_info(bp->dev, "Received PF driver unload event!\n");
13041 }
13042
13043 #else
13044
13045 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
13046 {
13047 }
13048
13049 #endif /* CONFIG_RFS_ACCEL */
13050
13051 static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
13052 unsigned int entry, struct udp_tunnel_info *ti)
13053 {
13054 struct bnxt *bp = netdev_priv(netdev);
13055 unsigned int cmd;
13056
13057 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
13058 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
13059 else
13060 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
13061
13062 return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
13063 }
13064
13065 static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
13066 unsigned int entry, struct udp_tunnel_info *ti)
13067 {
13068 struct bnxt *bp = netdev_priv(netdev);
13069 unsigned int cmd;
13070
13071 if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
13072 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
13073 else
13074 cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
13075
13076 return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
13077 }
13078
13079 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
13080 .set_port = bnxt_udp_tunnel_set_port,
13081 .unset_port = bnxt_udp_tunnel_unset_port,
13082 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
13083 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
13084 .tables = {
13085 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
13086 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
13087 },
13088 };
13089
13090 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
13091 struct net_device *dev, u32 filter_mask,
13092 int nlflags)
13093 {
13094 struct bnxt *bp = netdev_priv(dev);
13095
13096 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
13097 nlflags, filter_mask, NULL);
13098 }
13099
13100 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
13101 u16 flags, struct netlink_ext_ack *extack)
13102 {
13103 struct bnxt *bp = netdev_priv(dev);
13104 struct nlattr *attr, *br_spec;
13105 int rem, rc = 0;
13106
13107 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
13108 return -EOPNOTSUPP;
13109
13110 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
13111 if (!br_spec)
13112 return -EINVAL;
13113
13114 nla_for_each_nested(attr, br_spec, rem) {
13115 u16 mode;
13116
13117 if (nla_type(attr) != IFLA_BRIDGE_MODE)
13118 continue;
13119
13120 mode = nla_get_u16(attr);
13121 if (mode == bp->br_mode)
13122 break;
13123
13124 rc = bnxt_hwrm_set_br_mode(bp, mode);
13125 if (!rc)
13126 bp->br_mode = mode;
13127 break;
13128 }
13129 return rc;
13130 }
13131
13132 int bnxt_get_port_parent_id(struct net_device *dev,
13133 struct netdev_phys_item_id *ppid)
13134 {
13135 struct bnxt *bp = netdev_priv(dev);
13136
13137 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
13138 return -EOPNOTSUPP;
13139
13140 /* The PF and its VF reps only support the switchdev framework */
13141 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
13142 return -EOPNOTSUPP;
13143
13144 ppid->id_len = sizeof(bp->dsn);
13145 memcpy(ppid->id, bp->dsn, ppid->id_len);
13146
13147 return 0;
13148 }
13149
13150 static const struct net_device_ops bnxt_netdev_ops = {
13151 .ndo_open = bnxt_open,
13152 .ndo_start_xmit = bnxt_start_xmit,
13153 .ndo_stop = bnxt_close,
13154 .ndo_get_stats64 = bnxt_get_stats64,
13155 .ndo_set_rx_mode = bnxt_set_rx_mode,
13156 .ndo_eth_ioctl = bnxt_ioctl,
13157 .ndo_validate_addr = eth_validate_addr,
13158 .ndo_set_mac_address = bnxt_change_mac_addr,
13159 .ndo_change_mtu = bnxt_change_mtu,
13160 .ndo_fix_features = bnxt_fix_features,
13161 .ndo_set_features = bnxt_set_features,
13162 .ndo_features_check = bnxt_features_check,
13163 .ndo_tx_timeout = bnxt_tx_timeout,
13164 #ifdef CONFIG_BNXT_SRIOV
13165 .ndo_get_vf_config = bnxt_get_vf_config,
13166 .ndo_set_vf_mac = bnxt_set_vf_mac,
13167 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
13168 .ndo_set_vf_rate = bnxt_set_vf_bw,
13169 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
13170 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
13171 .ndo_set_vf_trust = bnxt_set_vf_trust,
13172 #endif
13173 .ndo_setup_tc = bnxt_setup_tc,
13174 #ifdef CONFIG_RFS_ACCEL
13175 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
13176 #endif
13177 .ndo_bpf = bnxt_xdp,
13178 .ndo_xdp_xmit = bnxt_xdp_xmit,
13179 .ndo_bridge_getlink = bnxt_bridge_getlink,
13180 .ndo_bridge_setlink = bnxt_bridge_setlink,
13181 };
13182
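/* PCI .remove callback: disable SR-IOV, unregister the netdev and the
 * auxiliary RDMA device, cancel outstanding work, and release all driver
 * resources.
 */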
13183 static void bnxt_remove_one(struct pci_dev *pdev)
13184 {
13185 struct net_device *dev = pci_get_drvdata(pdev);
13186 struct bnxt *bp = netdev_priv(dev);
13187
13188 if (BNXT_PF(bp))
13189 bnxt_sriov_disable(bp);
13190
13191 bnxt_rdma_aux_device_uninit(bp);
13192
13193 bnxt_ptp_clear(bp);
13194 unregister_netdev(dev);
13195 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13196 /* Flush any pending tasks */
13197 cancel_work_sync(&bp->sp_task);
13198 cancel_delayed_work_sync(&bp->fw_reset_task);
13199 bp->sp_event = 0;
13200
13201 bnxt_dl_fw_reporters_destroy(bp);
13202 bnxt_dl_unregister(bp);
13203 bnxt_shutdown_tc(bp);
13204
13205 bnxt_clear_int_mode(bp);
13206 bnxt_hwrm_func_drv_unrgtr(bp);
13207 bnxt_free_hwrm_resources(bp);
13208 bnxt_ethtool_free(bp);
13209 bnxt_dcb_free(bp);
13210 kfree(bp->ptp_cfg);
13211 bp->ptp_cfg = NULL;
13212 kfree(bp->fw_health);
13213 bp->fw_health = NULL;
13214 bnxt_cleanup_pci(bp);
13215 bnxt_free_ctx_mem(bp);
13216 kfree(bp->ctx);
13217 bp->ctx = NULL;
13218 kfree(bp->rss_indir_tbl);
13219 bp->rss_indir_tbl = NULL;
13220 bnxt_free_port_stats(bp);
13221 free_netdev(dev);
13222 }
13223
13224 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
13225 {
13226 int rc = 0;
13227 struct bnxt_link_info *link_info = &bp->link_info;
13228
13229 bp->phy_flags = 0;
13230 rc = bnxt_hwrm_phy_qcaps(bp);
13231 if (rc) {
13232 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
13233 rc);
13234 return rc;
13235 }
13236 if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
13237 bp->dev->priv_flags |= IFF_SUPP_NOFCS;
13238 else
13239 bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
13240 if (!fw_dflt)
13241 return 0;
13242
13243 mutex_lock(&bp->link_lock);
13244 rc = bnxt_update_link(bp, false);
13245 if (rc) {
13246 mutex_unlock(&bp->link_lock);
13247 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
13248 rc);
13249 return rc;
13250 }
13251
13252 /* Older firmware does not have supported_auto_speeds, so assume
13253 * that all supported speeds can be autonegotiated.
13254 */
13255 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
13256 link_info->support_auto_speeds = link_info->support_speeds;
13257
13258 bnxt_init_ethtool_link_settings(bp);
13259 mutex_unlock(&bp->link_lock);
13260 return 0;
13261 }
13262
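/* Return the number of MSI-X vectors supported by the function, read
 * directly from the MSI-X capability in PCI config space.
 */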
13263 static int bnxt_get_max_irq(struct pci_dev *pdev)
13264 {
13265 u16 ctrl;
13266
13267 if (!pdev->msix_cap)
13268 return 1;
13269
13270 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
13271 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
13272 }
13273
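/* Compute the maximum usable RX, TX and completion rings from the hardware
 * resources reported by firmware, accounting for ULP MSI-X usage,
 * aggregation rings and chip-specific quirks.
 */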
13274 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13275 int *max_cp)
13276 {
13277 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13278 int max_ring_grps = 0, max_irq;
13279
13280 *max_tx = hw_resc->max_tx_rings;
13281 *max_rx = hw_resc->max_rx_rings;
13282 *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
13283 max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
13284 bnxt_get_ulp_msix_num(bp),
13285 hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
13286 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
13287 *max_cp = min_t(int, *max_cp, max_irq);
13288 max_ring_grps = hw_resc->max_hw_ring_grps;
13289 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
13290 *max_cp -= 1;
13291 *max_rx -= 2;
13292 }
13293 if (bp->flags & BNXT_FLAG_AGG_RINGS)
13294 *max_rx >>= 1;
13295 if (bp->flags & BNXT_FLAG_CHIP_P5) {
13296 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
13297 /* On P5 chips, report the number of available NQs in *max_cp */
13298 *max_cp = max_irq;
13299 }
13300 *max_rx = min_t(int, *max_rx, max_ring_grps);
13301 }
13302
13303 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
13304 {
13305 int rx, tx, cp;
13306
13307 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
13308 *max_rx = rx;
13309 *max_tx = tx;
13310 if (!rx || !tx || !cp)
13311 return -ENOMEM;
13312
13313 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
13314 }
13315
13316 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13317 bool shared)
13318 {
13319 int rc;
13320
13321 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
13322 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
13323 /* Not enough rings, try disabling agg rings. */
13324 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
13325 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
13326 if (rc) {
13327 /* set BNXT_FLAG_AGG_RINGS back for consistency */
13328 bp->flags |= BNXT_FLAG_AGG_RINGS;
13329 return rc;
13330 }
13331 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
13332 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13333 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13334 bnxt_set_ring_params(bp);
13335 }
13336
13337 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
13338 int max_cp, max_stat, max_irq;
13339
13340 /* Reserve minimum resources for RoCE */
13341 max_cp = bnxt_get_max_func_cp_rings(bp);
13342 max_stat = bnxt_get_max_func_stat_ctxs(bp);
13343 max_irq = bnxt_get_max_func_irqs(bp);
13344 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
13345 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
13346 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
13347 return 0;
13348
13349 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
13350 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
13351 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
13352 max_cp = min_t(int, max_cp, max_irq);
13353 max_cp = min_t(int, max_cp, max_stat);
13354 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
13355 if (rc)
13356 rc = 0;
13357 }
13358 return rc;
13359 }
13360
13361 /* In the initial default shared ring configuration, each shared ring
13362 * must have an RX/TX ring pair.
13363 */
13364 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
13365 {
13366 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
13367 bp->rx_nr_rings = bp->cp_nr_rings;
13368 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
13369 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13370 }
13371
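/* Choose and reserve the default number of rings: start from the default
 * RSS queue count (reduced on multi-port cards), trim to what firmware can
 * provide, and reserve the result.
 */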
13372 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
13373 {
13374 int dflt_rings, max_rx_rings, max_tx_rings, rc;
13375
13376 if (!bnxt_can_reserve_rings(bp))
13377 return 0;
13378
13379 if (sh)
13380 bp->flags |= BNXT_FLAG_SHARED_RINGS;
13381 dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
13382 /* Reduce default rings on multi-port cards so that total default
13383 * rings do not exceed CPU count.
13384 */
13385 if (bp->port_count > 1) {
13386 int max_rings =
13387 max_t(int, num_online_cpus() / bp->port_count, 1);
13388
13389 dflt_rings = min_t(int, dflt_rings, max_rings);
13390 }
13391 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
13392 if (rc)
13393 return rc;
13394 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
13395 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
13396 if (sh)
13397 bnxt_trim_dflt_sh_rings(bp);
13398 else
13399 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
13400 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13401
13402 rc = __bnxt_reserve_rings(bp);
13403 if (rc && rc != -ENODEV)
13404 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
13405 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13406 if (sh)
13407 bnxt_trim_dflt_sh_rings(bp);
13408
13409 /* Rings may have been trimmed, re-reserve the trimmed rings. */
13410 if (bnxt_need_reserve_rings(bp)) {
13411 rc = __bnxt_reserve_rings(bp);
13412 if (rc && rc != -ENODEV)
13413 netdev_warn(bp->dev, "2nd rings reservation failed.\n");
13414 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13415 }
13416 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
13417 bp->rx_nr_rings++;
13418 bp->cp_nr_rings++;
13419 }
13420 if (rc) {
13421 bp->tx_nr_rings = 0;
13422 bp->rx_nr_rings = 0;
13423 }
13424 return rc;
13425 }
13426
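/* Set up default rings and interrupt mode if no TX rings have been
 * configured yet.
 */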
13427 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
13428 {
13429 int rc;
13430
13431 if (bp->tx_nr_rings)
13432 return 0;
13433
13434 bnxt_ulp_irq_stop(bp);
13435 bnxt_clear_int_mode(bp);
13436 rc = bnxt_set_dflt_rings(bp, true);
13437 if (rc) {
13438 if (BNXT_VF(bp) && rc == -ENODEV)
13439 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
13440 else
13441 netdev_err(bp->dev, "Not enough rings available.\n");
13442 goto init_dflt_ring_err;
13443 }
13444 rc = bnxt_init_int_mode(bp);
13445 if (rc)
13446 goto init_dflt_ring_err;
13447
13448 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13449
13450 bnxt_set_dflt_rfs(bp);
13451
13452 init_dflt_ring_err:
13453 bnxt_ulp_irq_restart(bp, rc);
13454 return rc;
13455 }
13456
13457 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
13458 {
13459 int rc;
13460
13461 ASSERT_RTNL();
13462 bnxt_hwrm_func_qcaps(bp);
13463
13464 if (netif_running(bp->dev))
13465 __bnxt_close_nic(bp, true, false);
13466
13467 bnxt_ulp_irq_stop(bp);
13468 bnxt_clear_int_mode(bp);
13469 rc = bnxt_init_int_mode(bp);
13470 bnxt_ulp_irq_restart(bp, rc);
13471
13472 if (netif_running(bp->dev)) {
13473 if (rc)
13474 dev_close(bp->dev);
13475 else
13476 rc = bnxt_open_nic(bp, true, false);
13477 }
13478
13479 return rc;
13480 }
13481
13482 static int bnxt_init_mac_addr(struct bnxt *bp)
13483 {
13484 int rc = 0;
13485
13486 if (BNXT_PF(bp)) {
13487 eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
13488 } else {
13489 #ifdef CONFIG_BNXT_SRIOV
13490 struct bnxt_vf_info *vf = &bp->vf;
13491 bool strict_approval = true;
13492
13493 if (is_valid_ether_addr(vf->mac_addr)) {
13494 /* overwrite netdev dev_addr with admin VF MAC */
13495 eth_hw_addr_set(bp->dev, vf->mac_addr);
13496 /* Older PF driver or firmware may not approve this
13497 * correctly.
13498 */
13499 strict_approval = false;
13500 } else {
13501 eth_hw_addr_random(bp->dev);
13502 }
13503 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
13504 #endif
13505 }
13506 return rc;
13507 }
13508
13509 static void bnxt_vpd_read_info(struct bnxt *bp)
13510 {
13511 struct pci_dev *pdev = bp->pdev;
13512 unsigned int vpd_size, kw_len;
13513 int pos, size;
13514 u8 *vpd_data;
13515
13516 vpd_data = pci_vpd_alloc(pdev, &vpd_size);
13517 if (IS_ERR(vpd_data)) {
13518 pci_warn(pdev, "Unable to read VPD\n");
13519 return;
13520 }
13521
13522 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13523 PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
13524 if (pos < 0)
13525 goto read_sn;
13526
13527 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13528 memcpy(bp->board_partno, &vpd_data[pos], size);
13529
13530 read_sn:
13531 pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13532 PCI_VPD_RO_KEYWORD_SERIALNO,
13533 &kw_len);
13534 if (pos < 0)
13535 goto exit;
13536
13537 size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13538 memcpy(bp->board_serialno, &vpd_data[pos], size);
13539 exit:
13540 kfree(vpd_data);
13541 }
13542
13543 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13544 {
13545 struct pci_dev *pdev = bp->pdev;
13546 u64 qword;
13547
13548 qword = pci_get_dsn(pdev);
13549 if (!qword) {
13550 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
13551 return -EOPNOTSUPP;
13552 }
13553
13554 put_unaligned_le64(qword, dsn);
13555
13556 bp->flags |= BNXT_FLAG_DSN_VALID;
13557 return 0;
13558 }
13559
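/* Map the doorbell BAR (BAR 2) using the doorbell size discovered during
 * firmware initialization.
 */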
13560 static int bnxt_map_db_bar(struct bnxt *bp)
13561 {
13562 if (!bp->db_size)
13563 return -ENODEV;
13564 bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13565 if (!bp->bar1)
13566 return -ENOMEM;
13567 return 0;
13568 }
13569
13570 void bnxt_print_device_info(struct bnxt *bp)
13571 {
13572 netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
13573 board_info[bp->board_idx].name,
13574 (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
13575
13576 pcie_print_link_status(bp->pdev);
13577 }
13578
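/* PCI .probe callback: allocate the netdev, initialize the board and the
 * firmware interface, discover capabilities, set up features, rings and
 * interrupts, then register the netdev and devlink instance.
 */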
13579 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13580 {
13581 struct net_device *dev;
13582 struct bnxt *bp;
13583 int rc, max_irqs;
13584
13585 if (pci_is_bridge(pdev))
13586 return -ENODEV;
13587
13588 /* Clear any DMA transactions left pending by the crashed kernel
13589 * while loading the driver in the kdump capture kernel.
13590 */
13591 if (is_kdump_kernel()) {
13592 pci_clear_master(pdev);
13593 pcie_flr(pdev);
13594 }
13595
13596 max_irqs = bnxt_get_max_irq(pdev);
13597 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13598 if (!dev)
13599 return -ENOMEM;
13600
13601 bp = netdev_priv(dev);
13602 bp->board_idx = ent->driver_data;
13603 bp->msg_enable = BNXT_DEF_MSG_ENABLE;
13604 bnxt_set_max_func_irqs(bp, max_irqs);
13605
13606 if (bnxt_vf_pciid(bp->board_idx))
13607 bp->flags |= BNXT_FLAG_VF;
13608
13609 /* No devlink port registration in case of a VF */
13610 if (BNXT_PF(bp))
13611 SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
13612
13613 if (pdev->msix_cap)
13614 bp->flags |= BNXT_FLAG_MSIX_CAP;
13615
13616 rc = bnxt_init_board(pdev, dev);
13617 if (rc < 0)
13618 goto init_err_free;
13619
13620 dev->netdev_ops = &bnxt_netdev_ops;
13621 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13622 dev->ethtool_ops = &bnxt_ethtool_ops;
13623 pci_set_drvdata(pdev, dev);
13624
13625 rc = bnxt_alloc_hwrm_resources(bp);
13626 if (rc)
13627 goto init_err_pci_clean;
13628
13629 mutex_init(&bp->hwrm_cmd_lock);
13630 mutex_init(&bp->link_lock);
13631
13632 rc = bnxt_fw_init_one_p1(bp);
13633 if (rc)
13634 goto init_err_pci_clean;
13635
13636 if (BNXT_PF(bp))
13637 bnxt_vpd_read_info(bp);
13638
13639 if (BNXT_CHIP_P5(bp)) {
13640 bp->flags |= BNXT_FLAG_CHIP_P5;
13641 if (BNXT_CHIP_SR2(bp))
13642 bp->flags |= BNXT_FLAG_CHIP_SR2;
13643 }
13644
13645 rc = bnxt_alloc_rss_indir_tbl(bp);
13646 if (rc)
13647 goto init_err_pci_clean;
13648
13649 rc = bnxt_fw_init_one_p2(bp);
13650 if (rc)
13651 goto init_err_pci_clean;
13652
13653 rc = bnxt_map_db_bar(bp);
13654 if (rc) {
13655 dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13656 rc);
13657 goto init_err_pci_clean;
13658 }
13659
13660 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13661 NETIF_F_TSO | NETIF_F_TSO6 |
13662 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13663 NETIF_F_GSO_IPXIP4 |
13664 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13665 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
13666 NETIF_F_RXCSUM | NETIF_F_GRO;
13667
13668 if (BNXT_SUPPORTS_TPA(bp))
13669 dev->hw_features |= NETIF_F_LRO;
13670
13671 dev->hw_enc_features =
13672 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13673 NETIF_F_TSO | NETIF_F_TSO6 |
13674 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13675 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13676 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
13677 dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13678
13679 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13680 NETIF_F_GSO_GRE_CSUM;
13681 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
13682 if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13683 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13684 if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13685 dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
13686 if (BNXT_SUPPORTS_TPA(bp))
13687 dev->hw_features |= NETIF_F_GRO_HW;
13688 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
13689 if (dev->features & NETIF_F_GRO_HW)
13690 dev->features &= ~NETIF_F_LRO;
13691 dev->priv_flags |= IFF_UNICAST_FLT;
13692
13693 netif_set_tso_max_size(dev, GSO_MAX_SIZE);
13694
13695 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
13696 NETDEV_XDP_ACT_RX_SG;
13697
13698 #ifdef CONFIG_BNXT_SRIOV
13699 init_waitqueue_head(&bp->sriov_cfg_wait);
13700 #endif
13701 if (BNXT_SUPPORTS_TPA(bp)) {
13702 bp->gro_func = bnxt_gro_func_5730x;
13703 if (BNXT_CHIP_P4(bp))
13704 bp->gro_func = bnxt_gro_func_5731x;
13705 else if (BNXT_CHIP_P5(bp))
13706 bp->gro_func = bnxt_gro_func_5750x;
13707 }
13708 if (!BNXT_CHIP_P4_PLUS(bp))
13709 bp->flags |= BNXT_FLAG_DOUBLE_DB;
13710
13711 rc = bnxt_init_mac_addr(bp);
13712 if (rc) {
13713 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13714 rc = -EADDRNOTAVAIL;
13715 goto init_err_pci_clean;
13716 }
13717
13718 if (BNXT_PF(bp)) {
13719 /* Read the adapter's DSN to use as the eswitch switch_id */
13720 rc = bnxt_pcie_dsn_get(bp, bp->dsn);
13721 }
13722
13723 /* MTU range: 60 - FW defined max */
13724 dev->min_mtu = ETH_ZLEN;
13725 dev->max_mtu = bp->max_mtu;
13726
13727 rc = bnxt_probe_phy(bp, true);
13728 if (rc)
13729 goto init_err_pci_clean;
13730
13731 bnxt_set_rx_skb_mode(bp, false);
13732 bnxt_set_tpa_flags(bp);
13733 bnxt_set_ring_params(bp);
13734 rc = bnxt_set_dflt_rings(bp, true);
13735 if (rc) {
13736 if (BNXT_VF(bp) && rc == -ENODEV) {
13737 netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
13738 } else {
13739 netdev_err(bp->dev, "Not enough rings available.\n");
13740 rc = -ENOMEM;
13741 }
13742 goto init_err_pci_clean;
13743 }
13744
13745 bnxt_fw_init_one_p3(bp);
13746
13747 bnxt_init_dflt_coal(bp);
13748
13749 if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13750 bp->flags |= BNXT_FLAG_STRIP_VLAN;
13751
13752 rc = bnxt_init_int_mode(bp);
13753 if (rc)
13754 goto init_err_pci_clean;
13755
13756 /* No TC has been set yet and rings may have been trimmed due to
13757 * limited MSIX, so we re-initialize the TX rings per TC.
13758 */
13759 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13760
13761 if (BNXT_PF(bp)) {
13762 if (!bnxt_pf_wq) {
13763 bnxt_pf_wq =
13764 create_singlethread_workqueue("bnxt_pf_wq");
13765 if (!bnxt_pf_wq) {
13766 dev_err(&pdev->dev, "Unable to create workqueue.\n");
13767 rc = -ENOMEM;
13768 goto init_err_pci_clean;
13769 }
13770 }
13771 rc = bnxt_init_tc(bp);
13772 if (rc)
13773 netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13774 rc);
13775 }
13776
13777 bnxt_inv_fw_health_reg(bp);
13778 rc = bnxt_dl_register(bp);
13779 if (rc)
13780 goto init_err_dl;
13781
13782 rc = register_netdev(dev);
13783 if (rc)
13784 goto init_err_cleanup;
13785
13786 bnxt_dl_fw_reporters_create(bp);
13787
13788 bnxt_rdma_aux_device_init(bp);
13789
13790 bnxt_print_device_info(bp);
13791
13792 pci_save_state(pdev);
13793
13794 return 0;
13795 init_err_cleanup:
13796 bnxt_dl_unregister(bp);
13797 init_err_dl:
13798 bnxt_shutdown_tc(bp);
13799 bnxt_clear_int_mode(bp);
13800
13801 init_err_pci_clean:
13802 bnxt_hwrm_func_drv_unrgtr(bp);
13803 bnxt_free_hwrm_resources(bp);
13804 bnxt_ethtool_free(bp);
13805 bnxt_ptp_clear(bp);
13806 kfree(bp->ptp_cfg);
13807 bp->ptp_cfg = NULL;
13808 kfree(bp->fw_health);
13809 bp->fw_health = NULL;
13810 bnxt_cleanup_pci(bp);
13811 bnxt_free_ctx_mem(bp);
13812 kfree(bp->ctx);
13813 bp->ctx = NULL;
13814 kfree(bp->rss_indir_tbl);
13815 bp->rss_indir_tbl = NULL;
13816
13817 init_err_free:
13818 free_netdev(dev);
13819 return rc;
13820 }
13821
13822 static void bnxt_shutdown(struct pci_dev *pdev)
13823 {
13824 struct net_device *dev = pci_get_drvdata(pdev);
13825 struct bnxt *bp;
13826
13827 if (!dev)
13828 return;
13829
13830 rtnl_lock();
13831 bp = netdev_priv(dev);
13832 if (!bp)
13833 goto shutdown_exit;
13834
13835 if (netif_running(dev))
13836 dev_close(dev);
13837
13838 bnxt_clear_int_mode(bp);
13839 pci_disable_device(pdev);
13840
13841 if (system_state == SYSTEM_POWER_OFF) {
13842 pci_wake_from_d3(pdev, bp->wol);
13843 pci_set_power_state(pdev, PCI_D3hot);
13844 }
13845
13846 shutdown_exit:
13847 rtnl_unlock();
13848 }
13849
13850 #ifdef CONFIG_PM_SLEEP
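/* System suspend handler: stop ULPs, close the netdev, unregister the
 * driver from firmware and disable the PCI device.
 */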
13851 static int bnxt_suspend(struct device *device)
13852 {
13853 struct net_device *dev = dev_get_drvdata(device);
13854 struct bnxt *bp = netdev_priv(dev);
13855 int rc = 0;
13856
13857 rtnl_lock();
13858 bnxt_ulp_stop(bp);
13859 if (netif_running(dev)) {
13860 netif_device_detach(dev);
13861 rc = bnxt_close(dev);
13862 }
13863 bnxt_hwrm_func_drv_unrgtr(bp);
13864 pci_disable_device(bp->pdev);
13865 bnxt_free_ctx_mem(bp);
13866 kfree(bp->ctx);
13867 bp->ctx = NULL;
13868 rtnl_unlock();
13869 return rc;
13870 }
13871
13872 static int bnxt_resume(struct device *device)
13873 {
13874 struct net_device *dev = dev_get_drvdata(device);
13875 struct bnxt *bp = netdev_priv(dev);
13876 int rc = 0;
13877
13878 rtnl_lock();
13879 rc = pci_enable_device(bp->pdev);
13880 if (rc) {
13881 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13882 rc);
13883 goto resume_exit;
13884 }
13885 pci_set_master(bp->pdev);
13886 if (bnxt_hwrm_ver_get(bp)) {
13887 rc = -ENODEV;
13888 goto resume_exit;
13889 }
13890 rc = bnxt_hwrm_func_reset(bp);
13891 if (rc) {
13892 rc = -EBUSY;
13893 goto resume_exit;
13894 }
13895
13896 rc = bnxt_hwrm_func_qcaps(bp);
13897 if (rc)
13898 goto resume_exit;
13899
13900 if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13901 rc = -ENODEV;
13902 goto resume_exit;
13903 }
13904
13905 bnxt_get_wol_settings(bp);
13906 if (netif_running(dev)) {
13907 rc = bnxt_open(dev);
13908 if (!rc)
13909 netif_device_attach(dev);
13910 }
13911
13912 resume_exit:
13913 bnxt_ulp_start(bp, rc);
13914 if (!rc)
13915 bnxt_reenable_sriov(bp);
13916 rtnl_unlock();
13917 return rc;
13918 }
13919
13920 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13921 #define BNXT_PM_OPS (&bnxt_pm_ops)
13922
13923 #else
13924
13925 #define BNXT_PM_OPS NULL
13926
13927 #endif /* CONFIG_PM_SLEEP */
13928
13929 /**
13930 * bnxt_io_error_detected - called when PCI error is detected
13931 * @pdev: Pointer to PCI device
13932 * @state: The current pci connection state
13933 *
13934 * This function is called after a PCI bus error affecting
13935 * this device has been detected.
13936 */
13937 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13938 pci_channel_state_t state)
13939 {
13940 struct net_device *netdev = pci_get_drvdata(pdev);
13941 struct bnxt *bp = netdev_priv(netdev);
13942
13943 netdev_info(netdev, "PCI I/O error detected\n");
13944
13945 rtnl_lock();
13946 netif_device_detach(netdev);
13947
13948 bnxt_ulp_stop(bp);
13949
13950 if (state == pci_channel_io_perm_failure) {
13951 rtnl_unlock();
13952 return PCI_ERS_RESULT_DISCONNECT;
13953 }
13954
13955 if (state == pci_channel_io_frozen)
13956 set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13957
13958 if (netif_running(netdev))
13959 bnxt_close(netdev);
13960
13961 if (pci_is_enabled(pdev))
13962 pci_disable_device(pdev);
13963 bnxt_free_ctx_mem(bp);
13964 kfree(bp->ctx);
13965 bp->ctx = NULL;
13966 rtnl_unlock();
13967
13968 /* Request a slot reset. */
13969 return PCI_ERS_RESULT_NEED_RESET;
13970 }
13971
13972 /**
13973 * bnxt_io_slot_reset - called after the pci bus has been reset.
13974 * @pdev: Pointer to PCI device
13975 *
13976 * Restart the card from scratch, as if from a cold-boot.
13977 * At this point, the card has experienced a hard reset,
13978 * followed by fixups by BIOS, and has its config space
13979 * set up identically to what it was at cold boot.
13980 */
13981 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13982 {
13983 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
13984 struct net_device *netdev = pci_get_drvdata(pdev);
13985 struct bnxt *bp = netdev_priv(netdev);
13986 int retry = 0;
13987 int err = 0;
13988 int off;
13989
13990 netdev_info(bp->dev, "PCI Slot Reset\n");
13991
13992 rtnl_lock();
13993
13994 if (pci_enable_device(pdev)) {
13995 dev_err(&pdev->dev,
13996 "Cannot re-enable PCI device after reset.\n");
13997 } else {
13998 pci_set_master(pdev);
13999 /* Upon fatal error, the device's internal logic that latches the
14000 * BAR values is reset and is only restored by rewriting the
14001 * BARs.
14002 *
14003 * Since pci_restore_state() does not rewrite the BARs if the
14004 * value matches the previously saved value, the driver must
14005 * write the BARs to 0 to force a restore after a fatal error.
14006 */
14007 if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
14008 &bp->state)) {
14009 for (off = PCI_BASE_ADDRESS_0;
14010 off <= PCI_BASE_ADDRESS_5; off += 4)
14011 pci_write_config_dword(bp->pdev, off, 0);
14012 }
14013 pci_restore_state(pdev);
14014 pci_save_state(pdev);
14015
14016 bnxt_inv_fw_health_reg(bp);
14017 bnxt_try_map_fw_health_reg(bp);
14018
14019 /* In some PCIe AER scenarios, firmware can take up to
14020 * 10 seconds to become ready, so keep retrying.
14021 */
14022 do {
14023 err = bnxt_try_recover_fw(bp);
14024 if (!err)
14025 break;
14026 retry++;
14027 } while (retry < BNXT_FW_SLOT_RESET_RETRY);
14028
14029 if (err) {
14030 dev_err(&pdev->dev, "Firmware not ready\n");
14031 goto reset_exit;
14032 }
14033
14034 err = bnxt_hwrm_func_reset(bp);
14035 if (!err)
14036 result = PCI_ERS_RESULT_RECOVERED;
14037
14038 bnxt_ulp_irq_stop(bp);
14039 bnxt_clear_int_mode(bp);
14040 err = bnxt_init_int_mode(bp);
14041 bnxt_ulp_irq_restart(bp, err);
14042 }
14043
14044 reset_exit:
14045 bnxt_clear_reservations(bp, true);
14046 rtnl_unlock();
14047
14048 return result;
14049 }
14050
14051 /**
14052 * bnxt_io_resume - called when traffic can start flowing again.
14053 * @pdev: Pointer to PCI device
14054 *
14055 * This callback is called when the error recovery driver tells
14056 * us that it's OK to resume normal operation.
14057 */
14058 static void bnxt_io_resume(struct pci_dev *pdev)
14059 {
14060 struct net_device *netdev = pci_get_drvdata(pdev);
14061 struct bnxt *bp = netdev_priv(netdev);
14062 int err;
14063
14064 netdev_info(bp->dev, "PCI Slot Resume\n");
14065 rtnl_lock();
14066
14067 err = bnxt_hwrm_func_qcaps(bp);
14068 if (!err && netif_running(netdev))
14069 err = bnxt_open(netdev);
14070
14071 bnxt_ulp_start(bp, err);
14072 if (!err) {
14073 bnxt_reenable_sriov(bp);
14074 netif_device_attach(netdev);
14075 }
14076
14077 rtnl_unlock();
14078 }
14079
14080 static const struct pci_error_handlers bnxt_err_handler = {
14081 .error_detected = bnxt_io_error_detected,
14082 .slot_reset = bnxt_io_slot_reset,
14083 .resume = bnxt_io_resume
14084 };
14085
14086 static struct pci_driver bnxt_pci_driver = {
14087 .name = DRV_MODULE_NAME,
14088 .id_table = bnxt_pci_tbl,
14089 .probe = bnxt_init_one,
14090 .remove = bnxt_remove_one,
14091 .shutdown = bnxt_shutdown,
14092 .driver.pm = BNXT_PM_OPS,
14093 .err_handler = &bnxt_err_handler,
14094 #if defined(CONFIG_BNXT_SRIOV)
14095 .sriov_configure = bnxt_sriov_configure,
14096 #endif
14097 };
14098
14099 static int __init bnxt_init(void)
14100 {
14101 int err;
14102
14103 bnxt_debug_init();
14104 err = pci_register_driver(&bnxt_pci_driver);
14105 if (err) {
14106 bnxt_debug_exit();
14107 return err;
14108 }
14109
14110 return 0;
14111 }
14112
14113 static void __exit bnxt_exit(void)
14114 {
14115 pci_unregister_driver(&bnxt_pci_driver);
14116 if (bnxt_pf_wq)
14117 destroy_workqueue(bnxt_pf_wq);
14118 bnxt_debug_exit();
14119 }
14120
14121 module_init(bnxt_init);
14122 module_exit(bnxt_exit);
14123