1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2020 MediaTek Inc.
3 *
4 */
5
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/pci.h>
9
10 #include "mt7921.h"
11 #include "../mt76_connac2_mac.h"
12 #include "../dma.h"
13 #include "mcu.h"
14
/* PCI IDs served by this driver; .driver_data selects the WM firmware
 * image to load for the matching chip generation.
 * NOTE(review): 0x0608/0x0616 appear to be alternate IDs for the same
 * 7921/7922 silicon — confirm against MediaTek ID assignments.
 */
static const struct pci_device_id mt7921_pci_device_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7961),
		.driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
	{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922),
		.driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
	{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0608),
		.driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
	{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616),
		.driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
	{ },
};
26
/* Writable module parameter (0644): load with disable_aspm=1 to turn
 * off PCIe ASPM for this device (see mt76_pci_disable_aspm() in probe).
 */
static bool mt7921_disable_aspm;
module_param_named(disable_aspm, mt7921_disable_aspm, bool, 0644);
MODULE_PARM_DESC(disable_aspm, "disable PCI ASPM support");
30
/* HIF .init_reset op: perform a forced WPDMA reset (force = true). */
static int mt7921e_init_reset(struct mt792x_dev *dev)
{
	return mt792x_wpdma_reset(dev, true);
}
35
/* Tear down the device on remove/shutdown.
 *
 * Order matters: unregister from mac80211 first so no new work is
 * queued, disable RX NAPI before the DMA rings are cleaned up, and
 * disable the IRQ tasklet last.
 */
static void mt7921e_unregister_device(struct mt792x_dev *dev)
{
	int i;
	struct mt76_connac_pm *pm = &dev->pm;

	cancel_work_sync(&dev->init_work);
	mt76_unregister_device(&dev->mt76);
	mt76_for_each_q_rx(&dev->mt76, i)
		napi_disable(&dev->mt76.napi[i]);
	cancel_delayed_work_sync(&pm->ps_work);
	cancel_work_sync(&pm->wake_work);
	cancel_work_sync(&dev->reset_work);

	/* release pending TX tokens and take driver ownership before
	 * touching the DMA engine and WF subsystem
	 */
	mt76_connac2_tx_token_put(&dev->mt76);
	__mt792x_mcu_drv_pmctrl(dev);
	mt792x_dma_cleanup(dev);
	mt792x_wfsys_reset(dev);
	skb_queue_purge(&dev->mt76.mcu.res_q);

	tasklet_disable(&dev->mt76.irq_tasklet);
}
57
/* Translate a chip (bus) address into an offset within PCI BAR0.
 *
 * Addresses below 0x100000 are already BAR offsets and pass through
 * unchanged.  Otherwise the address is matched against a table of
 * fixed remap windows; addresses in the dynamically remappable ranges
 * are forwarded to mt7921_reg_map_l1().  Anything else is logged and
 * mapped to 0.
 */
static u32 __mt7921_reg_addr(struct mt792x_dev *dev, u32 addr)
{
	static const struct mt76_connac_reg_map fixed_map[] = {
		{ 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
		{ 0x820ed000, 0x24800, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
		{ 0x820e4000, 0x21000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
		{ 0x820e7000, 0x21e00, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
		{ 0x820eb000, 0x24200, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
		{ 0x820e2000, 0x20800, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
		{ 0x820e3000, 0x20c00, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
		{ 0x820e5000, 0x21400, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
		{ 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
		{ 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
		{ 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
		{ 0x54000000, 0x02000, 0x01000 }, /* WFDMA PCIE0 MCU DMA0 */
		{ 0x55000000, 0x03000, 0x01000 }, /* WFDMA PCIE0 MCU DMA1 */
		{ 0x58000000, 0x06000, 0x01000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
		{ 0x59000000, 0x07000, 0x01000 }, /* WFDMA PCIE1 MCU DMA1 */
		{ 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
		{ 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
		{ 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
		{ 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
		{ 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
		{ 0x820c0000, 0x08000, 0x04000 }, /* WF_UMAC_TOP (PLE) */
		{ 0x820c8000, 0x0c000, 0x02000 }, /* WF_UMAC_TOP (PSE) */
		{ 0x820cc000, 0x0e000, 0x01000 }, /* WF_UMAC_TOP (PP) */
		{ 0x820cd000, 0x0f000, 0x01000 }, /* WF_MDP_TOP */
		{ 0x74030000, 0x10000, 0x10000 }, /* PCIE_MAC_IREG */
		{ 0x820ce000, 0x21c00, 0x00200 }, /* WF_LMAC_TOP (WF_SEC) */
		{ 0x820cf000, 0x22000, 0x01000 }, /* WF_LMAC_TOP (WF_PF) */
		{ 0x820e0000, 0x20000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
		{ 0x820e1000, 0x20400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
		{ 0x820e9000, 0x23400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
		{ 0x820ea000, 0x24000, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
		{ 0x820ec000, 0x24600, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
		{ 0x820f0000, 0xa0000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
		{ 0x820f1000, 0xa0600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
		{ 0x820f2000, 0xa0800, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
		{ 0x820f3000, 0xa0c00, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
		{ 0x820f4000, 0xa1000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
		{ 0x820f5000, 0xa1400, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
		{ 0x820f7000, 0xa1e00, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
		{ 0x820f9000, 0xa3400, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
		{ 0x820fa000, 0xa4000, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
		{ 0x820fb000, 0xa4200, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
		{ 0x820fc000, 0xa4600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
		{ 0x820fd000, 0xa4800, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
	};
	int i;

	if (addr < 0x100000)
		return addr;

	for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
		u32 ofs;

		if (addr < fixed_map[i].phys)
			continue;

		ofs = addr - fixed_map[i].phys;
		/* A window covers [phys, phys + size); an offset equal to
		 * size is one past the end and must not match.  The previous
		 * "ofs > size" test was off by one and let addr == phys + size
		 * alias into the byte after the remap window.
		 */
		if (ofs >= fixed_map[i].size)
			continue;

		return fixed_map[i].maps + ofs;
	}

	if ((addr >= 0x18000000 && addr < 0x18c00000) ||
	    (addr >= 0x70000000 && addr < 0x78000000) ||
	    (addr >= 0x7c000000 && addr < 0x7c400000))
		return mt7921_reg_map_l1(dev, addr);

	dev_err(dev->mt76.dev, "Access currently unsupported address %08x\n",
		addr);

	return 0;
}
134
/* Bus .rr op: remap @offset through the register translation tables,
 * then delegate to the original mmio read accessor.
 */
static u32 mt7921_rr(struct mt76_dev *mdev, u32 offset)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);

	return dev->bus_ops->rr(mdev, __mt7921_reg_addr(dev, offset));
}
142
/* Bus .wr op: remap @offset through the register translation tables,
 * then delegate to the original mmio write accessor.
 */
static void mt7921_wr(struct mt76_dev *mdev, u32 offset, u32 val)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);

	dev->bus_ops->wr(mdev, __mt7921_reg_addr(dev, offset), val);
}
150
/* Bus .rmw op: remap @offset through the register translation tables,
 * then delegate to the original mmio read-modify-write accessor.
 */
static u32 mt7921_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);

	return dev->bus_ops->rmw(mdev, __mt7921_reg_addr(dev, offset), mask, val);
}
158
/* Allocate and program the WFDMA TX/MCU/RX rings and start NAPI.
 *
 * The DMA engine is disabled (forced) before the rings are set up and
 * only re-enabled at the very end, once every queue exists.
 */
static int mt7921_dma_init(struct mt792x_dev *dev)
{
	int ret;

	mt76_dma_attach(&dev->mt76);

	ret = mt792x_dma_disable(dev, true);
	if (ret)
		return ret;

	/* init tx queue */
	ret = mt76_connac_init_tx_queues(dev->phy.mt76, MT7921_TXQ_BAND0,
					 MT7921_TX_RING_SIZE,
					 MT_TX_RING_BASE, 0);
	if (ret)
		return ret;

	mt76_wr(dev, MT_WFDMA0_TX_RING0_EXT_CTRL, 0x4);

	/* command to WM */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT7921_TXQ_MCU_WM,
				  MT7921_TX_MCU_RING_SIZE, MT_TX_RING_BASE);
	if (ret)
		return ret;

	/* firmware download */
	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL, MT7921_TXQ_FWDL,
				  MT7921_TX_FWDL_RING_SIZE, MT_TX_RING_BASE);
	if (ret)
		return ret;

	/* event from WM before firmware download */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
			       MT7921_RXQ_MCU_WM,
			       MT7921_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE, MT_RX_EVENT_RING_BASE);
	if (ret)
		return ret;

	/* Change mcu queue after firmware download */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
			       MT7921_RXQ_MCU_WM,
			       MT7921_RX_MCU_RING_SIZE,
			       MT_RX_BUF_SIZE, MT_WFDMA0(0x540));
	if (ret)
		return ret;

	/* rx data */
	ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
			       MT7921_RXQ_BAND0, MT7921_RX_RING_SIZE,
			       MT_RX_BUF_SIZE, MT_RX_DATA_RING_BASE);
	if (ret)
		return ret;

	ret = mt76_init_queues(dev, mt792x_poll_rx);
	if (ret < 0)
		return ret;

	netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt792x_poll_tx);
	napi_enable(&dev->mt76.tx_napi);

	return mt792x_dma_enable(dev);
}
223
/* Probe one mt7921/mt7922 PCIe function: map BAR0, set up IRQ vectors
 * and DMA masking, interpose the register-remapping bus accessors,
 * bring the MCU into driver-own state, then init DMA and register with
 * mac80211.  Errors unwind via goto labels in reverse acquisition order.
 */
static int mt7921_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	static const struct mt76_driver_ops drv_ops = {
		/* txwi_size = txd size + txp size */
		.txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_hw_txp),
		.drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ |
			     MT_DRV_AMSDU_OFFLOAD,
		.survey_flags = SURVEY_INFO_TIME_TX |
				SURVEY_INFO_TIME_RX |
				SURVEY_INFO_TIME_BSS_RX,
		.token_size = MT7921_TOKEN_SIZE,
		.tx_prepare_skb = mt7921e_tx_prepare_skb,
		.tx_complete_skb = mt76_connac_tx_complete_skb,
		.rx_check = mt7921_rx_check,
		.rx_skb = mt7921_queue_rx_skb,
		.rx_poll_complete = mt792x_rx_poll_complete,
		.sta_add = mt7921_mac_sta_add,
		.sta_assoc = mt7921_mac_sta_assoc,
		.sta_remove = mt7921_mac_sta_remove,
		.update_survey = mt792x_update_channel,
	};
	/* PCIe-specific HIF ops (reset and power-management control) */
	static const struct mt792x_hif_ops mt7921_pcie_ops = {
		.init_reset = mt7921e_init_reset,
		.reset = mt7921e_mac_reset,
		.mcu_init = mt7921e_mcu_init,
		.drv_own = mt792xe_mcu_drv_pmctrl,
		.fw_own = mt792xe_mcu_fw_pmctrl,
	};
	static const struct mt792x_irq_map irq_map = {
		.host_irq_enable = MT_WFDMA0_HOST_INT_ENA,
		.tx = {
			.all_complete_mask = MT_INT_TX_DONE_ALL,
			.mcu_complete_mask = MT_INT_TX_DONE_MCU,
		},
		.rx = {
			.data_complete_mask = MT_INT_RX_DONE_DATA,
			.wm_complete_mask = MT_INT_RX_DONE_WM,
			.wm2_complete_mask = MT_INT_RX_DONE_WM2,
		},
	};
	struct ieee80211_ops *ops;
	struct mt76_bus_ops *bus_ops;
	struct mt792x_dev *dev;
	struct mt76_dev *mdev;
	u8 features;
	int ret;
	u16 cmd;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	/* make sure memory-space decoding is on before touching BAR0 */
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_MEMORY)) {
		cmd |= PCI_COMMAND_MEMORY;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}
	pci_set_master(pdev);

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0)
		return ret;

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto err_free_pci_vec;

	if (mt7921_disable_aspm)
		mt76_pci_disable_aspm(pdev);

	/* select the firmware/ops variant from the PCI ID's driver_data */
	ops = mt792x_get_mac80211_ops(&pdev->dev, &mt7921_ops,
				      (void *)id->driver_data, &features);
	if (!ops) {
		ret = -ENOMEM;
		goto err_free_pci_vec;
	}

	mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), ops, &drv_ops);
	if (!mdev) {
		ret = -ENOMEM;
		goto err_free_pci_vec;
	}

	pci_set_drvdata(pdev, mdev);

	dev = container_of(mdev, struct mt792x_dev, mt76);
	dev->fw_features = features;
	dev->hif_ops = &mt7921_pcie_ops;
	dev->irq_map = &irq_map;
	mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
	tasklet_init(&mdev->irq_tasklet, mt792x_irq_tasklet, (unsigned long)dev);

	dev->phy.dev = dev;
	dev->phy.mt76 = &dev->mt76.phy;
	dev->mt76.phy.priv = &dev->phy;
	/* interpose the remapping rr/wr/rmw accessors over the default
	 * mmio bus ops; the originals are kept in dev->bus_ops
	 */
	dev->bus_ops = dev->mt76.bus;
	bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
			       GFP_KERNEL);
	if (!bus_ops) {
		ret = -ENOMEM;
		goto err_free_dev;
	}

	bus_ops->rr = mt7921_rr;
	bus_ops->wr = mt7921_wr;
	bus_ops->rmw = mt7921_rmw;
	dev->mt76.bus = bus_ops;

	/* cycle fw-own -> drv-own so the chip is awake for register access */
	ret = mt792xe_mcu_fw_pmctrl(dev);
	if (ret)
		goto err_free_dev;

	ret = __mt792xe_mcu_drv_pmctrl(dev);
	if (ret)
		goto err_free_dev;

	mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) |
		    (mt7921_l1_rr(dev, MT_HW_REV) & 0xff);
	dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);

	ret = mt792x_wfsys_reset(dev);
	if (ret)
		goto err_free_dev;

	/* keep host interrupts masked until the DMA rings exist */
	mt76_wr(dev, irq_map.host_irq_enable, 0);

	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);

	ret = devm_request_irq(mdev->dev, pdev->irq, mt792x_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME, dev);
	if (ret)
		goto err_free_dev;

	ret = mt7921_dma_init(dev);
	if (ret)
		goto err_free_irq;

	ret = mt7921_register_device(dev);
	if (ret)
		goto err_free_irq;

	return 0;

err_free_irq:
	devm_free_irq(&pdev->dev, pdev->irq, dev);
err_free_dev:
	mt76_free_device(&dev->mt76);
err_free_pci_vec:
	pci_free_irq_vectors(pdev);

	return ret;
}
381
/* PCI remove: mirror of probe — unregister/quiesce the device first,
 * then release the IRQ, the mt76 device and the IRQ vectors.
 */
static void mt7921_pci_remove(struct pci_dev *pdev)
{
	struct mt76_dev *mdev = pci_get_drvdata(pdev);
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);

	mt7921e_unregister_device(dev);
	devm_free_irq(&pdev->dev, pdev->irq, dev);
	mt76_free_device(&dev->mt76);
	pci_free_irq_vectors(pdev);
}
392
/* System-suspend hook: flush pending work, notify the MCU that the
 * host interface is suspending, stop NAPI/workers, quiesce and disable
 * DMA and interrupts, then hand ownership to firmware (fw-own).
 * On failure everything is re-enabled; a negative error additionally
 * schedules a full chip reset.
 */
static int mt7921_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mt76_dev *mdev = pci_get_drvdata(pdev);
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
	struct mt76_connac_pm *pm = &dev->pm;
	int i, err;

	pm->suspended = true;
	flush_work(&dev->reset_work);
	cancel_delayed_work_sync(&pm->ps_work);
	cancel_work_sync(&pm->wake_work);

	/* take driver ownership so the MCU commands below can be sent */
	err = mt792x_mcu_drv_pmctrl(dev);
	if (err < 0)
		goto restore_suspend;

	err = mt76_connac_mcu_set_hif_suspend(mdev, true);
	if (err)
		goto restore_suspend;

	/* always enable deep sleep during suspend to reduce
	 * power consumption
	 */
	mt76_connac_mcu_set_deep_sleep(&dev->mt76, true);

	napi_disable(&mdev->tx_napi);
	mt76_worker_disable(&mdev->tx_worker);

	mt76_for_each_q_rx(mdev, i) {
		napi_disable(&mdev->napi[i]);
	}

	/* wait until dma is idle */
	mt76_poll(dev, MT_WFDMA0_GLO_CFG,
		  MT_WFDMA0_GLO_CFG_TX_DMA_BUSY |
		  MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000);

	/* put dma disabled */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	/* disable interrupt */
	mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
	synchronize_irq(pdev->irq);
	tasklet_kill(&mdev->irq_tasklet);

	/* hand chip ownership back to the firmware for the sleep period */
	err = mt792x_mcu_fw_pmctrl(dev);
	if (err)
		goto restore_napi;

	return 0;

restore_napi:
	mt76_for_each_q_rx(mdev, i) {
		napi_enable(&mdev->napi[i]);
	}
	napi_enable(&mdev->tx_napi);

	/* restore the user's deep-sleep setting on the failure path */
	if (!pm->ds_enable)
		mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);

	mt76_connac_mcu_set_hif_suspend(mdev, false);

restore_suspend:
	pm->suspended = false;

	if (err < 0)
		mt792x_reset(&dev->mt76);

	return err;
}
466
/* System-resume hook: reclaim driver ownership, re-run WPDMA init if
 * the chip lost state across sleep, re-enable interrupts/DMA/NAPI,
 * restore the deep-sleep setting and clear the MCU's hif-suspend flag.
 * A negative error triggers a full chip reset.
 */
static int mt7921_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mt76_dev *mdev = pci_get_drvdata(pdev);
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
	struct mt76_connac_pm *pm = &dev->pm;
	int i, err;

	err = mt792x_mcu_drv_pmctrl(dev);
	if (err < 0)
		goto failed;

	mt792x_wpdma_reinit_cond(dev);

	/* enable interrupt */
	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
	mt76_connac_irq_enable(&dev->mt76,
			       dev->irq_map->tx.all_complete_mask |
			       MT_INT_RX_DONE_ALL | MT_INT_MCU_CMD);
	mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE);

	/* put dma enabled */
	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);

	mt76_worker_enable(&mdev->tx_worker);

	/* NAPI scheduling must happen with bottom halves disabled */
	local_bh_disable();
	mt76_for_each_q_rx(mdev, i) {
		napi_enable(&mdev->napi[i]);
		napi_schedule(&mdev->napi[i]);
	}
	napi_enable(&mdev->tx_napi);
	napi_schedule(&mdev->tx_napi);
	local_bh_enable();

	/* restore previous ds setting */
	if (!pm->ds_enable)
		mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);

	err = mt76_connac_mcu_set_hif_suspend(mdev, false);
failed:
	pm->suspended = false;

	if (err < 0)
		mt792x_reset(&dev->mt76);

	return err;
}
516
/* On shutdown/reboot, fully quiesce the device as if it were removed. */
static void mt7921_pci_shutdown(struct pci_dev *pdev)
{
	mt7921_pci_remove(pdev);
}
521
/* System-sleep PM ops (suspend/resume only; no runtime-PM callbacks here) */
static DEFINE_SIMPLE_DEV_PM_OPS(mt7921_pm_ops, mt7921_pci_suspend, mt7921_pci_resume);

static struct pci_driver mt7921_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= mt7921_pci_device_table,
	.probe		= mt7921_pci_probe,
	.remove		= mt7921_pci_remove,
	.shutdown	= mt7921_pci_shutdown,
	.driver.pm	= pm_sleep_ptr(&mt7921_pm_ops),
};

module_pci_driver(mt7921_pci_driver);

MODULE_DEVICE_TABLE(pci, mt7921_pci_device_table);
MODULE_FIRMWARE(MT7921_FIRMWARE_WM);
MODULE_FIRMWARE(MT7921_ROM_PATCH);
MODULE_FIRMWARE(MT7922_FIRMWARE_WM);
MODULE_FIRMWARE(MT7922_ROM_PATCH);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");