Lines Matching +full:y +full:- +full:rp

Grep-style results from the Realtek rtw88 PCI driver (pci.c): each entry gives the source line number, the matching line, and the function it appears in. Non-matching lines are omitted, so fragments below may break off mid-statement.

1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright(c) 2018-2019 Realtek Corporation
21 MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
22 MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");
46 return skb->priority; in rtw_pci_get_tx_qsel()
52 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_read8()
54 return readb(rtwpci->mmap + addr); in rtw_pci_read8()
59 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_read16()
61 return readw(rtwpci->mmap + addr); in rtw_pci_read16()
66 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_read32()
68 return readl(rtwpci->mmap + addr); in rtw_pci_read32()
73 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_write8()
75 writeb(val, rtwpci->mmap + addr); in rtw_pci_write8()
80 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_write16()
82 writew(val, rtwpci->mmap + addr); in rtw_pci_write16()
87 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_write32()
89 writel(val, rtwpci->mmap + addr); in rtw_pci_write32()
94 int offset = tx_ring->r.desc_size * idx; in rtw_pci_get_tx_desc()
96 return tx_ring->r.head + offset; in rtw_pci_get_tx_desc()
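
rtw_pci_get_tx_desc (lines 94..96) is plain pointer arithmetic: a ring is one flat allocation of len fixed-size descriptors, so entry idx sits at head + desc_size * idx. A minimal userspace model of that addressing, assuming nothing about the real descriptor layout (struct ring and get_desc are illustrative names, not the driver's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Model of a descriptor ring: one flat buffer of fixed-size entries. */
struct ring {
	uint8_t *head;   /* base of the coherent allocation */
	int desc_size;   /* bytes per descriptor */
	int len;         /* number of descriptors */
};

static uint8_t *get_desc(const struct ring *r, int idx)
{
	assert(idx >= 0 && idx < r->len);
	return r->head + (size_t)r->desc_size * idx;
}

int main(void)
{
	struct ring r = { .desc_size = 16, .len = 128 };

	r.head = calloc(r.len, r.desc_size);
	if (!r.head)
		return 1;
	printf("desc 5 offset: %td\n", get_desc(&r, 5) - r.head); /* 80 */
	free(r.head);
	return 0;
}
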
102 struct pci_dev *pdev = to_pci_dev(rtwdev->dev); in rtw_pci_free_tx_ring_skbs()
108 skb_queue_walk_safe(&tx_ring->queue, skb, tmp) { in rtw_pci_free_tx_ring_skbs()
109 __skb_unlink(skb, &tx_ring->queue); in rtw_pci_free_tx_ring_skbs()
111 dma = tx_data->dma; in rtw_pci_free_tx_ring_skbs()
113 dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE); in rtw_pci_free_tx_ring_skbs()
121 struct pci_dev *pdev = to_pci_dev(rtwdev->dev); in rtw_pci_free_tx_ring()
122 u8 *head = tx_ring->r.head; in rtw_pci_free_tx_ring()
123 u32 len = tx_ring->r.len; in rtw_pci_free_tx_ring()
124 int ring_sz = len * tx_ring->r.desc_size; in rtw_pci_free_tx_ring()
129 dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma); in rtw_pci_free_tx_ring()
130 tx_ring->r.head = NULL; in rtw_pci_free_tx_ring()
136 struct pci_dev *pdev = to_pci_dev(rtwdev->dev); in rtw_pci_free_rx_ring_skbs()
142 for (i = 0; i < rx_ring->r.len; i++) { in rtw_pci_free_rx_ring_skbs()
143 skb = rx_ring->buf[i]; in rtw_pci_free_rx_ring_skbs()
147 dma = *((dma_addr_t *)skb->cb); in rtw_pci_free_rx_ring_skbs()
148 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); in rtw_pci_free_rx_ring_skbs()
150 rx_ring->buf[i] = NULL; in rtw_pci_free_rx_ring_skbs()
157 struct pci_dev *pdev = to_pci_dev(rtwdev->dev); in rtw_pci_free_rx_ring()
158 u8 *head = rx_ring->r.head; in rtw_pci_free_rx_ring()
159 int ring_sz = rx_ring->r.desc_size * rx_ring->r.len; in rtw_pci_free_rx_ring()
163 dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma); in rtw_pci_free_rx_ring()
168 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_free_trx_ring()
174 tx_ring = &rtwpci->tx_rings[i]; in rtw_pci_free_trx_ring()
179 rx_ring = &rtwpci->rx_rings[i]; in rtw_pci_free_trx_ring()
188 struct pci_dev *pdev = to_pci_dev(rtwdev->dev); in rtw_pci_init_tx_ring()
195 return -EINVAL; in rtw_pci_init_tx_ring()
198 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); in rtw_pci_init_tx_ring()
201 return -ENOMEM; in rtw_pci_init_tx_ring()
204 skb_queue_head_init(&tx_ring->queue); in rtw_pci_init_tx_ring()
205 tx_ring->r.head = head; in rtw_pci_init_tx_ring()
206 tx_ring->r.dma = dma; in rtw_pci_init_tx_ring()
207 tx_ring->r.len = len; in rtw_pci_init_tx_ring()
208 tx_ring->r.desc_size = desc_size; in rtw_pci_init_tx_ring()
209 tx_ring->r.wp = 0; in rtw_pci_init_tx_ring()
210 tx_ring->r.rp = 0; in rtw_pci_init_tx_ring()
219 struct pci_dev *pdev = to_pci_dev(rtwdev->dev); in rtw_pci_reset_rx_desc()
225 return -EINVAL; in rtw_pci_reset_rx_desc()
227 dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE); in rtw_pci_reset_rx_desc()
228 if (dma_mapping_error(&pdev->dev, dma)) in rtw_pci_reset_rx_desc()
229 return -EBUSY; in rtw_pci_reset_rx_desc()
231 *((dma_addr_t *)skb->cb) = dma; in rtw_pci_reset_rx_desc()
232 buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + in rtw_pci_reset_rx_desc()
235 buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE); in rtw_pci_reset_rx_desc()
236 buf_desc->dma = cpu_to_le32(dma); in rtw_pci_reset_rx_desc()
245 struct device *dev = rtwdev->dev; in rtw_pci_sync_rx_desc_device()
251 buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + in rtw_pci_sync_rx_desc_device()
254 buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE); in rtw_pci_sync_rx_desc_device()
255 buf_desc->dma = cpu_to_le32(dma); in rtw_pci_sync_rx_desc_device()
262 struct pci_dev *pdev = to_pci_dev(rtwdev->dev); in rtw_pci_init_rx_ring()
271 head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL); in rtw_pci_init_rx_ring()
274 return -ENOMEM; in rtw_pci_init_rx_ring()
276 rx_ring->r.head = head; in rtw_pci_init_rx_ring()
282 ret = -ENOMEM; in rtw_pci_init_rx_ring()
286 memset(skb->data, 0, buf_sz); in rtw_pci_init_rx_ring()
287 rx_ring->buf[i] = skb; in rtw_pci_init_rx_ring()
296 rx_ring->r.dma = dma; in rtw_pci_init_rx_ring()
297 rx_ring->r.len = len; in rtw_pci_init_rx_ring()
298 rx_ring->r.desc_size = desc_size; in rtw_pci_init_rx_ring()
299 rx_ring->r.wp = 0; in rtw_pci_init_rx_ring()
300 rx_ring->r.rp = 0; in rtw_pci_init_rx_ring()
306 skb = rx_ring->buf[i]; in rtw_pci_init_rx_ring()
309 dma = *((dma_addr_t *)skb->cb); in rtw_pci_init_rx_ring()
310 dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE); in rtw_pci_init_rx_ring()
312 rx_ring->buf[i] = NULL; in rtw_pci_init_rx_ring()
314 dma_free_coherent(&pdev->dev, ring_sz, head, dma); in rtw_pci_init_rx_ring()
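
The tail of rtw_pci_init_rx_ring (lines 306..314) is a reverse-order error unwind: every buffer that was successfully allocated and DMA-mapped is unmapped and freed, and only then is the coherent ring released. A compact sketch of the same goto-unwind pattern, with plain malloc/free standing in for dma_alloc_coherent/dma_map_single (all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define LEN 4

int main(void)
{
	void *ring, *buf[LEN] = { 0 };
	int i;

	ring = malloc(LEN * 8);            /* stands in for dma_alloc_coherent() */
	if (!ring)
		return 1;

	for (i = 0; i < LEN; i++) {
		buf[i] = malloc(64);       /* stands in for skb alloc + dma_map */
		if (!buf[i])
			goto err_unwind;
	}
	puts("all buffers set up");
	for (i = 0; i < LEN; i++)          /* normal teardown */
		free(buf[i]);
	free(ring);
	return 0;

err_unwind:
	while (--i >= 0) {                 /* release only what was set up */
		free(buf[i]);
		buf[i] = NULL;
	}
	free(ring);                        /* stands in for dma_free_coherent() */
	return 1;
}
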
323 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_init_trx_ring()
326 struct rtw_chip_info *chip = rtwdev->chip; in rtw_pci_init_trx_ring()
332 tx_desc_size = chip->tx_buf_desc_sz; in rtw_pci_init_trx_ring()
335 tx_ring = &rtwpci->tx_rings[i]; in rtw_pci_init_trx_ring()
342 rx_desc_size = chip->rx_buf_desc_sz; in rtw_pci_init_trx_ring()
345 rx_ring = &rtwpci->rx_rings[j]; in rtw_pci_init_trx_ring()
357 tx_ring = &rtwpci->tx_rings[i]; in rtw_pci_init_trx_ring()
363 rx_ring = &rtwpci->rx_rings[j]; in rtw_pci_init_trx_ring()
377 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_init()
380 rtwpci->irq_mask[0] = IMR_HIGHDOK | in rtw_pci_init()
390 rtwpci->irq_mask[1] = IMR_TXFOVW | in rtw_pci_init()
392 rtwpci->irq_mask[3] = IMR_H2CDOK | in rtw_pci_init()
394 spin_lock_init(&rtwpci->irq_lock); in rtw_pci_init()
395 spin_lock_init(&rtwpci->hwirq_lock); in rtw_pci_init()
403 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_reset_buf_desc()
411 dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma; in rtw_pci_reset_buf_desc()
415 len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len; in rtw_pci_reset_buf_desc()
416 dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma; in rtw_pci_reset_buf_desc()
417 rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0; in rtw_pci_reset_buf_desc()
418 rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0; in rtw_pci_reset_buf_desc()
423 len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len; in rtw_pci_reset_buf_desc()
424 dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma; in rtw_pci_reset_buf_desc()
425 rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0; in rtw_pci_reset_buf_desc()
426 rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0; in rtw_pci_reset_buf_desc()
430 len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len; in rtw_pci_reset_buf_desc()
431 dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma; in rtw_pci_reset_buf_desc()
432 rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0; in rtw_pci_reset_buf_desc()
433 rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0; in rtw_pci_reset_buf_desc()
437 len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len; in rtw_pci_reset_buf_desc()
438 dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma; in rtw_pci_reset_buf_desc()
439 rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0; in rtw_pci_reset_buf_desc()
440 rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0; in rtw_pci_reset_buf_desc()
444 len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len; in rtw_pci_reset_buf_desc()
445 dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma; in rtw_pci_reset_buf_desc()
446 rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0; in rtw_pci_reset_buf_desc()
447 rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0; in rtw_pci_reset_buf_desc()
451 len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len; in rtw_pci_reset_buf_desc()
452 dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma; in rtw_pci_reset_buf_desc()
453 rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0; in rtw_pci_reset_buf_desc()
454 rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0; in rtw_pci_reset_buf_desc()
458 len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len; in rtw_pci_reset_buf_desc()
459 dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma; in rtw_pci_reset_buf_desc()
460 rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0; in rtw_pci_reset_buf_desc()
461 rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0; in rtw_pci_reset_buf_desc()
465 len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len; in rtw_pci_reset_buf_desc()
466 dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma; in rtw_pci_reset_buf_desc()
467 rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0; in rtw_pci_reset_buf_desc()
468 rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0; in rtw_pci_reset_buf_desc()
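
rtw_pci_reset_buf_desc repeats one pattern per queue: program the ring length and DMA base into that queue's register pair, then zero the software read/write pointers. A hedged userspace sketch of the table-driven form this could take; the register offsets, mask, and q_cfg table are made-up stand-ins, not the driver's values:

#include <stdint.h>
#include <stdio.h>

struct ring { uint32_t len, dma, rp, wp; };
struct q_cfg { const char *name; unsigned num_reg; unsigned desa_reg; };

/* Mock MMIO write: the real driver uses rtw_write16/rtw_write32. */
static void mmio_write(unsigned reg, unsigned val)
{
	printf("reg %#06x <- %#010x\n", reg, val);
}

int main(void)
{
	static const struct q_cfg cfg[] = {
		{ "H2C", 0x300, 0x304 }, { "BK", 0x310, 0x314 },
		{ "BE",  0x320, 0x324 }, { "VO", 0x330, 0x334 },
	};
	struct ring rings[4] = { { 256, 0x1000 }, { 256, 0x2000 },
				 { 256, 0x3000 }, { 256, 0x4000 } };

	for (unsigned i = 0; i < 4; i++) {
		mmio_write(cfg[i].num_reg, rings[i].len & 0xfff); /* mask is a stand-in */
		mmio_write(cfg[i].desa_reg, rings[i].dma);
		rings[i].rp = 0;
		rings[i].wp = 0;
	}
	return 0;
}
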
492 spin_lock_irqsave(&rtwpci->hwirq_lock, flags); in rtw_pci_enable_interrupt()
494 rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask); in rtw_pci_enable_interrupt()
495 rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]); in rtw_pci_enable_interrupt()
497 rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]); in rtw_pci_enable_interrupt()
499 rtwpci->irq_enabled = true; in rtw_pci_enable_interrupt()
501 spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags); in rtw_pci_enable_interrupt()
509 spin_lock_irqsave(&rtwpci->hwirq_lock, flags); in rtw_pci_disable_interrupt()
511 if (!rtwpci->irq_enabled) in rtw_pci_disable_interrupt()
519 rtwpci->irq_enabled = false; in rtw_pci_disable_interrupt()
522 spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags); in rtw_pci_disable_interrupt()
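
Enable and disable are symmetric under hwirq_lock: enable programs the cached irq_mask[] values into the HIMR registers and sets irq_enabled; disable checks the flag first (so a second call is a no-op) and writes the registers back to zero. A single-threaded model of that bookkeeping, with plain variables mocking the registers and the lock elided (struct dev is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dev {
	uint32_t irq_mask[4];  /* cached masks; indices 0/1/3 are used */
	uint32_t himr[4];      /* mock of the hardware HIMR registers */
	bool irq_enabled;
};

static void enable_interrupt(struct dev *d)
{
	/* real driver: spin_lock_irqsave(&hwirq_lock) around this */
	d->himr[0] = d->irq_mask[0];
	d->himr[1] = d->irq_mask[1];
	d->himr[3] = d->irq_mask[3];
	d->irq_enabled = true;
}

static void disable_interrupt(struct dev *d)
{
	if (!d->irq_enabled)
		return;            /* already off: nothing to do */
	d->himr[0] = d->himr[1] = d->himr[3] = 0;
	d->irq_enabled = false;
}

int main(void)
{
	struct dev d = { .irq_mask = { 0xff, 0x3, 0, 0x1 } };

	enable_interrupt(&d);
	disable_interrupt(&d);
	disable_interrupt(&d);     /* idempotent second call */
	printf("enabled=%d himr0=%#x\n", d.irq_enabled, (unsigned)d.himr[0]);
	return 0;
}
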
530 rtwpci->rx_tag = 0; in rtw_pci_dma_reset()
535 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_setup()
550 tx_ring = &rtwpci->tx_rings[queue]; in rtw_pci_dma_release()
557 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_napi_start()
559 if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags)) in rtw_pci_napi_start()
562 napi_enable(&rtwpci->napi); in rtw_pci_napi_start()
567 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_napi_stop()
569 if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags)) in rtw_pci_napi_stop()
572 napi_synchronize(&rtwpci->napi); in rtw_pci_napi_stop()
573 napi_disable(&rtwpci->napi); in rtw_pci_napi_stop()
578 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_start()
582 spin_lock_bh(&rtwpci->irq_lock); in rtw_pci_start()
583 rtwpci->running = true; in rtw_pci_start()
585 spin_unlock_bh(&rtwpci->irq_lock); in rtw_pci_start()
592 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_stop()
593 struct pci_dev *pdev = rtwpci->pdev; in rtw_pci_stop()
595 spin_lock_bh(&rtwpci->irq_lock); in rtw_pci_stop()
596 rtwpci->running = false; in rtw_pci_stop()
598 spin_unlock_bh(&rtwpci->irq_lock); in rtw_pci_stop()
600 synchronize_irq(pdev->irq); in rtw_pci_stop()
603 spin_lock_bh(&rtwpci->irq_lock); in rtw_pci_stop()
605 spin_unlock_bh(&rtwpci->irq_lock); in rtw_pci_stop()
610 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_deep_ps_enter()
615 lockdep_assert_held(&rtwpci->irq_lock); in rtw_pci_deep_ps_enter()
617 /* TX DMA is not allowed in deep PS state */ in rtw_pci_deep_ps_enter()
626 tx_ring = &rtwpci->tx_rings[queue]; in rtw_pci_deep_ps_enter()
629 if (skb_queue_len(&tx_ring->queue)) { in rtw_pci_deep_ps_enter()
641 set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags); in rtw_pci_deep_ps_enter()
647 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_deep_ps_leave()
649 lockdep_assert_held(&rtwpci->irq_lock); in rtw_pci_deep_ps_leave()
651 if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags)) in rtw_pci_deep_ps_leave()
657 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_deep_ps()
659 spin_lock_bh(&rtwpci->irq_lock); in rtw_pci_deep_ps()
661 if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags)) in rtw_pci_deep_ps()
664 if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags)) in rtw_pci_deep_ps()
667 spin_unlock_bh(&rtwpci->irq_lock); in rtw_pci_deep_ps()
681 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; in rtw_hw_queue_mapping()
682 __le16 fc = hdr->frame_control; in rtw_hw_queue_mapping()
701 struct sk_buff *prev = skb_dequeue(&ring->queue); in rtw_pci_release_rsvd_page()
709 dma = tx_data->dma; in rtw_pci_release_rsvd_page()
710 dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE); in rtw_pci_release_rsvd_page()
718 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_dma_check()
719 struct rtw_chip_info *chip = rtwdev->chip; in rtw_pci_dma_check()
721 u32 desc_sz = chip->rx_buf_desc_sz; in rtw_pci_dma_check()
724 buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head + in rtw_pci_dma_check()
726 total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size); in rtw_pci_dma_check()
729 if (total_pkt_size != rtwpci->rx_tag) in rtw_pci_dma_check()
732 rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX; in rtw_pci_dma_check()
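
rtw_pci_dma_check compares a tag the device stamps into each RX buffer descriptor against a software counter; a mismatch means the DMA stream is out of step, and the counter always advances modulo RX_TAG_MAX. A small model of that sequence check (the RX_TAG_MAX value below is a stand-in, not the driver's constant):

#include <stdio.h>

#define RX_TAG_MAX 8192   /* stand-in bound; the driver defines its own */

static unsigned rx_tag;

/* Returns 1 if the descriptor's tag matches the expected sequence. */
static int dma_check(unsigned tag_from_desc)
{
	int ok = (tag_from_desc == rx_tag);

	if (!ok)
		fprintf(stderr, "pci bus timeout, check dma status\n");
	rx_tag = (rx_tag + 1) % RX_TAG_MAX;  /* expect the next tag */
	return ok;
}

int main(void)
{
	printf("%d\n", dma_check(0));  /* 1: in sync */
	printf("%d\n", dma_check(5));  /* 0: expected 1 */
	return 0;
}
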
745 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in __pci_flush_queue()
746 struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q]; in __pci_flush_queue()
757 if (cur_rp == ring->r.wp) in __pci_flush_queue()
790 if (queues == BIT(rtwdev->hw->queues) - 1) { in rtw_pci_flush_queues()
791 pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1; in rtw_pci_flush_queues()
793 for (i = 0; i < rtwdev->hw->queues; i++) in rtw_pci_flush_queues()
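
In rtw_pci_flush_queues a bitmap with the low hw->queues bits set means "all queues", built with the usual BIT(n) - 1 trick; anything narrower is walked bit by bit and mapped to PCI TX queues. A one-screen illustration of the mask arithmetic (the hw_queues value is arbitrary):

#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	unsigned hw_queues = 4;                 /* mac80211 queue count */
	unsigned all = BIT(hw_queues) - 1;      /* 0b1111: every queue set */

	printf("all-queues mask = %#x\n", all);
	return 0;
}
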
803 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_tx_kick_off_queue()
807 ring = &rtwpci->tx_rings[queue]; in rtw_pci_tx_kick_off_queue()
810 spin_lock_bh(&rtwpci->irq_lock); in rtw_pci_tx_kick_off_queue()
812 rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK); in rtw_pci_tx_kick_off_queue()
813 spin_unlock_bh(&rtwpci->irq_lock); in rtw_pci_tx_kick_off_queue()
818 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_tx_kick_off()
822 if (test_and_clear_bit(queue, rtwpci->tx_queued)) in rtw_pci_tx_kick_off()
830 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_tx_write_data()
831 struct rtw_chip_info *chip = rtwdev->chip; in rtw_pci_tx_write_data()
835 u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz; in rtw_pci_tx_write_data()
836 u32 tx_buf_desc_sz = chip->tx_buf_desc_sz; in rtw_pci_tx_write_data()
842 ring = &rtwpci->tx_rings[queue]; in rtw_pci_tx_write_data()
844 size = skb->len; in rtw_pci_tx_write_data()
848 else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len)) in rtw_pci_tx_write_data()
849 return -ENOSPC; in rtw_pci_tx_write_data()
851 pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz); in rtw_pci_tx_write_data()
853 pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue); in rtw_pci_tx_write_data()
855 dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len, in rtw_pci_tx_write_data()
857 if (dma_mapping_error(&rtwpci->pdev->dev, dma)) in rtw_pci_tx_write_data()
858 return -EBUSY; in rtw_pci_tx_write_data()
863 psb_len = (skb->len - 1) / 128 + 1; in rtw_pci_tx_write_data()
874 tx_data->dma = dma; in rtw_pci_tx_write_data()
875 tx_data->sn = pkt_info->sn; in rtw_pci_tx_write_data()
877 spin_lock_bh(&rtwpci->irq_lock); in rtw_pci_tx_write_data()
879 skb_queue_tail(&ring->queue, skb); in rtw_pci_tx_write_data()
884 /* update write-index, and kick it off later */ in rtw_pci_tx_write_data()
885 set_bit(queue, rtwpci->tx_queued); in rtw_pci_tx_write_data()
886 if (++ring->r.wp >= ring->r.len) in rtw_pci_tx_write_data()
887 ring->r.wp = 0; in rtw_pci_tx_write_data()
890 spin_unlock_bh(&rtwpci->irq_lock); in rtw_pci_tx_write_data()
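
rtw_pci_tx_write_data checks avail_desc(wp, rp, len) before claiming a slot and then advances wp with wraparound under irq_lock; the PSB length at line 863 is a ceiling division into 128-byte units. avail_desc itself is not among the matched lines, so the formula below is the standard one-slot-empty circular-buffer computation and should be read as an assumption about the driver's helper:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* One-slot-empty circular buffer: wp is where software writes next,
 * rp is where hardware reads next; one slot stays unused so that
 * wp == rp unambiguously means "empty". */
static uint32_t avail_desc(uint32_t wp, uint32_t rp, uint32_t len)
{
	return (rp > wp) ? rp - wp - 1 : len - wp + rp - 1;
}

int main(void)
{
	uint32_t len = 8, wp = 0, rp = 0;

	assert(avail_desc(wp, rp, len) == 7);      /* empty ring */

	/* Producer: claim one descriptor, then advance and wrap wp,
	 * as rtw_pci_tx_write_data() does at lines 886..887. */
	if (++wp >= len)
		wp = 0;
	assert(avail_desc(wp, rp, len) == 6);

	/* PSB length: ceiling division into 128-byte units,
	 * psb_len = (skb->len - 1) / 128 + 1 in the listing. */
	uint32_t skb_len = 300;
	printf("psb_len = %u\n", (skb_len - 1) / 128 + 1); /* 3 */
	return 0;
}
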
905 return -ENOMEM; in rtw_pci_write_data_rsvd_page()
929 return -ENOMEM; in rtw_pci_write_data_h2c()
946 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_tx_write()
955 ring = &rtwpci->tx_rings[queue]; in rtw_pci_tx_write()
956 spin_lock_bh(&rtwpci->irq_lock); in rtw_pci_tx_write()
957 if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) { in rtw_pci_tx_write()
958 ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb)); in rtw_pci_tx_write()
959 ring->queue_stopped = true; in rtw_pci_tx_write()
961 spin_unlock_bh(&rtwpci->irq_lock); in rtw_pci_tx_write()
969 struct ieee80211_hw *hw = rtwdev->hw; in rtw_pci_tx_isr()
979 ring = &rtwpci->tx_rings[hw_queue]; in rtw_pci_tx_isr()
985 rp_idx = ring->r.rp; in rtw_pci_tx_isr()
986 if (cur_rp >= ring->r.rp) in rtw_pci_tx_isr()
987 count = cur_rp - ring->r.rp; in rtw_pci_tx_isr()
989 count = ring->r.len - (ring->r.rp - cur_rp); in rtw_pci_tx_isr()
991 while (count--) { in rtw_pci_tx_isr()
992 skb = skb_dequeue(&ring->queue); in rtw_pci_tx_isr()
994 rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n", in rtw_pci_tx_isr()
995 count, hw_queue, bd_idx, ring->r.rp, cur_rp); in rtw_pci_tx_isr()
999 dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len, in rtw_pci_tx_isr()
1008 if (ring->queue_stopped && in rtw_pci_tx_isr()
1009 avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) { in rtw_pci_tx_isr()
1012 ring->queue_stopped = false; in rtw_pci_tx_isr()
1015 if (++rp_idx >= ring->r.len) in rtw_pci_tx_isr()
1018 skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz); in rtw_pci_tx_isr()
1023 if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) { in rtw_pci_tx_isr()
1024 rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn); in rtw_pci_tx_isr()
1029 if (info->flags & IEEE80211_TX_CTL_NO_ACK) in rtw_pci_tx_isr()
1030 info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; in rtw_pci_tx_isr()
1032 info->flags |= IEEE80211_TX_STAT_ACK; in rtw_pci_tx_isr()
1038 ring->r.rp = cur_rp; in rtw_pci_tx_isr()
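
rtw_pci_tx_isr (lines 985..989) derives the completion count as the distance from the cached read pointer to the hardware's current one, wrapping once around the ring. The same two-case arithmetic, as a runnable helper:

#include <stdint.h>
#include <stdio.h>

/* How many descriptors the hardware consumed since last service:
 * the distance from our cached rp to the hardware's current rp,
 * wrapping once around a ring of length len. */
static uint32_t completed(uint32_t cur_rp, uint32_t rp, uint32_t len)
{
	return (cur_rp >= rp) ? cur_rp - rp : len - (rp - cur_rp);
}

int main(void)
{
	printf("%u\n", completed(5, 2, 8)); /* 3: no wrap */
	printf("%u\n", completed(1, 6, 8)); /* 3: wrapped past the end */
	return 0;
}
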
1043 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_rx_isr()
1044 struct napi_struct *napi = &rtwpci->napi; in rtw_pci_rx_isr()
1056 ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU]; in rtw_pci_get_hw_rx_ring_nr()
1059 if (cur_wp >= ring->r.wp) in rtw_pci_get_hw_rx_ring_nr()
1060 count = cur_wp - ring->r.wp; in rtw_pci_get_hw_rx_ring_nr()
1062 count = ring->r.len - (ring->r.wp - cur_wp); in rtw_pci_get_hw_rx_ring_nr()
1070 struct rtw_chip_info *chip = rtwdev->chip; in rtw_pci_rx_napi()
1071 struct napi_struct *napi = &rtwpci->napi; in rtw_pci_rx_napi()
1072 struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU]; in rtw_pci_rx_napi()
1076 u32 cur_rp = ring->r.rp; in rtw_pci_rx_napi()
1079 u32 pkt_desc_sz = chip->rx_pkt_desc_sz; in rtw_pci_rx_napi()
1080 u32 buf_desc_sz = chip->rx_buf_desc_sz; in rtw_pci_rx_napi()
1088 while (count--) { in rtw_pci_rx_napi()
1090 skb = ring->buf[cur_rp]; in rtw_pci_rx_napi()
1091 dma = *((dma_addr_t *)skb->cb); in rtw_pci_rx_napi()
1092 dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE, in rtw_pci_rx_napi()
1094 rx_desc = skb->data; in rtw_pci_rx_napi()
1095 chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status); in rtw_pci_rx_napi()
1110 skb_put_data(new, skb->data, new_len); in rtw_pci_rx_napi()
1119 memcpy(new->cb, &rx_status, sizeof(rx_status)); in rtw_pci_rx_napi()
1120 ieee80211_rx_napi(rtwdev->hw, NULL, new, napi); in rtw_pci_rx_napi()
1125 /* new skb delivered to mac80211, re-enable original skb DMA */ in rtw_pci_rx_napi()
1130 if (++cur_rp >= ring->r.len) in rtw_pci_rx_napi()
1134 ring->r.rp = cur_rp; in rtw_pci_rx_napi()
1135 /* 'rp', the last position we have read, is seen as previous position in rtw_pci_rx_napi()
1138 ring->r.wp = cur_rp; in rtw_pci_rx_napi()
1139 rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp); in rtw_pci_rx_napi()
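
Per the comment at line 1135, after an RX pass the software read position doubles as the "previous wp" used to size the next pass, and it is published to the hardware RXBD index register so the device knows those buffers are reusable. A minimal model of that hand-off (rx_service_done and the mock register are illustrative names):

#include <stdint.h>
#include <stdio.h>

struct rx_ring { uint32_t rp, wp, len; };

/* RX-side bookkeeping from rtw_pci_rx_napi(): after servicing, the
 * software read pointer becomes the reference point for the next count
 * and is written out to the hardware RXBD index register. */
static void rx_service_done(struct rx_ring *r, uint32_t cur_rp,
			    uint16_t *rxbd_idx_reg /* mock MMIO */)
{
	r->rp = cur_rp;
	r->wp = cur_rp;              /* "previous position" for next pass */
	*rxbd_idx_reg = (uint16_t)r->rp;
}

int main(void)
{
	struct rx_ring r = { .len = 512 };
	uint16_t reg = 0;

	rx_service_done(&r, 42, &reg);
	printf("rp=%u wp=%u reg=%u\n", r.rp, r.wp, reg);
	return 0;
}
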
1149 spin_lock_irqsave(&rtwpci->hwirq_lock, flags); in rtw_pci_irq_recognized()
1157 irq_status[0] &= rtwpci->irq_mask[0]; in rtw_pci_irq_recognized()
1158 irq_status[1] &= rtwpci->irq_mask[1]; in rtw_pci_irq_recognized()
1159 irq_status[3] &= rtwpci->irq_mask[3]; in rtw_pci_irq_recognized()
1165 spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags); in rtw_pci_irq_recognized()
1171 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_interrupt_handler()
1177 * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs in rtw_pci_interrupt_handler()
1178 * are cleared, the edge-triggered interrupt will not be generated when in rtw_pci_interrupt_handler()
1189 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_interrupt_threadfn()
1193 spin_lock_bh(&rtwpci->irq_lock); in rtw_pci_interrupt_threadfn()
1218 if (rtwpci->running) in rtw_pci_interrupt_threadfn()
1220 spin_unlock_bh(&rtwpci->irq_lock); in rtw_pci_interrupt_threadfn()
1228 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_io_mapping()
1240 rtwpci->mmap = pci_iomap(pdev, bar_id, len); in rtw_pci_io_mapping()
1241 if (!rtwpci->mmap) { in rtw_pci_io_mapping()
1244 return -ENOMEM; in rtw_pci_io_mapping()
1253 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_io_unmapping()
1255 if (rtwpci->mmap) { in rtw_pci_io_unmapping()
1256 pci_iounmap(pdev, rtwpci->mmap); in rtw_pci_io_unmapping()
1306 return -EIO; in rtw_dbi_read8()
1400 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_link_ps()
1407 * experienced some interoperability issues where the link tends in rtw_pci_link_ps()
1412 if (rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1) in rtw_pci_link_ps()
1418 struct rtw_chip_info *chip = rtwdev->chip; in rtw_pci_link_cfg()
1419 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_link_cfg()
1420 struct pci_dev *pdev = rtwpci->pdev; in rtw_pci_link_cfg()
1427 if (chip->id == RTW_CHIP_TYPE_8822C) in rtw_pci_link_cfg()
1439 * settings (e.g. CLKREQ# not configured as bi-directional), it could lead to device in rtw_pci_link_cfg()
1455 rtwpci->link_ctrl = link_ctrl; in rtw_pci_link_cfg()
1460 struct rtw_chip_info *chip = rtwdev->chip; in rtw_pci_interface_cfg()
1462 switch (chip->id) { in rtw_pci_interface_cfg()
1464 if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D) in rtw_pci_interface_cfg()
1475 struct rtw_chip_info *chip = rtwdev->chip; in rtw_pci_phy_cfg()
1482 cut = BIT(0) << rtwdev->hal.cut_version; in rtw_pci_phy_cfg()
1484 for (i = 0; i < chip->intf_table->n_gen1_para; i++) { in rtw_pci_phy_cfg()
1485 para = &chip->intf_table->gen1_para[i]; in rtw_pci_phy_cfg()
1486 if (!(para->cut_mask & cut)) in rtw_pci_phy_cfg()
1488 if (para->offset == 0xffff) in rtw_pci_phy_cfg()
1490 offset = para->offset; in rtw_pci_phy_cfg()
1491 value = para->value; in rtw_pci_phy_cfg()
1492 if (para->ip_sel == RTW_IP_SEL_PHY) in rtw_pci_phy_cfg()
1498 for (i = 0; i < chip->intf_table->n_gen2_para; i++) { in rtw_pci_phy_cfg()
1499 para = &chip->intf_table->gen2_para[i]; in rtw_pci_phy_cfg()
1500 if (!(para->cut_mask & cut)) in rtw_pci_phy_cfg()
1502 if (para->offset == 0xffff) in rtw_pci_phy_cfg()
1504 offset = para->offset; in rtw_pci_phy_cfg()
1505 value = para->value; in rtw_pci_phy_cfg()
1506 if (para->ip_sel == RTW_IP_SEL_PHY) in rtw_pci_phy_cfg()
1518 struct rtw_dev *rtwdev = hw->priv; in rtw_pci_suspend()
1519 struct rtw_chip_info *chip = rtwdev->chip; in rtw_pci_suspend()
1520 struct rtw_efuse *efuse = &rtwdev->efuse; in rtw_pci_suspend()
1522 if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6) in rtw_pci_suspend()
1530 struct rtw_dev *rtwdev = hw->priv; in rtw_pci_resume()
1531 struct rtw_chip_info *chip = rtwdev->chip; in rtw_pci_resume()
1532 struct rtw_efuse *efuse = &rtwdev->efuse; in rtw_pci_resume()
1534 if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6) in rtw_pci_resume()
1553 pci_set_drvdata(pdev, rtwdev->hw); in rtw_pci_claim()
1554 SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev); in rtw_pci_claim()
1570 rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_setup_resource()
1571 rtwpci->pdev = pdev; in rtw_pci_setup_resource()
1636 ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq, in rtw_pci_request_irq()
1650 devm_free_irq(rtwdev->dev, pdev->irq, rtwdev); in rtw_pci_free_irq()
1665 budget - work_done); in rtw_pci_napi_poll()
1672 spin_lock_bh(&rtwpci->irq_lock); in rtw_pci_napi_poll()
1673 if (rtwpci->running) in rtw_pci_napi_poll()
1675 spin_unlock_bh(&rtwpci->irq_lock); in rtw_pci_napi_poll()
1690 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_napi_init()
1692 init_dummy_netdev(&rtwpci->netdev); in rtw_pci_napi_init()
1693 netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll, in rtw_pci_napi_init()
1699 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_napi_deinit()
1702 netif_napi_del(&rtwpci->napi); in rtw_pci_napi_deinit()
1712 uintptr_t dis_caps = (uintptr_t)dmi->driver_data; in disable_pci_caps()
1734 .ident = "HP HP Pavilion Laptop 14-ce0xxx",
1737 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Laptop 14-ce0xxx"),
1755 dev_err(&pdev->dev, "failed to allocate hw\n"); in rtw_pci_probe()
1756 return -ENOMEM; in rtw_pci_probe()
1759 rtwdev = hw->priv; in rtw_pci_probe()
1760 rtwdev->hw = hw; in rtw_pci_probe()
1761 rtwdev->dev = &pdev->dev; in rtw_pci_probe()
1762 rtwdev->chip = (struct rtw_chip_info *)id->driver_data; in rtw_pci_probe()
1763 rtwdev->hci.ops = &rtw_pci_ops; in rtw_pci_probe()
1764 rtwdev->hci.type = RTW_HCI_TYPE_PCIE; in rtw_pci_probe()
1772 pdev->vendor, pdev->device, pdev->revision); in rtw_pci_probe()
1837 rtwdev = hw->priv; in rtw_pci_remove()
1838 rtwpci = (struct rtw_pci *)rtwdev->priv; in rtw_pci_remove()
1860 rtwdev = hw->priv; in rtw_pci_shutdown()
1861 chip = rtwdev->chip; in rtw_pci_shutdown()
1863 if (chip->ops->shutdown) in rtw_pci_shutdown()
1864 chip->ops->shutdown(rtwdev); in rtw_pci_shutdown()