Lines matching refs: nhi
47 bit += ring->nhi->hop_count; in ring_interrupt_index()
72 index = ring->hop + ring->nhi->hop_count; in ring_interrupt_active()
78 misc = ioread32(ring->nhi->iobase + REG_DMA_MISC); in ring_interrupt_active()
81 iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC); in ring_interrupt_active()
84 ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE; in ring_interrupt_active()
94 old = ioread32(ring->nhi->iobase + reg); in ring_interrupt_active()
100 dev_dbg(&ring->nhi->pdev->dev, in ring_interrupt_active()
105 dev_WARN(&ring->nhi->pdev->dev, in ring_interrupt_active()
109 iowrite32(new, ring->nhi->iobase + reg); in ring_interrupt_active()
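
The ring_interrupt_index()/ring_interrupt_active() lines above imply a simple bit layout: TX ring N owns interrupt bit N, RX ring N owns bit N + hop_count, and each run of 32 bits maps to one 32-bit register. A small user-space model of that mapping (a sketch for illustration, not code from the driver; the hop_count value here is made up):

#include <stdio.h>
#include <stdbool.h>

/* Bit owned by a ring in the ring-interrupt registers: TX rings first,
 * then RX rings, as suggested by "bit += ring->nhi->hop_count" above. */
static int ring_interrupt_bit(int hop, bool is_tx, int hop_count)
{
	int bit = hop;

	if (!is_tx)
		bit += hop_count;
	return bit;
}

int main(void)
{
	const int hop_count = 32;	/* real value comes from REG_HOP_COUNT */
	int bit = ring_interrupt_bit(3, false, hop_count);

	/* 4 * (bit / 32) is the byte offset of the register holding the bit */
	printf("RX ring 3: register offset +%d, bit %d\n", 4 * (bit / 32), bit % 32);
	return 0;
}
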
117 static void nhi_disable_interrupts(struct tb_nhi *nhi) in nhi_disable_interrupts() argument
121 for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++) in nhi_disable_interrupts()
122 iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i); in nhi_disable_interrupts()
125 for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++) in nhi_disable_interrupts()
126 ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i); in nhi_disable_interrupts()
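
nhi_disable_interrupts() masks every ring interrupt by zeroing each 32-bit enable register and then reads back the notify registers to clear any pending bits. Assuming the RING_INTERRUPT_REG_COUNT/RING_NOTIFY_REG_COUNT macros simply round the bit counts up to whole 32-bit registers (an assumption; their bodies are not shown in this listing), the register counts work out as in this sketch:

#include <stdio.h>

/* Assumed meaning of the register-count macros: 2 * hop_count interrupt
 * enable bits (TX + RX) and 3 * hop_count notify bits, rounded up to whole
 * 32-bit registers.  Illustration only. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* 12 and 32 are the hop counts nhi_probe() expects (see below) */
	for (int hop_count = 12; hop_count <= 32; hop_count += 20) {
		printf("hop_count %2d: %d interrupt regs, %d notify regs\n",
		       hop_count,
		       DIV_ROUND_UP(2 * hop_count, 32),
		       DIV_ROUND_UP(3 * hop_count, 32));
	}
	return 0;
}
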
133 void __iomem *io = ring->nhi->iobase; in ring_desc_base()
141 void __iomem *io = ring->nhi->iobase; in ring_options_base()
343 val = ioread32(ring->nhi->iobase + reg); in __ring_interrupt_mask()
348 iowrite32(val, ring->nhi->iobase + reg); in __ring_interrupt_mask()
376 spin_lock_irqsave(&ring->nhi->lock, flags); in tb_ring_poll_complete()
381 spin_unlock_irqrestore(&ring->nhi->lock, flags); in tb_ring_poll_complete()
389 spin_lock(&ring->nhi->lock); in ring_msix()
393 spin_unlock(&ring->nhi->lock); in ring_msix()
400 struct tb_nhi *nhi = ring->nhi; in ring_request_msix() local
404 if (!nhi->pdev->msix_enabled) in ring_request_msix()
407 ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL); in ring_request_msix()
413 ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector); in ring_request_msix()
427 ida_simple_remove(&ring->nhi->msix_ida, ring->vector); in ring_release_msix()
432 static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring) in nhi_alloc_hop() argument
436 spin_lock_irq(&nhi->lock); in nhi_alloc_hop()
445 for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) { in nhi_alloc_hop()
447 if (!nhi->tx_rings[i]) { in nhi_alloc_hop()
452 if (!nhi->rx_rings[i]) { in nhi_alloc_hop()
460 if (ring->hop < 0 || ring->hop >= nhi->hop_count) { in nhi_alloc_hop()
461 dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop); in nhi_alloc_hop()
465 if (ring->is_tx && nhi->tx_rings[ring->hop]) { in nhi_alloc_hop()
466 dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n", in nhi_alloc_hop()
470 } else if (!ring->is_tx && nhi->rx_rings[ring->hop]) { in nhi_alloc_hop()
471 dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n", in nhi_alloc_hop()
478 nhi->tx_rings[ring->hop] = ring; in nhi_alloc_hop()
480 nhi->rx_rings[ring->hop] = ring; in nhi_alloc_hop()
483 spin_unlock_irq(&nhi->lock); in nhi_alloc_hop()
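
In the nhi_alloc_hop() lines above, a negative ring->hop means "pick the first free HopID starting at RING_FIRST_USABLE_HOPID", while a non-negative value must be in range and not already present in tx_rings[]/rx_rings[]; all of it happens under nhi->lock. A stripped-down user-space model of just that selection logic (illustrative only; HOP_COUNT and FIRST_USABLE_HOPID here are stand-ins for the driver's values):

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

#define HOP_COUNT		32	/* assumed; real value comes from REG_HOP_COUNT */
#define FIRST_USABLE_HOPID	1	/* stands in for RING_FIRST_USABLE_HOPID */

static int alloc_hop(bool *in_use, int requested)
{
	int hop = requested;

	if (hop < 0) {			/* auto-allocate the first free HopID */
		for (int i = FIRST_USABLE_HOPID; i < HOP_COUNT; i++) {
			if (!in_use[i]) {
				hop = i;
				break;
			}
		}
	}
	if (hop < 0 || hop >= HOP_COUNT || in_use[hop])
		return -EINVAL;		/* invalid or already allocated */
	in_use[hop] = true;
	return hop;
}

int main(void)
{
	bool tx_in_use[HOP_COUNT] = { false };

	printf("auto-allocated hop: %d\n", alloc_hop(tx_in_use, -1));
	printf("explicit hop 5:     %d\n", alloc_hop(tx_in_use, 5));
	printf("hop 5 again:        %d\n", alloc_hop(tx_in_use, 5));	/* already taken */
	return 0;
}
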
488 static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size, in tb_ring_alloc() argument
496 dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n", in tb_ring_alloc()
512 ring->nhi = nhi; in tb_ring_alloc()
525 ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev, in tb_ring_alloc()
534 if (nhi_alloc_hop(nhi, ring)) in tb_ring_alloc()
542 dma_free_coherent(&ring->nhi->pdev->dev, in tb_ring_alloc()
558 struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size, in tb_ring_alloc_tx() argument
561 return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL); in tb_ring_alloc_tx()
578 struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size, in tb_ring_alloc_rx() argument
582 return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask, in tb_ring_alloc_rx()
597 spin_lock_irq(&ring->nhi->lock); in tb_ring_start()
599 if (ring->nhi->going_away) in tb_ring_start()
602 dev_WARN(&ring->nhi->pdev->dev, "ring already started\n"); in tb_ring_start()
605 dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n", in tb_ring_start()
646 spin_unlock_irq(&ring->nhi->lock); in tb_ring_start()
665 spin_lock_irq(&ring->nhi->lock); in tb_ring_stop()
667 dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n", in tb_ring_stop()
669 if (ring->nhi->going_away) in tb_ring_stop()
672 dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n", in tb_ring_stop()
688 spin_unlock_irq(&ring->nhi->lock); in tb_ring_stop()
710 spin_lock_irq(&ring->nhi->lock); in tb_ring_free()
716 ring->nhi->tx_rings[ring->hop] = NULL; in tb_ring_free()
718 ring->nhi->rx_rings[ring->hop] = NULL; in tb_ring_free()
721 dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n", in tb_ring_free()
724 spin_unlock_irq(&ring->nhi->lock); in tb_ring_free()
728 dma_free_coherent(&ring->nhi->pdev->dev, in tb_ring_free()
736 dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring), in tb_ring_free()
758 int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data) in nhi_mailbox_cmd() argument
763 iowrite32(data, nhi->iobase + REG_INMAIL_DATA); in nhi_mailbox_cmd()
765 val = ioread32(nhi->iobase + REG_INMAIL_CMD); in nhi_mailbox_cmd()
768 iowrite32(val, nhi->iobase + REG_INMAIL_CMD); in nhi_mailbox_cmd()
772 val = ioread32(nhi->iobase + REG_INMAIL_CMD); in nhi_mailbox_cmd()
793 enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi) in nhi_mailbox_mode() argument
797 val = ioread32(nhi->iobase + REG_OUTMAIL_CMD); in nhi_mailbox_mode()
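
The nhi_mailbox_cmd() lines show the in-mail handshake: write the data word to REG_INMAIL_DATA, read-modify-write the command into REG_INMAIL_CMD, then re-read that register until the request completes (the driver bounds the wait with a timeout). A self-contained skeleton of that kick-and-poll pattern; the register model, bit positions and fake "latency" below are invented for the sketch and are not the driver's constants:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define OP_REQUEST	(1u << 31)	/* invented bit layout for the sketch */
#define OP_ERROR	(1u << 30)

static uint32_t fake_inmail_cmd;	/* stands in for ioread32/iowrite32 on REG_INMAIL_CMD */
static int fake_busy_reads = 3;		/* pretend the command needs a few polls to finish */

static uint32_t reg_read(void)
{
	if ((fake_inmail_cmd & OP_REQUEST) && --fake_busy_reads == 0)
		fake_inmail_cmd &= ~OP_REQUEST;	/* "hardware" finished the command */
	return fake_inmail_cmd;
}

static void reg_write(uint32_t val)
{
	fake_inmail_cmd = val;
}

static int mailbox_cmd(uint32_t cmd)
{
	uint32_t val = reg_read();

	val |= OP_REQUEST | cmd;	/* kick the command */
	reg_write(val);

	for (int tries = 0; tries < 1000; tries++) {	/* the driver uses a real timeout here */
		val = reg_read();
		if (!(val & OP_REQUEST))
			return (val & OP_ERROR) ? -EIO : 0;
	}
	return -ETIMEDOUT;
}

int main(void)
{
	printf("mailbox_cmd(2) returned %d\n", mailbox_cmd(2));
	return 0;
}
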
806 struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work); in nhi_interrupt_work() local
813 spin_lock_irq(&nhi->lock); in nhi_interrupt_work()
820 for (bit = 0; bit < 3 * nhi->hop_count; bit++) { in nhi_interrupt_work()
822 value = ioread32(nhi->iobase in nhi_interrupt_work()
825 if (++hop == nhi->hop_count) { in nhi_interrupt_work()
832 dev_warn(&nhi->pdev->dev, in nhi_interrupt_work()
838 ring = nhi->tx_rings[hop]; in nhi_interrupt_work()
840 ring = nhi->rx_rings[hop]; in nhi_interrupt_work()
842 dev_warn(&nhi->pdev->dev, in nhi_interrupt_work()
853 spin_unlock_irq(&nhi->lock); in nhi_interrupt_work()
858 struct tb_nhi *nhi = data; in nhi_msi() local
859 schedule_work(&nhi->interrupt_work); in nhi_msi()
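
nhi_interrupt_work() walks 3 * hop_count status bits out of the notify registers, wrapping hop back to 0 and bumping the type each time it reaches hop_count; that bookkeeping is equivalent to type = bit / hop_count, hop = bit % hop_count, with type 0 and 1 looking up tx_rings[]/rx_rings[] and the last group reported as RX overflow. A tiny decode built on that layout, for illustration only (the hop_count and sample bits are made up):

#include <stdio.h>

int main(void)
{
	const int hop_count = 12;	/* assumed; the driver reads this from REG_HOP_COUNT */
	const char *type_name[] = { "TX complete", "RX complete", "RX overflow" };
	int samples[] = { 0, 5, 13, 25, 35 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int bit  = samples[i];
		int type = bit / hop_count;	/* which of the three bit groups */
		int hop  = bit % hop_count;	/* ring (HopID) within the group */
		int reg  = bit / 32;		/* 32-bit notify register holding the bit */

		printf("bit %2d -> notify reg %d, %s for ring %d\n",
		       bit, reg, type_name[type], hop);
	}
	return 0;
}
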
867 struct tb_nhi *nhi = tb->nhi; in __nhi_suspend_noirq() local
874 if (nhi->ops && nhi->ops->suspend_noirq) { in __nhi_suspend_noirq()
875 ret = nhi->ops->suspend_noirq(tb->nhi, wakeup); in __nhi_suspend_noirq()
911 static void nhi_enable_int_throttling(struct tb_nhi *nhi) in nhi_enable_int_throttling() argument
923 iowrite32(throttle, nhi->iobase + reg); in nhi_enable_int_throttling()
931 struct tb_nhi *nhi = tb->nhi; in nhi_resume_noirq() local
940 nhi->going_away = true; in nhi_resume_noirq()
942 if (nhi->ops && nhi->ops->resume_noirq) { in nhi_resume_noirq()
943 ret = nhi->ops->resume_noirq(nhi); in nhi_resume_noirq()
947 nhi_enable_int_throttling(tb->nhi); in nhi_resume_noirq()
981 struct tb_nhi *nhi = tb->nhi; in nhi_runtime_suspend() local
988 if (nhi->ops && nhi->ops->runtime_suspend) { in nhi_runtime_suspend()
989 ret = nhi->ops->runtime_suspend(tb->nhi); in nhi_runtime_suspend()
1000 struct tb_nhi *nhi = tb->nhi; in nhi_runtime_resume() local
1003 if (nhi->ops && nhi->ops->runtime_resume) { in nhi_runtime_resume()
1004 ret = nhi->ops->runtime_resume(nhi); in nhi_runtime_resume()
1009 nhi_enable_int_throttling(nhi); in nhi_runtime_resume()
1013 static void nhi_shutdown(struct tb_nhi *nhi) in nhi_shutdown() argument
1017 dev_dbg(&nhi->pdev->dev, "shutdown\n"); in nhi_shutdown()
1019 for (i = 0; i < nhi->hop_count; i++) { in nhi_shutdown()
1020 if (nhi->tx_rings[i]) in nhi_shutdown()
1021 dev_WARN(&nhi->pdev->dev, in nhi_shutdown()
1023 if (nhi->rx_rings[i]) in nhi_shutdown()
1024 dev_WARN(&nhi->pdev->dev, in nhi_shutdown()
1027 nhi_disable_interrupts(nhi); in nhi_shutdown()
1032 if (!nhi->pdev->msix_enabled) { in nhi_shutdown()
1033 devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi); in nhi_shutdown()
1034 flush_work(&nhi->interrupt_work); in nhi_shutdown()
1036 ida_destroy(&nhi->msix_ida); in nhi_shutdown()
1038 if (nhi->ops && nhi->ops->shutdown) in nhi_shutdown()
1039 nhi->ops->shutdown(nhi); in nhi_shutdown()
1042 static int nhi_init_msi(struct tb_nhi *nhi) in nhi_init_msi() argument
1044 struct pci_dev *pdev = nhi->pdev; in nhi_init_msi()
1048 nhi_disable_interrupts(nhi); in nhi_init_msi()
1050 nhi_enable_int_throttling(nhi); in nhi_init_msi()
1052 ida_init(&nhi->msix_ida); in nhi_init_msi()
1067 INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work); in nhi_init_msi()
1069 irq = pci_irq_vector(nhi->pdev, 0); in nhi_init_msi()
1074 IRQF_NO_SUSPEND, "thunderbolt", nhi); in nhi_init_msi()
1096 struct tb_nhi *nhi; in nhi_probe() local
1117 nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL); in nhi_probe()
1118 if (!nhi) in nhi_probe()
1121 nhi->pdev = pdev; in nhi_probe()
1122 nhi->ops = (const struct tb_nhi_ops *)id->driver_data; in nhi_probe()
1124 nhi->iobase = pcim_iomap_table(pdev)[0]; in nhi_probe()
1125 nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff; in nhi_probe()
1126 if (nhi->hop_count != 12 && nhi->hop_count != 32) in nhi_probe()
1128 nhi->hop_count); in nhi_probe()
1130 nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, in nhi_probe()
1131 sizeof(*nhi->tx_rings), GFP_KERNEL); in nhi_probe()
1132 nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, in nhi_probe()
1133 sizeof(*nhi->rx_rings), GFP_KERNEL); in nhi_probe()
1134 if (!nhi->tx_rings || !nhi->rx_rings) in nhi_probe()
1137 res = nhi_init_msi(nhi); in nhi_probe()
1143 spin_lock_init(&nhi->lock); in nhi_probe()
1155 if (nhi->ops && nhi->ops->init) { in nhi_probe()
1156 res = nhi->ops->init(nhi); in nhi_probe()
1161 tb = icm_probe(nhi); in nhi_probe()
1163 tb = tb_probe(nhi); in nhi_probe()
1165 dev_err(&nhi->pdev->dev, in nhi_probe()
1170 dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n"); in nhi_probe()
1179 nhi_shutdown(nhi); in nhi_probe()
1195 struct tb_nhi *nhi = tb->nhi; in nhi_remove() local
1202 nhi_shutdown(nhi); in nhi_remove()