/Linux-v5.10/drivers/infiniband/hw/bnxt_re/ |
D | qplib_res.h |
     71  #define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))  argument
     73  #define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \  argument
     74          ((HWQ_CMP(hwq->prod, hwq) \
     75          - HWQ_CMP(hwq->cons, hwq)) \
     76          & (hwq->max_elements - 1)))
    173  struct bnxt_qplib_hwq *hwq;  member
    291  static inline u8 bnxt_qplib_base_pg_size(struct bnxt_qplib_hwq *hwq)  in bnxt_qplib_base_pg_size() argument
    296  pbl = &hwq->pbl[PBL_LVL_0];  in bnxt_qplib_base_pg_size()
    323  static inline void *bnxt_qplib_get_qe(struct bnxt_qplib_hwq *hwq,  in bnxt_qplib_get_qe() argument
    328  pg_num = (indx / hwq->qe_ppg);  in bnxt_qplib_get_qe()
    [all …]
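The two macros at the top are the whole indexing scheme: max_elements is a power of two, so `idx & (max_elements - 1)` is a cheap modulo, and the free-slot count is the ring size minus the masked producer/consumer distance. A minimal userspace sketch of the same arithmetic (the struct is reduced to the three fields the math touches; nothing else here comes from the driver):

#include <stdio.h>

/* Reduced stand-in for struct bnxt_qplib_hwq: just the indexing fields. */
struct hwq_sketch {
	unsigned int prod;          /* free-running producer index */
	unsigned int cons;          /* free-running consumer index */
	unsigned int max_elements;  /* ring size, must be a power of two */
};

/* idx & (size - 1) == idx % size when size is a power of two. */
#define HWQ_CMP(idx, hwq)  ((idx) & ((hwq)->max_elements - 1))

/* Slots not yet consumed: ring size minus the masked prod/cons distance. */
#define HWQ_FREE_SLOTS(hwq)                          \
	((hwq)->max_elements -                       \
	 ((HWQ_CMP((hwq)->prod, hwq) -               \
	   HWQ_CMP((hwq)->cons, hwq)) &              \
	  ((hwq)->max_elements - 1)))

int main(void)
{
	struct hwq_sketch q = { .prod = 5, .cons = 2, .max_elements = 8 };

	/* 3 entries in flight, so 8 - 3 = 5 slots are free. */
	printf("used=%u free=%u\n",
	       q.max_elements - HWQ_FREE_SLOTS(&q), HWQ_FREE_SLOTS(&q));

	/* The mask also handles indices that ran past the ring size. */
	q.prod = 10;	/* masked slot 2 */
	q.cons = 7;	/* masked slot 7: distance is (2 - 7) & 7 = 3 */
	printf("wrapped: free=%u\n", HWQ_FREE_SLOTS(&q));
	return 0;
}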
|
D | qplib_fp.c |
     75  dev_dbg(&scq->hwq.pdev->dev,  in __bnxt_qplib_add_flush_qp()
     83  dev_dbg(&rcq->hwq.pdev->dev,  in __bnxt_qplib_add_flush_qp()
    142  qp->sq.hwq.prod = 0;  in bnxt_qplib_clean_qp()
    143  qp->sq.hwq.cons = 0;  in bnxt_qplib_clean_qp()
    145  qp->rq.hwq.prod = 0;  in bnxt_qplib_clean_qp()
    146  qp->rq.hwq.cons = 0;  in bnxt_qplib_clean_qp()
    236  struct bnxt_qplib_hwq *hwq = &nq->hwq;  in clean_nq() local
    243  spin_lock_bh(&hwq->lock);  in clean_nq()
    245  raw_cons = hwq->cons;  in clean_nq()
    247  sw_cons = HWQ_CMP(raw_cons, hwq);  in clean_nq()
    [all …]
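The clean_nq() hits show the consumer side of the same ring: take the queue lock, read the free-running consumer index, and mask it with HWQ_CMP to find the slot to inspect. A hedged userspace sketch of that drain loop (the validity test and the lock are simplified placeholders, not driver code):

#include <stdio.h>

#define RING_SIZE 8   /* power of two, like hwq->max_elements */

static int ring[RING_SIZE];   /* stand-in for the NQE array */

/* Drain valid entries, advancing a free-running consumer index.
 * In the driver this runs under spin_lock_bh(&hwq->lock). */
static unsigned int drain(unsigned int raw_cons)
{
	for (;;) {
		unsigned int sw_cons = raw_cons & (RING_SIZE - 1); /* HWQ_CMP */

		if (!ring[sw_cons])       /* no more valid elements */
			break;
		printf("consumed slot %u (raw %u)\n", sw_cons, raw_cons);
		ring[sw_cons] = 0;        /* mark slot empty */
		raw_cons++;               /* raw index is never wrapped by hand */
	}
	return raw_cons;                  /* written back to hwq->cons */
}

int main(void)
{
	ring[6] = ring[7] = ring[0] = 1;  /* three entries spanning the wrap */
	printf("new cons=%u\n", drain(6)); /* 9: the mask handled the wrap */
	return 0;
}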
|
D | qplib_res.c |
    156  struct bnxt_qplib_hwq *hwq)  in bnxt_qplib_free_hwq() argument
    160  if (!hwq->max_elements)  in bnxt_qplib_free_hwq()
    162  if (hwq->level >= PBL_LVL_MAX)  in bnxt_qplib_free_hwq()
    165  for (i = 0; i < hwq->level + 1; i++) {  in bnxt_qplib_free_hwq()
    166  if (i == hwq->level)  in bnxt_qplib_free_hwq()
    167  __free_pbl(res, &hwq->pbl[i], hwq->is_user);  in bnxt_qplib_free_hwq()
    169  __free_pbl(res, &hwq->pbl[i], false);  in bnxt_qplib_free_hwq()
    172  hwq->level = PBL_LVL_MAX;  in bnxt_qplib_free_hwq()
    173  hwq->max_elements = 0;  in bnxt_qplib_free_hwq()
    174  hwq->element_size = 0;  in bnxt_qplib_free_hwq()
    [all …]
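bnxt_qplib_free_hwq() tears down a multi-level page-buffer list: bail out if nothing was allocated or if it was already freed, free level 0 up to the leaf level (only the leaf may be user-owned memory), then poison level and zero max_elements so a repeat call is a no-op. The `if (...hwq.max_elements)` guards in qplib_sp.c further down rely on exactly that reset. A reduced sketch of the flow (field and level names mirror the driver, everything else is illustrative):

#include <stdbool.h>
#include <stdio.h>

enum { PBL_LVL_0, PBL_LVL_1, PBL_LVL_2, PBL_LVL_MAX };

/* Reduced stand-in for struct bnxt_qplib_hwq's teardown state. */
struct hwq_sketch {
	int level;                 /* deepest allocated PBL level */
	unsigned int max_elements; /* 0 means "never allocated" */
	bool is_user;              /* leaf pages owned by userspace? */
};

static void free_pbl(int lvl, bool user_owned)
{
	printf("freeing PBL level %d%s\n", lvl, user_owned ? " (user)" : "");
}

/* Mirrors the bnxt_qplib_free_hwq() flow: skip if nothing was allocated,
 * free each level (only the leaf may be user memory), then reset the
 * fields so a second call is a harmless no-op. */
static void free_hwq(struct hwq_sketch *hwq)
{
	int i;

	if (!hwq->max_elements)
		return;                     /* never allocated */
	if (hwq->level >= PBL_LVL_MAX)
		return;                     /* already torn down */

	for (i = 0; i < hwq->level + 1; i++)
		free_pbl(i, i == hwq->level && hwq->is_user);

	hwq->level = PBL_LVL_MAX;           /* poison for reuse checks */
	hwq->max_elements = 0;
}

int main(void)
{
	struct hwq_sketch q = { .level = PBL_LVL_1, .max_elements = 64 };

	free_hwq(&q);   /* frees levels 0 and 1 */
	free_hwq(&q);   /* no-op: max_elements is now 0 */
	return 0;
}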
|
D | qplib_rcfw.c |
     92  struct bnxt_qplib_hwq *hwq = &cmdq->hwq;  in __send_message() local
    126  spin_lock_irqsave(&hwq->lock, flags);  in __send_message()
    127  if (req->cmd_size >= HWQ_FREE_SLOTS(hwq)) {  in __send_message()
    129  spin_unlock_irqrestore(&hwq->lock, flags);  in __send_message()
    143  spin_unlock_irqrestore(&hwq->lock, flags);  in __send_message()
    168  sw_prod = HWQ_CMP(hwq->prod, hwq);  in __send_message()
    169  cmdqe = bnxt_qplib_get_qe(hwq, sw_prod, NULL);  in __send_message()
    180  hwq->prod++;  in __send_message()
    184  cmdq_prod = hwq->prod;  in __send_message()
    200  spin_unlock_irqrestore(&hwq->lock, flags);  in __send_message()
    [all …]
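__send_message() is the producer mirror image: under the irq-safe queue lock it rejects a command that would not fit (cmd_size checked against HWQ_FREE_SLOTS), copies it in slot by slot through the masked producer index, and finally publishes hwq->prod to the doorbell register. A userspace sketch of that flow (lock elided, doorbell reduced to a printf; the int payload is illustrative):

#include <stdio.h>

#define RING_SIZE 8   /* power of two */

struct cmdq_sketch {
	unsigned int prod, cons;
	int slots[RING_SIZE];
};

/* Free slots, as HWQ_FREE_SLOTS() computes them. */
static unsigned int free_slots(const struct cmdq_sketch *q)
{
	return RING_SIZE - ((q->prod - q->cons) & (RING_SIZE - 1));
}

/* Mirrors __send_message(): reject a command that would not fit,
 * otherwise copy it in slot by slot and publish the new producer
 * index. The driver does this under spin_lock_irqsave(&hwq->lock)
 * and writes prod to a hardware doorbell. */
static int send_message(struct cmdq_sketch *q, const int *cmd,
			unsigned int cmd_size)
{
	unsigned int i;

	if (cmd_size >= free_slots(q))
		return -1;                 /* -EAGAIN in the driver */

	for (i = 0; i < cmd_size; i++) {
		unsigned int sw_prod = q->prod & (RING_SIZE - 1); /* HWQ_CMP */

		q->slots[sw_prod] = cmd[i];
		q->prod++;
	}
	printf("doorbell: prod=%u\n", q->prod);  /* cmdq_prod write */
	return 0;
}

int main(void)
{
	struct cmdq_sketch q = { 0 };
	int cmd[3] = { 1, 2, 3 };

	while (send_message(&q, cmd, 3) == 0)
		;                /* fills until fewer than 3 slots remain */
	printf("queue full at prod=%u cons=%u\n", q.prod, q.cons);
	return 0;
}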
|
D | qplib_fp.h |
     99  struct bnxt_qplib_hwq hwq;  member
    247  struct bnxt_qplib_hwq hwq;  member
    356  struct bnxt_qplib_hwq *hwq;  in bnxt_qplib_queue_full() local
    359  hwq = &que->hwq;  in bnxt_qplib_queue_full()
    361  avail = hwq->cons - hwq->prod;  in bnxt_qplib_queue_full()
    362  if (hwq->cons <= hwq->prod)  in bnxt_qplib_queue_full()
    363  avail += hwq->depth;  in bnxt_qplib_queue_full()
    400  struct bnxt_qplib_hwq hwq;  member
    473  struct bnxt_qplib_hwq hwq;  member
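bnxt_qplib_queue_full() computes availability without the power-of-two mask, so it works for any depth: avail = cons - prod, plus depth whenever the producer has caught up with or passed the consumer. A small worked version of that arithmetic:

#include <stdio.h>

/* Available slots, as in bnxt_qplib_queue_full(). Works for any
 * depth, not just powers of two; cons == prod means empty. */
static int queue_avail(int cons, int prod, int depth)
{
	int avail = cons - prod;

	if (cons <= prod)
		avail += depth;
	return avail;
}

int main(void)
{
	/* prod=5, cons=2, depth=8: 3 slots in flight, 5 available. */
	printf("avail=%d\n", queue_avail(2, 5, 8));  /* 2 - 5 + 8 = 5 */
	/* prod wrapped past the end: prod=1, cons=6, still 3 in flight. */
	printf("avail=%d\n", queue_avail(6, 1, 8));  /* 6 - 1     = 5 */
	return 0;
}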
|
D | qplib_sp.c |
    584  if (mrw->hwq.max_elements)  in bnxt_qplib_free_mrw()
    585  bnxt_qplib_free_hwq(res, &mrw->hwq);  in bnxt_qplib_free_mrw()
    643  if (mrw->hwq.max_elements) {  in bnxt_qplib_dereg_mrw()
    646  bnxt_qplib_free_hwq(res, &mrw->hwq);  in bnxt_qplib_dereg_mrw()
    681  if (mr->hwq.max_elements)  in bnxt_qplib_reg_mr()
    682  bnxt_qplib_free_hwq(res, &mr->hwq);  in bnxt_qplib_reg_mr()
    692  rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);  in bnxt_qplib_reg_mr()
    699  pbl_ptr = (dma_addr_t **)mr->hwq.pbl_ptr;  in bnxt_qplib_reg_mr()
    708  if (mr->hwq.level == PBL_LVL_MAX) {  in bnxt_qplib_reg_mr()
    714  level = mr->hwq.level + 1;  in bnxt_qplib_reg_mr()
    [all …]
|
D | qplib_rcfw.h |
    148  struct bnxt_qplib_hwq hwq;  member
    168  struct bnxt_qplib_hwq hwq;  member
|
D | qplib_sp.h |
    119  struct bnxt_qplib_hwq hwq;  member
    124  struct bnxt_qplib_hwq hwq;  member
|
D | main.c |
    1039  nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;  in bnxt_re_alloc_res()
    1047  rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;  in bnxt_re_alloc_res()
    1048  rattr.pages = nq->hwq.pbl[rdev->nq[i].hwq.level].pg_count;  in bnxt_re_alloc_res()
    1452  rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr;  in bnxt_re_dev_init()
    1453  rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;  in bnxt_re_dev_init()
|
D | ib_verbs.c |
    2467  wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];  in bnxt_re_build_reg_wqe()
    2468  wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];  in bnxt_re_build_reg_wqe()
    2471  wqe->frmr.levels = qplib_frpl->hwq.level;  in bnxt_re_build_reg_wqe()
    2901  resp.tail = cq->qplib_cq.hwq.cons;  in bnxt_re_create_cq()
    3407  lib_qp->id, lib_qp->sq.hwq.prod,  in send_phantom_wqe()
    3408  HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),  in send_phantom_wqe()
    3600  mr->qplib_mr.hwq.level = PBL_LVL_MAX;  in bnxt_re_get_dma_mr()
|
/Linux-v5.10/drivers/scsi/cxlflash/ |
D | main.c |
    159  struct hwq *hwq = get_hwq(afu, cmd->hwq_index);  in cmd_complete() local
    161  spin_lock_irqsave(&hwq->hsq_slock, lock_flags);  in cmd_complete()
    163  spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);  in cmd_complete()
    191  static void flush_pending_cmds(struct hwq *hwq)  in flush_pending_cmds() argument
    193  struct cxlflash_cfg *cfg = hwq->afu->parent;  in flush_pending_cmds()
    198  list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {  in flush_pending_cmds()
    235  static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)  in context_reset() argument
    237  struct cxlflash_cfg *cfg = hwq->afu->parent;  in context_reset()
    244  dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);  in context_reset()
    246  spin_lock_irqsave(&hwq->hsq_slock, lock_flags);  in context_reset()
    [all …]
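flush_pending_cmds() walks hwq->pending_cmds with list_for_each_entry_safe(), which keeps a lookahead pointer so the current entry can be unlinked and completed mid-walk. A minimal userspace sketch of that safe-iteration idiom (a plain singly linked list instead of the kernel's struct list_head, and the per-hwq lock is elided):

#include <stdio.h>
#include <stdlib.h>

struct cmd {
	int tag;
	struct cmd *next;
};

/* Safe iteration: capture 'next' before completing/freeing 'cur',
 * just as list_for_each_entry_safe() does in flush_pending_cmds(). */
static void flush_pending(struct cmd **head)
{
	struct cmd *cur = *head, *tmp;

	while (cur) {
		tmp = cur->next;          /* lookahead survives the free */
		printf("failing cmd %d\n", cur->tag);
		free(cur);                /* complete and unlink */
		cur = tmp;
	}
	*head = NULL;
}

int main(void)
{
	struct cmd *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct cmd *c = malloc(sizeof(*c));

		c->tag = i;
		c->next = head;
		head = c;
	}
	flush_pending(&head);
	return 0;
}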
|
D | common.h |
    196  struct hwq {  struct
    231  struct hwq hwqs[CXLFLASH_MAX_HWQS];  argument
    233  int (*context_reset)(struct hwq *hwq);
    255  static inline struct hwq *get_hwq(struct afu *afu, u32 index)  in get_hwq()
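common.h keeps an array of struct hwq embedded in struct afu and hands out pointers through get_hwq(). A reduced sketch of that accessor pattern (fields trimmed to the minimum; the bounds assert is added here for illustration, not copied from the driver):

#include <assert.h>
#include <stdio.h>

#define CXLFLASH_MAX_HWQS 8

struct hwq {
	unsigned int index;   /* which hardware queue this is */
};

struct afu {
	struct hwq hwqs[CXLFLASH_MAX_HWQS];
	unsigned int num_hwqs;
};

/* Mirrors get_hwq(): hand out a pointer into the embedded array;
 * callers are expected to pass a valid index. */
static inline struct hwq *get_hwq(struct afu *afu, unsigned int index)
{
	assert(index < afu->num_hwqs);   /* illustrative sanity check */
	return &afu->hwqs[index];
}

int main(void)
{
	struct afu afu = { .num_hwqs = 4 };

	for (unsigned int i = 0; i < afu.num_hwqs; i++)
		afu.hwqs[i].index = i;
	printf("hwq 2 -> index %u\n", get_hwq(&afu, 2)->index);
	return 0;
}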
|
D | superpipe.c |
    267  struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);  in afu_attach() local
    291  val = hwq->ctx_hndl;  in afu_attach()
    298  val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(hwq->ctx_hndl));  in afu_attach()
    1659  struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);  in cxlflash_afu_recover() local
    1736  reg = readq_be(&hwq->ctrl_map->mbox_r);  in cxlflash_afu_recover()
|
/Linux-v5.10/drivers/net/wireless/mediatek/mt76/mt7915/ |
D | dma.c |
    11  struct mt76_queue *hwq;  in mt7915_init_tx_queues() local
    14  hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);  in mt7915_init_tx_queues()
    15  if (!hwq)  in mt7915_init_tx_queues()
    18  err = mt76_queue_alloc(dev, hwq, MT7915_TXQ_BAND0, n_desc, 0,  in mt7915_init_tx_queues()
    24  dev->mt76.q_tx[i] = hwq;  in mt7915_init_tx_queues()
    32  struct mt76_queue *hwq;  in mt7915_init_mcu_queue() local
    35  hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);  in mt7915_init_mcu_queue()
    36  if (!hwq)  in mt7915_init_mcu_queue()
    39  err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);  in mt7915_init_mcu_queue()
    43  dev->mt76.q_tx[qid] = hwq;  in mt7915_init_mcu_queue()
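All four mt76 variants in this list (mt7915 here; mt7603, mt7615, and mt76x02 below) share one init shape: allocate a queue object with devm_kzalloc(), hand it to mt76_queue_alloc() for ring setup, then publish it in the per-qid q_tx[] table. A reduced userspace sketch of that shape (calloc stands in for devm_kzalloc, and the real DMA ring setup is collapsed to field assignments):

#include <stdio.h>
#include <stdlib.h>

#define MT_TXQ_MAX 4

struct mt76_queue_sketch {
	int idx;       /* hardware ring index */
	int n_desc;    /* descriptor count */
};

struct dev_sketch {
	struct mt76_queue_sketch *q_tx[MT_TXQ_MAX];
};

/* Mirrors the init flow: allocate the queue object, set it up,
 * then publish it in the per-qid table. devm_kzalloc() ties the
 * allocation to device lifetime in the driver; plain calloc here. */
static int init_tx_queue(struct dev_sketch *dev, int qid, int idx, int n_desc)
{
	struct mt76_queue_sketch *hwq;

	hwq = calloc(1, sizeof(*hwq));
	if (!hwq)
		return -1;              /* -ENOMEM in the driver */

	hwq->idx = idx;                 /* mt76_queue_alloc() does the   */
	hwq->n_desc = n_desc;           /* real ring/DMA setup           */
	dev->q_tx[qid] = hwq;
	return 0;
}

int main(void)
{
	struct dev_sketch dev = { 0 };

	if (init_tx_queue(&dev, 0, 0, 256) == 0)
		printf("q_tx[0]: idx=%d n_desc=%d\n",
		       dev.q_tx[0]->idx, dev.q_tx[0]->n_desc);
	return 0;
}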
|
/Linux-v5.10/include/scsi/ |
D | scsi_tcq.h |
    26  u16 hwq;  in scsi_host_find_tag() local
    31  hwq = blk_mq_unique_tag_to_hwq(tag);  in scsi_host_find_tag()
    32  if (hwq < shost->tag_set.nr_hw_queues) {  in scsi_host_find_tag()
    33  req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq],  in scsi_host_find_tag()
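scsi_host_find_tag() relies on the blk-mq "unique tag" layout: the hardware-queue number lives in the upper 16 bits and the per-queue tag in the lower 16 (blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag() in include/linux/blk-mq.h). A standalone demonstration of the encode/decode:

#include <stdint.h>
#include <stdio.h>

#define BLK_MQ_UNIQUE_TAG_BITS 16
#define BLK_MQ_UNIQUE_TAG_MASK ((1U << BLK_MQ_UNIQUE_TAG_BITS) - 1)

/* Same layout blk_mq_unique_tag() produces: hwq in the high half,
 * per-queue tag in the low half. */
static uint32_t unique_tag(uint16_t hwq, uint16_t tag)
{
	return ((uint32_t)hwq << BLK_MQ_UNIQUE_TAG_BITS) | tag;
}

static uint16_t unique_tag_to_hwq(uint32_t utag)
{
	return utag >> BLK_MQ_UNIQUE_TAG_BITS;  /* blk_mq_unique_tag_to_hwq() */
}

static uint16_t unique_tag_to_tag(uint32_t utag)
{
	return utag & BLK_MQ_UNIQUE_TAG_MASK;   /* blk_mq_unique_tag_to_tag() */
}

int main(void)
{
	uint32_t utag = unique_tag(3, 42);

	printf("utag=%#x hwq=%u tag=%u\n",
	       utag, unique_tag_to_hwq(utag), unique_tag_to_tag(utag));
	/* prints: utag=0x3002a hwq=3 tag=42 */
	return 0;
}

The nbd.c, virtio_scsi.c, scsi_debug.c, and skd_main.c hits below all decode the same layout, with a bounds check on hwq before indexing tag_set.tags[].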
|
/Linux-v5.10/drivers/net/wireless/mediatek/mt76/mt7603/ |
D | dma.c |
    10  struct mt76_queue *hwq;  in mt7603_init_tx_queue() local
    13  hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);  in mt7603_init_tx_queue()
    14  if (!hwq)  in mt7603_init_tx_queue()
    17  err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);  in mt7603_init_tx_queue()
    21  dev->mt76.q_tx[qid] = hwq;  in mt7603_init_tx_queue()
|
/Linux-v5.10/drivers/net/wireless/mediatek/mt76/mt7615/ |
D | dma.c |
    17  struct mt76_queue *hwq;  in mt7615_init_tx_queue() local
    20  hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);  in mt7615_init_tx_queue()
    21  if (!hwq)  in mt7615_init_tx_queue()
    24  err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);  in mt7615_init_tx_queue()
    28  dev->mt76.q_tx[qid] = hwq;  in mt7615_init_tx_queue()
|
/Linux-v5.10/drivers/net/wireless/mediatek/mt76/ |
D | tx.c |
    347  struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD];  in mt76_release_buffered_frames() local
    350  spin_lock_bh(&hwq->lock);  in mt76_release_buffered_frames()
    374  dev->queue_ops->kick(dev, hwq);  in mt76_release_buffered_frames()
    379  spin_unlock_bh(&hwq->lock);  in mt76_release_buffered_frames()
    548  struct mt76_queue *hwq;  in mt76_stop_tx_queues() local
    554  hwq = dev->q_tx[mt76_txq_get_qid(txq)];  in mt76_stop_tx_queues()
    557  spin_lock_bh(&hwq->lock);  in mt76_stop_tx_queues()
    559  spin_unlock_bh(&hwq->lock);  in mt76_stop_tx_queues()
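mt76_release_buffered_frames() takes the hardware queue's lock once, queues every released frame, and issues a single kick before unlocking, so the doorbell is rung once per batch rather than once per frame. A hedged sketch of that batch-then-kick shape (pthread mutex in place of spin_lock_bh, queue ops reduced to counters; all names are illustrative):

#include <pthread.h>
#include <stdio.h>

struct queue_sketch {
	pthread_mutex_t lock;
	int queued;       /* frames placed on the ring */
	int kicks;        /* doorbell writes */
};

static void kick(struct queue_sketch *hwq)
{
	hwq->kicks++;     /* dev->queue_ops->kick() analogue */
}

/* Queue a batch under one lock hold and ring the doorbell once. */
static void release_buffered_frames(struct queue_sketch *hwq, int nframes)
{
	pthread_mutex_lock(&hwq->lock);        /* spin_lock_bh(&hwq->lock) */
	for (int i = 0; i < nframes; i++)
		hwq->queued++;                 /* enqueue one frame */
	kick(hwq);                             /* single kick per batch */
	pthread_mutex_unlock(&hwq->lock);
}

int main(void)
{
	struct queue_sketch q = { .lock = PTHREAD_MUTEX_INITIALIZER };

	release_buffered_frames(&q, 4);
	printf("queued=%d kicks=%d\n", q.queued, q.kicks);  /* 4 and 1 */
	return 0;
}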
|
D | mt76x02_mmio.c |
    109  struct mt76_queue *hwq;  in mt76x02_init_tx_queue() local
    112  hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);  in mt76x02_init_tx_queue()
    113  if (!hwq)  in mt76x02_init_tx_queue()
    116  err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);  in mt76x02_init_tx_queue()
    120  dev->mt76.q_tx[qid] = hwq;  in mt76x02_init_tx_queue()
|
/Linux-v5.10/drivers/net/wireless/ti/wlcore/ |
D | tx.c |
    1201  int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);  in wlcore_stop_queue_locked() local
    1202  bool stopped = !!wl->queue_stop_reasons[hwq];  in wlcore_stop_queue_locked()
    1205  WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));  in wlcore_stop_queue_locked()
    1210  ieee80211_stop_queue(wl->hw, hwq);  in wlcore_stop_queue_locked()
    1227  int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);  in wlcore_wake_queue() local
    1232  WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq]));  in wlcore_wake_queue()
    1234  if (wl->queue_stop_reasons[hwq])  in wlcore_wake_queue()
    1237  ieee80211_wake_queue(wl->hw, hwq);  in wlcore_wake_queue()
    1304  int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);  in wlcore_is_queue_stopped_by_reason_locked() local
    1307  return test_bit(reason, &wl->queue_stop_reasons[hwq]);  in wlcore_is_queue_stopped_by_reason_locked()
    [all …]
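wlcore keeps a per-hwq bitmask of stop reasons: stopping sets the reason's bit, and only the transition from an empty mask pauses the mac80211 queue; waking clears the bit, and the queue restarts only once the mask is empty again. A userspace sketch of that refcount-by-bitmask behaviour (the kernel uses atomic test_and_set_bit/test_and_clear_bit; plain bit ops here):

#include <stdio.h>

#define NUM_HWQ 4

static unsigned long queue_stop_reasons[NUM_HWQ];

static void stop_queue(int hwq, int reason)
{
	int was_stopped = queue_stop_reasons[hwq] != 0;

	queue_stop_reasons[hwq] |= 1UL << reason;      /* test_and_set_bit() */
	if (!was_stopped)
		printf("hwq %d: paused\n", hwq);    /* ieee80211_stop_queue() */
}

static void wake_queue(int hwq, int reason)
{
	queue_stop_reasons[hwq] &= ~(1UL << reason);  /* test_and_clear_bit() */
	if (queue_stop_reasons[hwq])
		return;           /* another reason still holds it stopped */
	printf("hwq %d: running\n", hwq);           /* ieee80211_wake_queue() */
}

int main(void)
{
	stop_queue(0, 1);   /* pauses the queue */
	stop_queue(0, 3);   /* already stopped: only records the reason */
	wake_queue(0, 1);   /* reason 3 still set: stays stopped */
	wake_queue(0, 3);   /* last reason cleared: queue runs again */
	return 0;
}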
|
/Linux-v5.10/drivers/scsi/ |
D | virtio_scsi.c |
    549  u16 hwq = blk_mq_unique_tag_to_hwq(tag);  in virtscsi_pick_vq_mq() local
    551  return &vscsi->req_vqs[hwq];  in virtscsi_pick_vq_mq()
    724  static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq)  in virtscsi_commit_rqs() argument
    728  virtscsi_kick_vq(&vscsi->req_vqs[hwq]);  in virtscsi_commit_rqs()
|
D | scsi_debug.c |
    4703  u16 hwq;  in get_queue() local
    4706  hwq = blk_mq_unique_tag_to_hwq(tag);  in get_queue()
    4708  pr_debug("tag=%#x, hwq=%d\n", tag, hwq);  in get_queue()
    4709  if (WARN_ON_ONCE(hwq >= submit_queues))  in get_queue()
    4710  hwq = 0;  in get_queue()
    4712  return sdebug_q_arr + hwq;  in get_queue()
|
/Linux-v5.10/drivers/net/wireless/intel/iwlegacy/ |
D | common.h |
    2245  il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)  in il_set_swq_id() argument
    2248  BUG_ON(hwq > 31); /* only use 5 bits */  in il_set_swq_id()
    2250  txq->swq_id = (hwq << 2) | ac;  in il_set_swq_id()
    2271  u8 hwq = (queue >> 2) & 0x1f;  in il_wake_queue() local
    2273  if (test_and_clear_bit(hwq, il->queue_stopped))  in il_wake_queue()
    2282  u8 hwq = (queue >> 2) & 0x1f;  in il_stop_queue() local
    2284  if (!test_and_set_bit(hwq, il->queue_stopped))  in il_stop_queue()
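il_set_swq_id() packs two fields into one byte: the hardware queue in bits 2..6 (five bits, hence the BUG_ON(hwq > 31)) and the access category in bits 0..1; il_wake_queue() and il_stop_queue() reverse it with (queue >> 2) & 0x1f. A worked encode/decode of that layout:

#include <assert.h>
#include <stdio.h>

/* swq_id layout: bits [6:2] = hwq (5 bits), bits [1:0] = ac (2 bits). */
static unsigned char set_swq_id(unsigned char ac, unsigned char hwq)
{
	assert(hwq <= 31 && ac <= 3);    /* BUG_ON(hwq > 31) in the driver */
	return (hwq << 2) | ac;
}

int main(void)
{
	unsigned char swq_id = set_swq_id(2, 19);
	unsigned char hwq = (swq_id >> 2) & 0x1f;  /* as in il_wake_queue() */
	unsigned char ac  = swq_id & 3;

	printf("swq_id=%#x -> hwq=%u ac=%u\n", swq_id, hwq, ac);
	/* (19 << 2) | 2 = 0x4e -> hwq=19 ac=2 */
	return 0;
}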
|
/Linux-v5.10/drivers/block/ |
D | nbd.c |
    685  u16 hwq;  in nbd_read_stat() local
    709  hwq = blk_mq_unique_tag_to_hwq(tag);  in nbd_read_stat()
    710  if (hwq < nbd->tag_set.nr_hw_queues)  in nbd_read_stat()
    711  req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],  in nbd_read_stat()
|
D | skd_main.c |
    1465  u16 hwq = 0;  in skd_isr_completion_posted() local
    1521  WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq],  in skd_isr_completion_posted()
    1527  rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag);  in skd_isr_completion_posted()
|