
Searched refs:nq (Results 1 – 25 of 31) sorted by relevance


/Linux-v5.10/drivers/block/
null_blk_main.c
614 static void put_tag(struct nullb_queue *nq, unsigned int tag) in put_tag() argument
616 clear_bit_unlock(tag, nq->tag_map); in put_tag()
618 if (waitqueue_active(&nq->wait)) in put_tag()
619 wake_up(&nq->wait); in put_tag()
622 static unsigned int get_tag(struct nullb_queue *nq) in get_tag() argument
627 tag = find_first_zero_bit(nq->tag_map, nq->queue_depth); in get_tag()
628 if (tag >= nq->queue_depth) in get_tag()
630 } while (test_and_set_bit_lock(tag, nq->tag_map)); in get_tag()
637 put_tag(cmd->nq, cmd->tag); in free_cmd()
642 static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) in __alloc_cmd() argument
[all …]
null_blk_zoned.c
328 struct nullb_device *dev = cmd->nq->dev; in null_zone_write()
528 struct nullb_device *dev = cmd->nq->dev; in null_zone_mgmt()
581 struct nullb_device *dev = cmd->nq->dev; in null_process_zoned_cmd()
null_blk.h
21 struct nullb_queue *nq; member
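
The null_blk hits above show a per-queue tag allocator: a free tag is a clear bit in nq->tag_map, claimed with test_and_set_bit_lock() and released with clear_bit_unlock() plus a wake-up for anyone waiting. A minimal sketch of that pattern, using a hypothetical my_tag_queue in place of the driver's struct nullb_queue:

    #include <linux/bitops.h>
    #include <linux/wait.h>

    /* Hypothetical stand-in for struct nullb_queue; fields mirror the hits above. */
    struct my_tag_queue {
            unsigned long *tag_map;         /* one bit per tag, set = in use */
            unsigned int queue_depth;       /* number of tags in the map */
            wait_queue_head_t wait;         /* tasks waiting for a free tag */
    };

    static void my_put_tag(struct my_tag_queue *q, unsigned int tag)
    {
            clear_bit_unlock(tag, q->tag_map);      /* release with unlock ordering */

            if (waitqueue_active(&q->wait))
                    wake_up(&q->wait);
    }

    static int my_get_tag(struct my_tag_queue *q)
    {
            unsigned int tag;

            do {
                    tag = find_first_zero_bit(q->tag_map, q->queue_depth);
                    if (tag >= q->queue_depth)
                            return -1;              /* no free tag at the moment */
            } while (test_and_set_bit_lock(tag, q->tag_map));       /* lost the race, retry */

            return tag;
    }
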
/Linux-v5.10/drivers/infiniband/hw/bnxt_re/
qplib_fp.c
158 struct bnxt_qplib_nq *nq = nq_work->nq; in bnxt_qpn_cqn_sched_task() local
160 if (cq && nq) { in bnxt_qpn_cqn_sched_task()
162 if (atomic_read(&cq->arm_state) && nq->cqn_handler) { in bnxt_qpn_cqn_sched_task()
163 dev_dbg(&nq->pdev->dev, in bnxt_qpn_cqn_sched_task()
165 __func__, cq, nq); in bnxt_qpn_cqn_sched_task()
166 nq->cqn_handler(nq, cq); in bnxt_qpn_cqn_sched_task()
234 static void clean_nq(struct bnxt_qplib_nq *nq, struct bnxt_qplib_cq *cq) in clean_nq() argument
236 struct bnxt_qplib_hwq *hwq = &nq->hwq; in clean_nq()
238 int budget = nq->budget; in clean_nq()
294 clean_nq(cq->nq, cq); in __wait_for_all_nqes()
[all …]
qplib_fp.h
402 struct bnxt_qplib_nq *nq; member
464 typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq,
466 typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
489 struct bnxt_qplib_nq *nq; member
493 void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
494 void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
495 int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
497 int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
538 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
539 int bnxt_qplib_alloc_nq(struct bnxt_qplib_res *res, struct bnxt_qplib_nq *nq);
main.c
261 struct bnxt_qplib_nq *nq; in bnxt_re_stop_irq() local
265 nq = &rdev->nq[indx - 1]; in bnxt_re_stop_irq()
266 bnxt_qplib_nq_stop_irq(nq, false); in bnxt_re_stop_irq()
277 struct bnxt_qplib_nq *nq; in bnxt_re_start_irq() local
299 nq = &rdev->nq[indx - 1]; in bnxt_re_start_irq()
300 rc = bnxt_qplib_nq_start_irq(nq, indx - 1, in bnxt_re_start_irq()
884 static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq, in bnxt_re_srqn_handler() argument
914 static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq, in bnxt_re_cqn_handler() argument
948 bnxt_qplib_disable_nq(&rdev->nq[i - 1]); in bnxt_re_cleanup_res()
964 rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1], in bnxt_re_init_res()
[all …]
ib_verbs.c
1575 struct bnxt_qplib_nq *nq = NULL; in bnxt_re_destroy_srq() local
1578 nq = qplib_srq->cq->nq; in bnxt_re_destroy_srq()
1582 if (nq) in bnxt_re_destroy_srq()
1583 nq->budget--; in bnxt_re_destroy_srq()
1624 struct bnxt_qplib_nq *nq = NULL; in bnxt_re_create_srq() local
1664 srq->qplib_srq.eventq_hw_ring_id = rdev->nq[0].ring_id; in bnxt_re_create_srq()
1665 nq = &rdev->nq[0]; in bnxt_re_create_srq()
1691 if (nq) in bnxt_re_create_srq()
1692 nq->budget++; in bnxt_re_create_srq()
2803 struct bnxt_qplib_nq *nq; in bnxt_re_destroy_cq() local
[all …]
bnxt_re.h
163 struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX]; member
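
The bnxt_re hits revolve around the driver's notification queue (NQ): completion-queue and SRQ events arrive on an NQ and are handed to the cqn_handler/srqn_handler callbacks whose typedefs appear in qplib_fp.h. A hedged sketch of that dispatch shape, with hypothetical my_* names standing in for the qplib structures:

    #include <linux/atomic.h>

    struct my_nq;

    /* Callback shape modelled on cqn_handler_t above; all names here are illustrative. */
    struct my_cq {
            atomic_t arm_state;             /* non-zero once the consumer armed the CQ */
            struct my_nq *nq;               /* notification queue this CQ reports to */
    };

    typedef int (*my_cqn_handler_t)(struct my_nq *nq, struct my_cq *cq);

    struct my_nq {
            my_cqn_handler_t cqn_handler;   /* registered when the NQ is enabled */
            int budget;                     /* events processed per poll round */
    };

    /* Deliver one CQ notification, mirroring the check in bnxt_qpn_cqn_sched_task(). */
    static void my_nq_deliver(struct my_nq *nq, struct my_cq *cq)
    {
            if (!cq || !nq)
                    return;

            /* only notify when the CQ is armed and a handler was registered */
            if (atomic_read(&cq->arm_state) && nq->cqn_handler)
                    nq->cqn_handler(nq, cq);
    }
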
/Linux-v5.10/lib/crypto/
curve25519-hacl64.c
546 ladder_smallloop_cmult_small_loop_step(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2, in ladder_smallloop_cmult_small_loop_step() argument
551 point_swap_conditional(nq, nqpq, bit0); in ladder_smallloop_cmult_small_loop_step()
552 addanddouble_fmonty(nq2, nqpq2, nq, nqpq, q); in ladder_smallloop_cmult_small_loop_step()
558 ladder_smallloop_cmult_small_loop_double_step(u64 *nq, u64 *nqpq, u64 *nq2, in ladder_smallloop_cmult_small_loop_double_step() argument
562 ladder_smallloop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt); in ladder_smallloop_cmult_small_loop_double_step()
564 ladder_smallloop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1); in ladder_smallloop_cmult_small_loop_double_step()
568 ladder_smallloop_cmult_small_loop(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2, in ladder_smallloop_cmult_small_loop() argument
572 ladder_smallloop_cmult_small_loop_double_step(nq, nqpq, nq2, in ladder_smallloop_cmult_small_loop()
578 static __always_inline void ladder_bigloop_cmult_big_loop(u8 *n1, u64 *nq, in ladder_bigloop_cmult_big_loop() argument
585 ladder_smallloop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, in ladder_bigloop_cmult_big_loop()
[all …]
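
In the HACL* Curve25519 code, nq and nqpq are the Montgomery ladder's two working points, and point_swap_conditional() exchanges them in constant time according to one scalar bit before each step. A self-contained sketch of such a constant-time conditional swap (the limb count is assumed here, not taken from the source):

    #include <linux/types.h>

    #define MY_POINT_LIMBS 10       /* assumed size of one projective point in u64 limbs */

    /* Swap a and b when bit == 1, leave them alone when bit == 0, with no
     * data-dependent branches (the idea behind point_swap_conditional()). */
    static void my_cswap(u64 *a, u64 *b, u64 bit)
    {
            u64 mask = 0 - bit;     /* all ones if bit == 1, all zeros if bit == 0 */
            int i;

            for (i = 0; i < MY_POINT_LIMBS; i++) {
                    u64 t = mask & (a[i] ^ b[i]);

                    a[i] ^= t;
                    b[i] ^= t;
            }
    }
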
/Linux-v5.10/fs/xfs/
xfs_trans_dquot.c
77 struct xfs_dqtrx *oq, *nq; in xfs_trans_dup_dqinfo() local
103 nq = &nqa[i]; in xfs_trans_dup_dqinfo()
108 nq->qt_dquot = oq->qt_dquot; in xfs_trans_dup_dqinfo()
109 nq->qt_bcount_delta = nq->qt_icount_delta = 0; in xfs_trans_dup_dqinfo()
110 nq->qt_rtbcount_delta = 0; in xfs_trans_dup_dqinfo()
115 nq->qt_blk_res = oq->qt_blk_res - blk_res_used; in xfs_trans_dup_dqinfo()
118 nq->qt_rtblk_res = oq->qt_rtblk_res - in xfs_trans_dup_dqinfo()
122 nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used; in xfs_trans_dup_dqinfo()
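
The xfs_trans_dup_dqinfo() hits show how a duplicated transaction inherits quota state: the new dqtrx points at the same dquot, its deltas start at zero, and it receives whatever part of the old reservation was not yet consumed. A trimmed-down sketch of that hand-off, with a hypothetical structure that keeps only the block-reservation fields:

    /* Hypothetical, reduced version of the reservation hand-off in
     * xfs_trans_dup_dqinfo(); only block counts are modelled. */
    struct my_dqtrx {
            void *qt_dquot;                 /* dquot this transaction touches */
            long long qt_bcount_delta;      /* block-count change made so far */
            unsigned long qt_blk_res;       /* blocks reserved for this transaction */
    };

    static void my_dup_dqtrx(struct my_dqtrx *nq, const struct my_dqtrx *oq)
    {
            unsigned long blk_res_used = 0;

            /* a positive delta means that much of the reservation is already used */
            if (oq->qt_bcount_delta > 0)
                    blk_res_used = oq->qt_bcount_delta;

            nq->qt_dquot = oq->qt_dquot;
            nq->qt_bcount_delta = 0;        /* the duplicate starts with a clean delta */

            /* hand the unused part of the old reservation to the new transaction */
            nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
    }
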
/Linux-v5.10/arch/x86/crypto/
curve25519-x86_64.c
766 u64 *nq = p01_tmp1; in point_add_and_double() local
770 u64 *x2 = nq; in point_add_and_double()
771 u64 *z2 = nq + (u32)4U; in point_add_and_double()
813 fmul2(nq, dc1, ab1, tmp2); in point_add_and_double()
817 static void point_double(u64 *nq, u64 *tmp1, u64 *tmp2) in point_double() argument
819 u64 *x2 = nq; in point_double()
820 u64 *z2 = nq + (u32)4U; in point_double()
837 fmul2(nq, dc, ab, tmp2); in point_double()
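
In the x86_64 Curve25519 code the lines above show nq as one buffer holding both projective coordinates: x2 = nq and z2 = nq + 4 select the X and Z field elements of the same point. A small sketch of that layout, assuming the 4 x 64-bit limb representation used by this file:

    #include <linux/types.h>

    #define MY_NLIMBS 4U    /* limbs per field element (4 x 64 bits) */

    /* One projective point occupies 2 * MY_NLIMBS limbs: X first, then Z. */
    static inline u64 *my_point_x(u64 *p)
    {
            return p;                       /* limbs 0..3 */
    }

    static inline u64 *my_point_z(u64 *p)
    {
            return p + MY_NLIMBS;           /* limbs 4..7 */
    }
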
/Linux-v5.10/drivers/net/ethernet/chelsio/cxgb4/
cxgb4_uld.c
111 unsigned int nq = rxq_info->nrxq + rxq_info->nciq; in alloc_uld_rxqs() local
125 for (i = 0; i < nq; i++, q++) { in alloc_uld_rxqs()
403 int nq = txq_info->ntxq; in free_sge_txq_uld() local
406 for (i = 0; i < nq; i++) { in free_sge_txq_uld()
426 int nq = txq_info->ntxq; in alloc_sge_txq_uld() local
429 j = nq / adap->params.nports; in alloc_sge_txq_uld()
430 for (i = 0; i < nq; i++) { in alloc_sge_txq_uld()
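
In alloc_sge_txq_uld() above, nq is the total number of ULD TX queues and j = nq / nports splits them evenly across the adapter's ports. A hedged sketch of how such an even split maps a queue index to a port; the mapping rule and the guard for nq < nports are assumptions based on the visible arithmetic, not code from the driver:

    /* Assumed queue-to-port mapping behind "j = nq / adap->params.nports":
     * with per_port queues per port, queue i belongs to port i / per_port. */
    static int my_port_of_queue(int i, int nq, int nports)
    {
            int per_port = nq / nports;

            if (per_port == 0)      /* fewer queues than ports; guard added here */
                    per_port = 1;

            return i / per_port;
    }
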
/Linux-v5.10/drivers/net/ethernet/marvell/
mvneta.c
1834 struct netdev_queue *nq, bool napi) in mvneta_txq_bufs_free() argument
1864 netdev_tx_completed_queue(nq, pkts_compl, bytes_compl); in mvneta_txq_bufs_free()
1871 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_txq_done() local
1878 mvneta_txq_bufs_free(pp, txq, tx_done, nq, true); in mvneta_txq_done()
1882 if (netif_tx_queue_stopped(nq)) { in mvneta_txq_done()
1884 netif_tx_wake_queue(nq); in mvneta_txq_done()
2090 struct netdev_queue *nq; in mvneta_xdp_xmit_back() local
2101 nq = netdev_get_tx_queue(pp->dev, txq->id); in mvneta_xdp_xmit_back()
2103 __netif_tx_lock(nq, cpu); in mvneta_xdp_xmit_back()
2118 __netif_tx_unlock(nq); in mvneta_xdp_xmit_back()
[all …]
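
The mvneta hits show the usual TX-completion handling around a struct netdev_queue: report the completed packets and bytes with netdev_tx_completed_queue(), then wake the queue if it had been stopped and descriptors are available again. A hedged sketch of that pattern for a hypothetical driver (my_txq, the threshold and the free-descriptor bookkeeping are assumptions):

    #include <linux/netdevice.h>

    #define MY_WAKE_THRESHOLD 32    /* free descriptors required before waking; assumed */

    struct my_txq {
            int id;                 /* index of the hardware TX queue */
            int free_descs;         /* descriptors free after reclaiming completions */
    };

    static void my_txq_done(struct net_device *dev, struct my_txq *txq,
                            unsigned int pkts, unsigned int bytes)
    {
            struct netdev_queue *nq = netdev_get_tx_queue(dev, txq->id);

            /* let BQL account for the completed work */
            netdev_tx_completed_queue(nq, pkts, bytes);

            /* wake the stack if we stopped this queue earlier and have room again */
            if (netif_tx_queue_stopped(nq) && txq->free_descs >= MY_WAKE_THRESHOLD)
                    netif_tx_wake_queue(nq);
    }
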
mv643xx_eth.c
495 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); in txq_maybe_wake() local
497 if (netif_tx_queue_stopped(nq)) { in txq_maybe_wake()
498 __netif_tx_lock(nq, smp_processor_id()); in txq_maybe_wake()
500 netif_tx_wake_queue(nq); in txq_maybe_wake()
501 __netif_tx_unlock(nq); in txq_maybe_wake()
995 struct netdev_queue *nq; in mv643xx_eth_xmit() local
999 nq = netdev_get_tx_queue(dev, queue); in mv643xx_eth_xmit()
1018 netif_tx_stop_queue(nq); in mv643xx_eth_xmit()
1032 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); in txq_kick() local
1036 __netif_tx_lock(nq, smp_processor_id()); in txq_kick()
[all …]
/Linux-v5.10/kernel/rcu/
tasks.h
778 int nq = t->trc_reader_special.b.need_qs; in rcu_read_unlock_trace_special() local
784 if (nq) in rcu_read_unlock_trace_special()
787 if (nq && atomic_dec_and_test(&trc_n_readers_need_end)) in rcu_read_unlock_trace_special()
/Linux-v5.10/drivers/net/ethernet/chelsio/cxgb4vf/
t4vf_hw.c
1271 int nq = min(n, 32); in t4vf_config_rss_range() local
1278 cmd.niqid = cpu_to_be16(nq); in t4vf_config_rss_range()
1284 start += nq; in t4vf_config_rss_range()
1285 n -= nq; in t4vf_config_rss_range()
1292 while (nq > 0) { in t4vf_config_rss_range()
1301 int nqbuf = min(3, nq); in t4vf_config_rss_range()
1303 nq -= nqbuf; in t4vf_config_rss_range()
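
In t4vf_config_rss_range(), a range of n queue IDs is written in firmware commands of at most 32 entries (nq = min(n, 32)), and within a command the IDs are consumed in groups of at most three (nqbuf = min(3, nq)), which the command packs into one 32-bit table word. A sketch of just that two-level chunking, with the actual packing left as a comment:

    /* Two-level chunking as in t4vf_config_rss_range(): outer chunks of at most
     * 32 IDs per command, inner groups of at most 3 IDs per 32-bit word. */
    static void my_config_range(unsigned int start, int n)
    {
            while (n > 0) {
                    int nq = n < 32 ? n : 32;       /* IDs carried by this command */
                    int left = nq;

                    while (left > 0) {
                            int nqbuf = left < 3 ? left : 3;

                            /* pack IDs [start, start + nqbuf) into one word here */
                            start += nqbuf;
                            left -= nqbuf;
                    }

                    n -= nq;
            }
    }
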
/Linux-v5.10/Documentation/devicetree/bindings/display/
arm,komeda.txt
18 Required properties for sub-node: pipeline@nq
/Linux-v5.10/arch/s390/mm/
pgtable.c
773 unsigned char key, bool nq) in set_guest_storage_key() argument
822 page_set_storage_key(paddr, skey, !nq); in set_guest_storage_key()
846 bool nq, bool mr, bool mc) in cond_set_guest_storage_key() argument
865 rc = set_guest_storage_key(current->mm, addr, key, nq); in cond_set_guest_storage_key()
/Linux-v5.10/drivers/net/
tap.c
190 struct tap_queue *nq; in tap_disable_queue() local
201 nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]); in tap_disable_queue()
202 nq->queue_index = index; in tap_disable_queue()
204 rcu_assign_pointer(tap->taps[index], nq); in tap_disable_queue()
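
In tap_disable_queue() above, disabling a queue vacates its slot in the taps[] array and the last enabled queue (nq) is moved into that slot so the enabled queues stay contiguous. A simplified sketch of that compaction over a plain pointer array; the driver itself performs the update under RCU with rcu_assign_pointer():

    #include <linux/stddef.h>

    /* Hypothetical queue with just the field the compaction needs. */
    struct my_tap_queue {
            int queue_index;        /* slot this queue currently occupies */
    };

    /* Move the last enabled queue into the slot freed at "index". */
    static void my_disable_queue(struct my_tap_queue **queues, int *nr_enabled,
                                 int index)
    {
            struct my_tap_queue *last = queues[*nr_enabled - 1];

            if (index != *nr_enabled - 1) {
                    last->queue_index = index;      /* last queue takes over the freed slot */
                    queues[index] = last;
            }

            queues[*nr_enabled - 1] = NULL;
            (*nr_enabled)--;
    }
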
/Linux-v5.10/drivers/net/ethernet/freescale/
fec_main.c
798 struct netdev_queue *nq; in fec_enet_start_xmit() local
803 nq = netdev_get_tx_queue(ndev, queue); in fec_enet_start_xmit()
814 netif_tx_stop_queue(nq); in fec_enet_start_xmit()
1243 struct netdev_queue *nq; in fec_enet_tx_queue() local
1251 nq = netdev_get_tx_queue(ndev, queue_id); in fec_enet_tx_queue()
1331 if (netif_tx_queue_stopped(nq)) { in fec_enet_tx_queue()
1334 netif_tx_wake_queue(nq); in fec_enet_tx_queue()
/Linux-v5.10/arch/s390/kvm/
priv.c
1013 bool mr = false, mc = false, nq; in handle_pfmf() local
1043 nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ; in handle_pfmf()
1095 key, NULL, nq, mr, mc); in handle_pfmf()
/Linux-v5.10/net/sched/
sch_api.c
318 struct netdev_queue *nq; in qdisc_lookup_rcu() local
327 nq = dev_ingress_queue_rcu(dev); in qdisc_lookup_rcu()
328 if (nq) in qdisc_lookup_rcu()
329 q = qdisc_match_from_root(nq->qdisc_sleeping, handle); in qdisc_lookup_rcu()
/Linux-v5.10/drivers/net/ethernet/freescale/dpaa2/
dpaa2-eth.c
1073 struct netdev_queue *nq; in __dpaa2_eth_tx() local
1144 nq = netdev_get_tx_queue(net_dev, queue_mapping); in __dpaa2_eth_tx()
1145 netdev_tx_sent_queue(nq, fd_len); in __dpaa2_eth_tx()
1160 netdev_tx_completed_queue(nq, 1, fd_len); in __dpaa2_eth_tx()
1510 struct netdev_queue *nq; in dpaa2_eth_poll() local
1571 nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid); in dpaa2_eth_poll()
1572 netdev_tx_completed_queue(nq, txc_fq->dq_frames, in dpaa2_eth_poll()
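
The dpaa2-eth hits show both halves of byte-queue-limits accounting: netdev_tx_sent_queue() when a frame descriptor is handed to hardware, and netdev_tx_completed_queue() when its TX confirmation arrives, or immediately if the enqueue fails. A hedged sketch of that pairing; my_hw_enqueue() and the surrounding driver are hypothetical:

    #include <linux/netdevice.h>

    /* stand-in for the real hardware enqueue; returns 0 on success */
    static int my_hw_enqueue(unsigned int queue, unsigned int len)
    {
            return 0;
    }

    static int my_xmit(struct net_device *dev, unsigned int queue,
                       unsigned int fd_len)
    {
            struct netdev_queue *nq = netdev_get_tx_queue(dev, queue);

            netdev_tx_sent_queue(nq, fd_len);       /* BQL: bytes handed to hardware */

            if (my_hw_enqueue(queue, fd_len)) {
                    /* enqueue failed: roll the accounting back right away */
                    netdev_tx_completed_queue(nq, 1, fd_len);
                    return -1;
            }
            return 0;
    }

    static void my_tx_confirm(struct net_device *dev, unsigned int queue,
                              unsigned int frames, unsigned int bytes)
    {
            struct netdev_queue *nq = netdev_get_tx_queue(dev, queue);

            netdev_tx_completed_queue(nq, frames, bytes);   /* BQL: work is done */
    }
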
/Linux-v5.10/drivers/net/ethernet/marvell/mvpp2/
mvpp2_main.c
2483 struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id); in mvpp2_txq_done() local
2496 if (netif_tx_queue_stopped(nq)) in mvpp2_txq_done()
2498 netif_tx_wake_queue(nq); in mvpp2_txq_done()
2986 static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq) in mvpp2_isr_handle_ptp_queue() argument
2996 if (nq) in mvpp2_isr_handle_ptp_queue()
2999 queue = &port->tx_hwtstamp_queue[nq]; in mvpp2_isr_handle_ptp_queue()
3260 struct netdev_queue *nq; in mvpp2_xdp_finish_tx() local
3264 nq = netdev_get_tx_queue(port->dev, txq_id); in mvpp2_xdp_finish_tx()
3276 netif_tx_stop_queue(nq); in mvpp2_xdp_finish_tx()
4009 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); in mvpp2_tx() local
[all …]
/Linux-v5.10/arch/s390/include/asm/
pgtable.h
1152 unsigned char key, bool nq);
1155 bool nq, bool mr, bool mc);
