/Linux-v4.19/fs/ocfs2/cluster/

  quorum.c
    109  struct o2quo_state *qs = &o2quo_state;  in o2quo_make_decision() local
    111  spin_lock(&qs->qs_lock);  in o2quo_make_decision()
    113  lowest_hb = find_first_bit(qs->qs_hb_bm, O2NM_MAX_NODES);  in o2quo_make_decision()
    115  lowest_reachable = test_bit(lowest_hb, qs->qs_conn_bm);  in o2quo_make_decision()
    118  "lowest: %d (%sreachable)\n", qs->qs_heartbeating,  in o2quo_make_decision()
    119  qs->qs_connected, lowest_hb, lowest_reachable ? "" : "un");  in o2quo_make_decision()
    121  if (!test_bit(o2nm_this_node(), qs->qs_hb_bm) ||  in o2quo_make_decision()
    122  qs->qs_heartbeating == 1)  in o2quo_make_decision()
    125  if (qs->qs_heartbeating & 1) {  in o2quo_make_decision()
    128  quorum = (qs->qs_heartbeating + 1)/2;  in o2quo_make_decision()
    [all …]
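  Note: the o2quo_make_decision() hits above center on a simple majority count over the
  heartbeating nodes (quorum = (qs_heartbeating + 1)/2 when the count is odd). The sketch
  below is a standalone illustration of that arithmetic only; the names and the even-count
  tie-break on the lowest node are assumptions, not the ocfs2 implementation.

      #include <stdbool.h>
      #include <stdio.h>

      /*
       * Majority quorum over `heartbeating` nodes: this node keeps quorum when
       * its side of the partition holds more than half of them.  On an even
       * split the tie is assumed to go to the side that can reach the lowest
       * heartbeating node (sketch-level assumption).
       */
      static bool has_quorum(int heartbeating, int connected, bool lowest_reachable)
      {
          int quorum;

          if (heartbeating & 1) {
              quorum = (heartbeating + 1) / 2;      /* odd: strict majority */
              return connected + 1 >= quorum;       /* +1 counts this node itself */
          }
          quorum = heartbeating / 2;                /* even: half, plus tie-break */
          return connected + 1 > quorum ||
                 (connected + 1 == quorum && lowest_reachable);
      }

      int main(void)
      {
          printf("%d\n", has_quorum(5, 2, true));   /* 3 of 5 -> quorum */
          printf("%d\n", has_quorum(4, 1, true));   /* 2 of 4 holding lowest -> quorum */
          printf("%d\n", has_quorum(4, 1, false));  /* 2 of 4 without lowest -> no quorum */
          return 0;
      }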
/Linux-v4.19/drivers/net/ethernet/chelsio/cxgb3/

  sge.c
    730  static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)  in init_qset_cntxt() argument
    732  qs->rspq.cntxt_id = id;  in init_qset_cntxt()
    733  qs->fl[0].cntxt_id = 2 * id;  in init_qset_cntxt()
    734  qs->fl[1].cntxt_id = 2 * id + 1;  in init_qset_cntxt()
    735  qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;  in init_qset_cntxt()
    736  qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;  in init_qset_cntxt()
    737  qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;  in init_qset_cntxt()
    738  qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;  in init_qset_cntxt()
    739  qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;  in init_qset_cntxt()
    1249 struct sge_qset *qs, struct sge_txq *q)  in t3_stop_tx_queue() argument
    [all …]

  cxgb3_main.c
    412  adap->sge.qs[qidx].  in request_msix_data_irqs()
    415  &adap->sge.qs[qidx]);  in request_msix_data_irqs()
    419  &adap->sge.qs[qidx]);  in request_msix_data_irqs()
    439  &adapter->sge.qs[i]);  in free_irq_resources()
    449  while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {  in await_mgmt_replies()
    462  unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;  in init_tp_parity()
    598  struct sge_qset *qs = &adap->sge.qs[i];  in ring_dbs() local
    600  if (qs->adap)  in ring_dbs()
    602  t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));  in ring_dbs()
    611  struct sge_qset *qs = &adap->sge.qs[i];  in init_napi() local
    [all …]

  adapter.h
    68   struct sge_qset *qs;  member
    216  struct sge_qset qs[SGE_QSETS];  member
    324  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
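  Note: init_qset_cntxt() above derives every hardware context id of a queue set from the
  set index alone (response queue = id, free lists = 2*id and 2*id+1, TX queues = a firmware
  base plus id). A standalone sketch of that fixed id layout follows; the struct and the base
  constants are illustrative, not the cxgb3 definitions.

      #include <stdio.h>

      /* illustrative egress-context base offsets, not the real firmware constants */
      enum { ETH_EGR_START = 0x100, OFLD_EGR_START = 0x200, CTRL_EGR_START = 0x300 };

      struct qset_ids {
          unsigned int rspq;      /* response queue context id    */
          unsigned int fl[2];     /* the two free lists of a set  */
          unsigned int txq_eth;   /* ethernet TX queue context id */
          unsigned int txq_ofld;  /* offload TX queue context id  */
          unsigned int txq_ctrl;  /* control TX queue context id  */
      };

      /* every id is a fixed function of the set index, so no lookup table is needed */
      static void init_qset_ids(struct qset_ids *qs, unsigned int id)
      {
          qs->rspq     = id;
          qs->fl[0]    = 2 * id;          /* free lists come in pairs per set */
          qs->fl[1]    = 2 * id + 1;
          qs->txq_eth  = ETH_EGR_START  + id;
          qs->txq_ofld = OFLD_EGR_START + id;
          qs->txq_ctrl = CTRL_EGR_START + id;
      }

      int main(void)
      {
          struct qset_ids qs;

          init_qset_ids(&qs, 3);
          printf("rspq=%u fl0=%u fl1=%u eth=%#x\n", qs.rspq, qs.fl[0], qs.fl[1], qs.txq_eth);
          return 0;
      }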
/Linux-v4.19/drivers/net/ethernet/cavium/thunder/

  nicvf_queues.c
    386  struct queue_set *qs = nic->qs;  in nicvf_refill_rbdr() local
    387  int rbdr_idx = qs->rbdr_cnt;  in nicvf_refill_rbdr()
    399  rbdr = &qs->rbdr[rbdr_idx];  in nicvf_refill_rbdr()
    408  if (qcount >= (qs->rbdr_len - 1))  in nicvf_refill_rbdr()
    411  refill_rb_cnt = qs->rbdr_len - qcount - 1;  in nicvf_refill_rbdr()
    630  struct queue_set *qs, int qidx)  in nicvf_reclaim_snd_queue() argument
    642  struct queue_set *qs, int qidx)  in nicvf_reclaim_rcv_queue() argument
    652  struct queue_set *qs, int qidx)  in nicvf_reclaim_cmp_queue() argument
    747  static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,  in nicvf_rcv_queue_config() argument
    754  rq = &qs->rq[qidx];  in nicvf_rcv_queue_config()
    [all …]

  nicvf_ethtool.c
    221  for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {  in nicvf_get_qset_strings()
    229  for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {  in nicvf_get_qset_strings()
    286  (nic->qs->rq_cnt + nic->qs->sq_cnt);  in nicvf_get_sset_count()
    294  (snic->qs->rq_cnt + snic->qs->sq_cnt);  in nicvf_get_sset_count()
    310  for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {  in nicvf_get_qset_stats()
    313  *((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)  in nicvf_get_qset_stats()
    317  for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {  in nicvf_get_qset_stats()
    320  *((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats)  in nicvf_get_qset_stats()
    475  struct queue_set *qs = nic->qs;  in nicvf_get_ringparam() local
    478  ring->rx_pending = qs->cq_len;  in nicvf_get_ringparam()
    [all …]

  nicvf_main.c
    300  mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;  in nicvf_config_cpi()
    422  nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;  in nicvf_request_sqs()
    425  nic->snicvf[sqs]->qs->rq_cnt = rx_queues;  in nicvf_request_sqs()
    430  nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;  in nicvf_request_sqs()
    433  nic->snicvf[sqs]->qs->sq_cnt = tx_queues;  in nicvf_request_sqs()
    437  nic->snicvf[sqs]->qs->cq_cnt =  in nicvf_request_sqs()
    438  max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);  in nicvf_request_sqs()
    649  sq = &nic->qs->sq[cqe_tx->sq_idx];  in nicvf_snd_pkt_handler()
    834  struct queue_set *qs = nic->qs;  in nicvf_cq_intr_handler() local
    835  struct cmp_queue *cq = &qs->cq[cq_idx];  in nicvf_cq_intr_handler()
    [all …]

  nic.h
    286  struct queue_set *qs;  member
    588  struct qs_cfg_msg qs;  member
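  Note: the nicvf_refill_rbdr() hits compute how many receive-buffer descriptors may be
  refilled while one ring slot is kept back (rbdr_len - qcount - 1). A minimal sketch of
  that ring arithmetic, with illustrative names rather than the driver's structures:

      #include <stdio.h>

      /*
       * A descriptor ring of `len` slots with `used` entries currently filled.
       * One slot is deliberately never filled, so a full ring stays distinguishable
       * from an empty one (a common ring-buffer convention, assumed here to be
       * what the rbdr_len - qcount - 1 hit reflects).
       */
      static unsigned int refill_count(unsigned int len, unsigned int used)
      {
          if (used >= len - 1)
              return 0;               /* ring is full apart from the spare slot */
          return len - used - 1;      /* descriptors we may hand back to hardware */
      }

      int main(void)
      {
          printf("%u\n", refill_count(1024, 1000));  /* 23 slots can be refilled */
          printf("%u\n", refill_count(1024, 1023));  /* 0: nothing to refill */
          return 0;
      }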
/Linux-v4.19/fs/qnx4/

  inode.c
    45   struct qnx4_sb_info *qs;  in qnx4_remount() local
    48   qs = qnx4_sb(sb);  in qnx4_remount()
    49   qs->Version = QNX4_VERSION;  in qnx4_remount()
    191  struct qnx4_sb_info *qs;  in qnx4_fill_super() local
    193  qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL);  in qnx4_fill_super()
    194  if (!qs)  in qnx4_fill_super()
    196  s->s_fs_info = qs;  in qnx4_fill_super()
    238  struct qnx4_sb_info *qs = qnx4_sb(sb);  in qnx4_kill_sb() local
    240  if (qs) {  in qnx4_kill_sb()
    241  kfree(qs->BitMap);  in qnx4_kill_sb()
    [all …]
/Linux-v4.19/drivers/net/ethernet/chelsio/cxgb4vf/

  cxgb4vf_main.c
    313  int qs, msi;  in name_msix_vecs() local
    315  for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {  in name_msix_vecs()
    317  "%s-%d", dev->name, qs);  in name_msix_vecs()
    580  int qs;  in setup_sge_queues() local
    582  for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {  in setup_sge_queues()
    590  netdev_get_tx_queue(dev, qs),  in setup_sge_queues()
    595  rxq->rspq.idx = qs;  in setup_sge_queues()
    611  int qs;  in setup_sge_queues() local
    613  for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {  in setup_sge_queues()
    653  int qs, err;  in setup_rss() local
    [all …]
/Linux-v4.19/drivers/net/ethernet/hisilicon/hns/

  hns_ae_adapt.c
    117  ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1);  in hns_ae_get_handle()
    119  ae_handle->qs[i] = &ring_pair_cb->q;  in hns_ae_get_handle()
    120  ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];  in hns_ae_get_handle()
    121  ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];  in hns_ae_get_handle()
    155  hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;  in hns_ae_put_handle()
    167  ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]);  in hns_ae_wait_flow_down()
    199  hns_rcb_ring_enable_hw(handle->qs[i], val);  in hns_ae_ring_enable_all()
    325  q = handle->qs[i];  in hns_ae_set_mtu()
    353  hns_rcb_int_clr_hw(handle->qs[k],  in hns_ae_start()
    356  hns_rcbv2_int_clr_hw(handle->qs[k],  in hns_ae_start()
    [all …]

  hnae.c
    300  hnae_fini_queue(handle->qs[i]);  in hnae_reinit_handle()
    306  ret = hnae_init_queue(handle, handle->qs[i], handle->dev);  in hnae_reinit_handle()
    313  hnae_fini_queue(handle->qs[j]);  in hnae_reinit_handle()
    352  ret = hnae_init_queue(handle, handle->qs[i], dev);  in hnae_get_handle()
    365  hnae_fini_queue(handle->qs[j]);  in hnae_get_handle()
    379  hnae_fini_queue(h->qs[i]);  in hnae_put_handle()

  hns_dsaf_rcb.h
    138  void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
    139  int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs);

  hns_dsaf_rcb.c
    44   void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)  in hns_rcb_wait_fbd_clean() argument
    53   fbd_num += dsaf_read_dev(qs[i],  in hns_rcb_wait_fbd_clean()
    56   fbd_num += dsaf_read_dev(qs[i],  in hns_rcb_wait_fbd_clean()
    65   dev_err(qs[i]->handle->owner_dev,  in hns_rcb_wait_fbd_clean()
    69   int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs)  in hns_rcb_wait_tx_ring_clean() argument
    74   tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL);  in hns_rcb_wait_tx_ring_clean()
    77   head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD);  in hns_rcb_wait_tx_ring_clean()
    85   dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n");  in hns_rcb_wait_tx_ring_clean()

  hns_enet.c
    378   hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);  in hns_nic_net_xmit_hw()
    1647  ring = &h->qs[i]->rx_ring;  in hns_nic_clear_all_rx_fetch()
    1898  tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;  in hns_nic_get_stats64()
    1899  tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;  in hns_nic_get_stats64()
    1900  rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;  in hns_nic_get_stats64()
    1901  rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;  in hns_nic_get_stats64()
    2000  i, h->qs[i]->tx_ring.next_to_clean);  in hns_nic_dump()
    2002  i, h->qs[i]->tx_ring.next_to_use);  in hns_nic_dump()
    2004  i, h->qs[i]->rx_ring.next_to_clean);  in hns_nic_dump()
    2006  i, h->qs[i]->rx_ring.next_to_use);  in hns_nic_dump()
    [all …]

  hnae.h
    574  struct hnae_queue **qs; /* array base of all queues */  member
    678  ring = &h->qs[i]->rx_ring;  in hnae_reinit_all_ring_desc()
    693  ring = &h->qs[i]->rx_ring;  in hnae_reinit_all_ring_page_off()
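  Note: the hns_nic_get_stats64() hits above fold each queue's private tx/rx counters into
  one device-wide total. A standalone sketch of that aggregation loop; the struct layout is
  hypothetical and only mirrors the fields the hits touch.

      #include <stdint.h>
      #include <stdio.h>

      struct ring_stats { uint64_t bytes, pkts; };
      struct queue      { struct ring_stats tx_ring, rx_ring; };
      struct totals     { uint64_t tx_bytes, tx_pkts, rx_bytes, rx_pkts; };

      /* walk every queue and sum its per-ring counters into one snapshot */
      static struct totals sum_queue_stats(const struct queue *qs, int q_num)
      {
          struct totals t = { 0 };

          for (int i = 0; i < q_num; i++) {
              t.tx_bytes += qs[i].tx_ring.bytes;
              t.tx_pkts  += qs[i].tx_ring.pkts;
              t.rx_bytes += qs[i].rx_ring.bytes;
              t.rx_pkts  += qs[i].rx_ring.pkts;
          }
          return t;
      }

      int main(void)
      {
          struct queue qs[2] = {
              { .tx_ring = { 100, 1 }, .rx_ring = { 200, 2 } },
              { .tx_ring = { 300, 3 }, .rx_ring = { 400, 4 } },
          };
          struct totals t = sum_queue_stats(qs, 2);

          printf("tx %llu bytes / %llu pkts, rx %llu bytes / %llu pkts\n",
                 (unsigned long long)t.tx_bytes, (unsigned long long)t.tx_pkts,
                 (unsigned long long)t.rx_bytes, (unsigned long long)t.rx_pkts);
          return 0;
      }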
/Linux-v4.19/fs/qnx6/

  inode.c
    304  struct qnx6_sb_info *qs;  in qnx6_fill_super() local
    309  qs = kzalloc(sizeof(struct qnx6_sb_info), GFP_KERNEL);  in qnx6_fill_super()
    310  if (!qs)  in qnx6_fill_super()
    312  s->s_fs_info = qs;  in qnx6_fill_super()
    477  kfree(qs);  in qnx6_fill_super()
    484  struct qnx6_sb_info *qs = QNX6_SB(sb);  in qnx6_put_super() local
    485  brelse(qs->sb_buf);  in qnx6_put_super()
    486  iput(qs->longfile);  in qnx6_put_super()
    487  iput(qs->inodes);  in qnx6_put_super()
    488  kfree(qs);  in qnx6_put_super()
/Linux-v4.19/Documentation/devicetree/bindings/net/

  mscc-ocelot.txt
    14   - "qs"
    62   reg-names = "sys", "rew", "qs", "hsio", "port0",
/Linux-v4.19/net/sched/

  sch_fq_codel.c
    634  struct gnet_stats_queue qs = { 0 };  in fq_codel_dump_class_stats() local
    661  qs.qlen++;  in fq_codel_dump_class_stats()
    666  qs.backlog = q->backlogs[idx];  in fq_codel_dump_class_stats()
    667  qs.drops = flow->dropped;  in fq_codel_dump_class_stats()
    669  if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)  in fq_codel_dump_class_stats()

  sch_sfq.c
    862  struct gnet_stats_queue qs = { 0 };  in sfq_dump_class_stats() local
    869  qs.qlen = slot->qlen;  in sfq_dump_class_stats()
    870  qs.backlog = slot->backlog;  in sfq_dump_class_stats()
    872  if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)  in sfq_dump_class_stats()
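  Note: both qdisc hits build a per-class queue-statistics snapshot (queue length, backlog
  in bytes, drops) on the stack before handing it to the stats dump helper. A minimal sketch
  of assembling such a snapshot with plain structs; the types and field names are
  illustrative, not the gnet_stats API.

      #include <stdio.h>

      struct queue_stats { unsigned int qlen, backlog, drops; };
      struct flow        { unsigned int qlen, backlog_bytes, dropped; };

      /* take a point-in-time copy of one flow's queue occupancy for a stats dump */
      static struct queue_stats snapshot_flow(const struct flow *f)
      {
          struct queue_stats qs = { 0 };

          qs.qlen    = f->qlen;           /* packets queued in this class   */
          qs.backlog = f->backlog_bytes;  /* bytes those packets occupy     */
          qs.drops   = f->dropped;        /* drops attributed to the class  */
          return qs;
      }

      int main(void)
      {
          struct flow f = { .qlen = 7, .backlog_bytes = 9000, .dropped = 2 };
          struct queue_stats qs = snapshot_flow(&f);

          printf("qlen=%u backlog=%u drops=%u\n", qs.qlen, qs.backlog, qs.drops);
          return 0;
      }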
/Linux-v4.19/drivers/md/

  dm-cache-policy-smq.c
    257  struct ilist qs[MAX_LEVELS];  member
    278  l_init(q->qs + i);  in q_init()
    302  l_add_tail(q->es, q->qs + e->level, e);  in q_push()
    312  l_add_head(q->es, q->qs + e->level, e);  in q_push_front()
    322  l_add_before(q->es, q->qs + e->level, old, e);  in q_push_before()
    327  l_del(q->es, q->qs + e->level, e);  in q_del()
    343  for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {  in q_peek()
    377  for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e))  in __redist_pop_from()
    379  l_del(q->es, q->qs + e->level, e);  in __redist_pop_from()
    436  l = q->qs + level;  in q_redistribute()
    [all …]
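  Note: the dm-cache smq hits show a multilevel queue: one list per level, each entry pushed
  onto the list matching its level and consumed from the lowest populated level first. A
  compact standalone sketch of that structure using singly linked lists; it is a
  simplification, not the smq entry-space code.

      #include <stddef.h>
      #include <stdio.h>

      #define MAX_LEVELS 4

      struct entry {
          unsigned int level;      /* which per-level list this entry lives on */
          struct entry *next;
          int value;
      };

      struct mlqueue {
          struct entry *head[MAX_LEVELS];
          struct entry *tail[MAX_LEVELS];
      };

      static void q_init(struct mlqueue *q)
      {
          for (int i = 0; i < MAX_LEVELS; i++)
              q->head[i] = q->tail[i] = NULL;
      }

      /* append at the tail of the list selected by the entry's level */
      static void q_push(struct mlqueue *q, struct entry *e)
      {
          e->next = NULL;
          if (q->tail[e->level])
              q->tail[e->level]->next = e;
          else
              q->head[e->level] = e;
          q->tail[e->level] = e;
      }

      /* pop from the lowest populated level: lower levels are consumed first */
      static struct entry *q_pop_lowest(struct mlqueue *q)
      {
          for (int lvl = 0; lvl < MAX_LEVELS; lvl++) {
              struct entry *e = q->head[lvl];

              if (!e)
                  continue;
              q->head[lvl] = e->next;
              if (!q->head[lvl])
                  q->tail[lvl] = NULL;
              e->next = NULL;
              return e;
          }
          return NULL;
      }

      int main(void)
      {
          struct mlqueue q;
          struct entry a = { .level = 2, .value = 1 };
          struct entry b = { .level = 0, .value = 2 };

          q_init(&q);
          q_push(&q, &a);
          q_push(&q, &b);
          printf("%d\n", q_pop_lowest(&q)->value);   /* 2: level 0 comes first */
          printf("%d\n", q_pop_lowest(&q)->value);   /* 1: then level 2 */
          return 0;
      }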
/Linux-v4.19/drivers/net/ethernet/netronome/nfp/abm/

  main.c
    63   u32 handle, unsigned int qs, u32 init_val)  in __nfp_abm_reset_root() argument
    72   alink->num_qdiscs = qs;  in __nfp_abm_reset_root()
    73   port->tc_offload_cnt = qs;  in __nfp_abm_reset_root()
    80   u32 handle, unsigned int qs)  in nfp_abm_reset_root() argument
    82   __nfp_abm_reset_root(netdev, alink, handle, qs, ~0);  in nfp_abm_reset_root()
/Linux-v4.19/arch/s390/include/uapi/asm/

  runtime_instr.h
    22   __u32 qs : 1;  member
/Linux-v4.19/include/uapi/sound/

  hdspm.h
    40   qs  enumerator
/Linux-v4.19/arch/mips/boot/dts/mscc/

  ocelot.dtsi
    124  reg-names = "sys", "rew", "qs", "hsio", "port0",