Occurrences of "qstats" in Linux v4.19, grouped by directory and file.

/Linux-v4.19/net/core/
gen_stats.c
    251  __gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,  in __gnet_stats_copy_queue_cpu()  (argument)
    259  qstats->qlen = 0;  in __gnet_stats_copy_queue_cpu()
    260  qstats->backlog += qcpu->backlog;  in __gnet_stats_copy_queue_cpu()
    261  qstats->drops += qcpu->drops;  in __gnet_stats_copy_queue_cpu()
    262  qstats->requeues += qcpu->requeues;  in __gnet_stats_copy_queue_cpu()
    263  qstats->overlimits += qcpu->overlimits;  in __gnet_stats_copy_queue_cpu()
    267  void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,  in __gnet_stats_copy_queue()  (argument)
    273  __gnet_stats_copy_queue_cpu(qstats, cpu);  in __gnet_stats_copy_queue()
    275  qstats->qlen = q->qlen;  in __gnet_stats_copy_queue()
    276  qstats->backlog = q->backlog;  in __gnet_stats_copy_queue()
    [all …]
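The gen_stats.c hits above are the core fold of per-CPU queue counters into one struct gnet_stats_queue. The sketch below is a minimal, self-contained C rendering of that folding pattern; the struct mirrors the gnet_stats_queue fields visible in the snippets, while DEMO_NR_CPUS, the demo_* names and the sample values are invented for illustration (the kernel iterates with its per-CPU helpers instead).

    #include <stdio.h>

    /* Mirrors the gnet_stats_queue fields used above; declared locally
     * only so this sketch builds on its own. */
    struct demo_stats_queue {
        unsigned int qlen;
        unsigned int backlog;
        unsigned int drops;
        unsigned int requeues;
        unsigned int overlimits;
    };

    #define DEMO_NR_CPUS 4    /* hypothetical CPU count for the example */

    /* Fold one per-CPU copy into the totals, in the spirit of
     * __gnet_stats_copy_queue_cpu(): counters are summed, qlen is not
     * carried per CPU here. */
    static void demo_fold_cpu(struct demo_stats_queue *qstats,
                              const struct demo_stats_queue *qcpu)
    {
        qstats->backlog    += qcpu->backlog;
        qstats->drops      += qcpu->drops;
        qstats->requeues   += qcpu->requeues;
        qstats->overlimits += qcpu->overlimits;
    }

    int main(void)
    {
        struct demo_stats_queue percpu[DEMO_NR_CPUS] = {
            { .backlog = 1500, .drops = 1 },
            { .backlog = 3000, .overlimits = 2 },
        };
        struct demo_stats_queue total = { 0 };

        for (int cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
            demo_fold_cpu(&total, &percpu[cpu]);

        printf("backlog=%u drops=%u overlimits=%u\n",
               total.backlog, total.drops, total.overlimits);
        return 0;
    }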
/Linux-v4.19/net/sched/
sch_skbprio.c
    39   struct gnet_stats_queue qstats[SKBPRIO_MAX_PRIORITY];  (member)
    89   q->qstats[prio].backlog += qdisc_pkt_len(skb);  in skbprio_enqueue()
    105  q->qstats[prio].drops++;  in skbprio_enqueue()
    106  q->qstats[prio].overlimits++;  in skbprio_enqueue()
    112  q->qstats[prio].backlog += qdisc_pkt_len(skb);  in skbprio_enqueue()
    121  q->qstats[lp].backlog -= qdisc_pkt_len(to_drop);  in skbprio_enqueue()
    122  q->qstats[lp].drops++;  in skbprio_enqueue()
    123  q->qstats[lp].overlimits++;  in skbprio_enqueue()
    156  q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb);  in skbprio_dequeue()
    190  memset(&q->qstats, 0, sizeof(q->qstats));  in skbprio_init()
    [all …]
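sch_skbprio keeps one gnet_stats_queue per priority band and moves backlog, drops and overlimits as packets enter a band, are evicted to make room, or are dequeued. A hedged, self-contained sketch of that per-band bookkeeping; DEMO_MAX_PRIO and the demo_* helpers are stand-ins, not the qdisc's real API.

    /* Per-band counters, standing in for the module's
     * struct gnet_stats_queue qstats[SKBPRIO_MAX_PRIORITY] member. */
    #define DEMO_MAX_PRIO 64        /* stands in for SKBPRIO_MAX_PRIORITY */

    struct demo_band_stats {
        unsigned int backlog;       /* queued bytes in this band */
        unsigned int drops;
        unsigned int overlimits;
    };

    static struct demo_band_stats qstats[DEMO_MAX_PRIO];

    /* Packet accepted into band 'prio': grow its byte backlog, as
     * skbprio_enqueue() does with qdisc_pkt_len(skb). */
    static void demo_band_enqueue(unsigned int prio, unsigned int pkt_len)
    {
        qstats[prio].backlog += pkt_len;
    }

    /* Packet evicted from a band to make room for a more important one:
     * shrink the backlog and count both a drop and an overlimit. */
    static void demo_band_evict(unsigned int prio, unsigned int pkt_len)
    {
        qstats[prio].backlog -= pkt_len;
        qstats[prio].drops++;
        qstats[prio].overlimits++;
    }

    /* Packet leaving band 'prio' on dequeue. */
    static void demo_band_dequeue(unsigned int prio, unsigned int pkt_len)
    {
        qstats[prio].backlog -= pkt_len;
    }

    int main(void)
    {
        demo_band_enqueue(5, 1500);
        demo_band_enqueue(0, 900);
        demo_band_evict(0, 900);        /* the band judged least important gives way */
        demo_band_dequeue(5, 1500);
        return 0;
    }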
sch_mq.c
    49   .qstats = &sch->qstats,  in mq_offload_stats()
    142  memset(&sch->qstats, 0, sizeof(sch->qstats));  in mq_dump()
    158  __gnet_stats_copy_queue(&sch->qstats,  in mq_dump()
    160  &qdisc->qstats, qlen);  in mq_dump()
    165  sch->qstats.qlen += qdisc->qstats.qlen;  in mq_dump()
    166  sch->qstats.backlog += qdisc->qstats.backlog;  in mq_dump()
    167  sch->qstats.drops += qdisc->qstats.drops;  in mq_dump()
    168  sch->qstats.requeues += qdisc->qstats.requeues;  in mq_dump()
    169  sch->qstats.overlimits += qdisc->qstats.overlimits;  in mq_dump()
    246  gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)  in mq_dump_class_stats()
sch_mqprio.c
    396  memset(&sch->qstats, 0, sizeof(sch->qstats));  in mqprio_dump()
    413  __gnet_stats_copy_queue(&sch->qstats,  in mqprio_dump()
    415  &qdisc->qstats, qlen);  in mqprio_dump()
    420  sch->qstats.backlog += qdisc->qstats.backlog;  in mqprio_dump()
    421  sch->qstats.drops += qdisc->qstats.drops;  in mqprio_dump()
    422  sch->qstats.requeues += qdisc->qstats.requeues;  in mqprio_dump()
    423  sch->qstats.overlimits += qdisc->qstats.overlimits;  in mqprio_dump()
    517  struct gnet_stats_queue qstats = {0};  in mqprio_dump_class_stats()  (local)
    545  __gnet_stats_copy_queue(&sch->qstats,  in mqprio_dump_class_stats()
    547  &qdisc->qstats,  in mqprio_dump_class_stats()
    [all …]
sch_red.c
    70   child->qstats.backlog);  in red_enqueue()
    147  sch->qstats.backlog = 0;  in red_reset()
    170  opt.set.qstats = &sch->qstats;  in red_offload()
    235  q->qdisc->qstats.backlog);  in red_change()
    291  .stats.qstats = &sch->qstats,  in red_dump_offload_stats()
sch_prio.c
    141  sch->qstats.backlog = 0;  in prio_reset()
    161  opt.replace_params.qstats = &sch->qstats;  in prio_offload()
    222  child->qstats.backlog);  in prio_tune()
    262  .qstats = &sch->qstats,  in prio_dump_offload()
    400  gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)  in prio_dump_class_stats()
sch_fifo.c
    25   if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))  in bfifo_enqueue()
    48   prev_backlog = sch->qstats.backlog;  in pfifo_tail_enqueue()
    54   qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);  in pfifo_tail_enqueue()
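bfifo_enqueue() admits a packet only while the byte backlog would stay within sch->limit. A self-contained sketch of that admission test; the demo_qdisc struct and the sample values are illustrative, not the real struct Qdisc.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy queue state: only the fields this example needs. */
    struct demo_qdisc {
        unsigned int backlog;   /* queued bytes, like sch->qstats.backlog */
        unsigned int limit;     /* byte limit, like sch->limit in bfifo */
    };

    /* bfifo-style test: accept only if the byte backlog stays within limit. */
    static bool demo_byte_fifo_may_enqueue(const struct demo_qdisc *q,
                                           unsigned int pkt_len)
    {
        return q->backlog + pkt_len <= q->limit;
    }

    int main(void)
    {
        struct demo_qdisc q = { .backlog = 9000, .limit = 10000 };

        printf("1500B packet: %s\n",
               demo_byte_fifo_may_enqueue(&q, 1500) ? "enqueue" : "drop");
        printf(" 800B packet: %s\n",
               demo_byte_fifo_may_enqueue(&q, 800) ? "enqueue" : "drop");
        return 0;
    }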
sch_drr.c
    26   struct gnet_stats_queue qstats;  (member)
    56   unsigned int backlog = cl->qdisc->qstats.backlog;  in drr_purge_queue()
    282  gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)  in drr_dump_class_stats()
    368  cl->qstats.drops++;  in drr_enqueue()
    453  sch->qstats.backlog = 0;  in drr_reset_qdisc()
sch_fq_codel.c
    183  sch->qstats.drops += i;  in fq_codel_drop()
    184  sch->qstats.backlog -= len;  in fq_codel_drop()
    226  prev_backlog = sch->qstats.backlog;  in fq_codel_enqueue()
    239  prev_backlog -= sch->qstats.backlog;  in fq_codel_enqueue()
    274  sch->qstats.backlog -= qdisc_pkt_len(skb);  in dequeue_func()
    313  skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,  in fq_codel_dequeue()
    364  sch->qstats.backlog = 0;  in fq_codel_reset()
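fq_codel (and hhf further down) snapshots qstats.backlog before a step that may drop packets and passes the before/after difference to qdisc_tree_reduce_backlog() so ancestor qdiscs stay in sync. A hedged, self-contained sketch of that delta bookkeeping; the drop routine and the propagation stub are invented placeholders for the real codel machinery.

    #include <stdio.h>

    struct demo_qdisc {
        unsigned int backlog;   /* queued bytes, like sch->qstats.backlog */
        unsigned int drops;
    };

    /* Placeholder for a congestion step that may free queued bytes; it
     * drops a fixed amount when enough is queued, purely for illustration. */
    static void demo_maybe_drop(struct demo_qdisc *q)
    {
        unsigned int freed = 3000;

        if (q->backlog >= freed) {
            q->backlog -= freed;
            q->drops += 2;
        }
    }

    /* Placeholder for qdisc_tree_reduce_backlog(): parents would subtract
     * these packets and bytes from their own counters. */
    static void demo_tree_reduce_backlog(unsigned int pkts, unsigned int bytes)
    {
        printf("propagate to parents: -%u packets, -%u bytes\n", pkts, bytes);
    }

    int main(void)
    {
        struct demo_qdisc q = { .backlog = 10000 };
        unsigned int prev_backlog = q.backlog;  /* snapshot, as in fq_codel_enqueue() */
        unsigned int prev_drops = q.drops;

        demo_maybe_drop(&q);

        /* Report only the delta caused by this operation. */
        demo_tree_reduce_backlog(q.drops - prev_drops, prev_backlog - q.backlog);
        return 0;
    }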
sch_tbf.c
    278  sch->qstats.backlog = 0;  in tbf_reset()
    394  q->qdisc->qstats.backlog);  in tbf_change()
    450  sch->qstats.backlog = q->qdisc->qstats.backlog;  in tbf_dump()
sch_multiq.c
    206  child->qstats.backlog);  in multiq_tune()
    230  old->qstats.backlog);  in multiq_tune()
    347  gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)  in multiq_dump_class_stats()
sch_hhf.c
    404  prev_backlog = sch->qstats.backlog;  in hhf_enqueue()
    413  qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);  in hhf_enqueue()
    562  prev_backlog = sch->qstats.backlog;  in hhf_change()
    569  prev_backlog - sch->qstats.backlog);  in hhf_change()
sch_codel.c
    75   sch->qstats.backlog -= qdisc_pkt_len(skb);  in dequeue_func()
    94   skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,  in codel_qdisc_dequeue()
sch_sfb.c
    459  sch->qstats.backlog = 0;  in sfb_reset()
    525  q->qdisc->qstats.backlog);  in sfb_change()
    583  sch->qstats.backlog = q->qdisc->qstats.backlog;  in sfb_dump()
sch_pie.c
    120  if (sch->qstats.backlog < 2 * mtu)  in drop_early()
    252  int qlen = sch->qstats.backlog; /* current queue size in bytes */  in pie_process_dequeue()
    317  u32 qlen = sch->qstats.backlog; /* queue size in bytes */  in calculate_probability()
/Linux-v4.19/kernel/locking/
qspinlock_stat.h
    91   static DEFINE_PER_CPU(unsigned long, qstats[qstat_num]);
    121  stat += per_cpu(qstats[counter], cpu);  in qstat_read()
    129  kicks += per_cpu(qstats[qstat_pv_kick_unlock], cpu);  in qstat_read()
    133  kicks += per_cpu(qstats[qstat_pv_kick_wake], cpu);  in qstat_read()
    185  unsigned long *ptr = per_cpu_ptr(qstats, cpu);  in qstat_write()
    244  this_cpu_inc(qstats[stat]);  in qstat_inc()
    252  this_cpu_add(qstats[qstat_pv_hash_hops], hopcnt);  in qstat_hop()
    264  this_cpu_add(qstats[qstat_pv_latency_kick], sched_clock() - start);  in __pv_kick()
    277  this_cpu_add(qstats[qstat_pv_latency_wake],  in __pv_wait()
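qspinlock_stat.h keeps one counter array per CPU, bumps only the local CPU's slot on the hot path (this_cpu_inc()/this_cpu_add()) and sums every CPU's slot when the debugfs file is read. A userspace analogue of that pattern with a plain two-dimensional array; the DEMO_* names are invented and the real code uses the kernel's per-CPU API shown above.

    #include <stdio.h>

    enum demo_stat { DEMO_SLOWPATH, DEMO_PENDING, DEMO_STAT_NUM };

    #define DEMO_NR_CPUS 4          /* hypothetical CPU count */

    /* One counter row per CPU; each writer touches only its own row, so
     * the increment path needs no cross-CPU synchronisation. */
    static unsigned long demo_qstats[DEMO_NR_CPUS][DEMO_STAT_NUM];

    /* Analogue of qstat_inc(): bump the calling CPU's slot. */
    static void demo_stat_inc(int cpu, enum demo_stat stat)
    {
        demo_qstats[cpu][stat]++;
    }

    /* Analogue of qstat_read(): fold every CPU's slot into one total. */
    static unsigned long demo_stat_read(enum demo_stat stat)
    {
        unsigned long sum = 0;

        for (int cpu = 0; cpu < DEMO_NR_CPUS; cpu++)
            sum += demo_qstats[cpu][stat];
        return sum;
    }

    int main(void)
    {
        demo_stat_inc(0, DEMO_SLOWPATH);
        demo_stat_inc(2, DEMO_SLOWPATH);
        demo_stat_inc(2, DEMO_PENDING);

        printf("slowpath=%lu pending=%lu\n",
               demo_stat_read(DEMO_SLOWPATH), demo_stat_read(DEMO_PENDING));
        return 0;
    }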
/Linux-v4.19/drivers/net/ethernet/broadcom/bnx2x/
bnx2x_stats.h
    428  ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
    443  ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
    456  ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
    461  qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \
    462  qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \
    463  + ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \
    468  qstats_old->f = qstats->f; \
    473  ADD_64(estats->s##_hi, qstats->s##_hi, \
    474  estats->s##_lo, qstats->s##_lo); \
    477  qstats_old->s##_hi_old = qstats->s##_hi; \
    [all …]
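The bnx2x statistics macros keep 64-bit counters as separate hi/lo 32-bit words and carry by hand: after updating the low word they test whether it wrapped and bump the high word if so (see the compare on lines 461-463 above). Below is a self-contained sketch of that carry logic, assuming only that ADD_EXTEND_64() extends a hi/lo pair by a 32-bit delta as the snippets suggest.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* A 64-bit counter stored as two 32-bit halves, as in the bnx2x
     * per-queue statistics structures. */
    struct demo_split64 {
        uint32_t hi;
        uint32_t lo;
    };

    /* Add a 32-bit delta and carry into the high word when the low word
     * wraps; the same wrap test as the hi/lo update shown above. */
    static void demo_split64_add(struct demo_split64 *c, uint32_t diff)
    {
        uint32_t old_lo = c->lo;

        c->lo += diff;
        if (c->lo < old_lo)             /* wrapped past 2^32 */
            c->hi++;
    }

    int main(void)
    {
        struct demo_split64 bytes = { .hi = 0, .lo = 0xFFFFFFF0u };

        demo_split64_add(&bytes, 0x20); /* crosses the 32-bit boundary */
        printf("hi=%" PRIu32 " lo=%" PRIu32 " total=%llu\n",
               bytes.hi, bytes.lo,
               ((unsigned long long)bytes.hi << 32) | bytes.lo);
        return 0;
    }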
bnx2x_stats.c
    951  struct bnx2x_eth_q_stats *qstats =  in bnx2x_storm_stats_update()  (local)
    975  qstats->total_bytes_received_hi =  in bnx2x_storm_stats_update()
    976  qstats->total_broadcast_bytes_received_hi;  in bnx2x_storm_stats_update()
    977  qstats->total_bytes_received_lo =  in bnx2x_storm_stats_update()
    978  qstats->total_broadcast_bytes_received_lo;  in bnx2x_storm_stats_update()
    980  ADD_64(qstats->total_bytes_received_hi,  in bnx2x_storm_stats_update()
    981  qstats->total_multicast_bytes_received_hi,  in bnx2x_storm_stats_update()
    982  qstats->total_bytes_received_lo,  in bnx2x_storm_stats_update()
    983  qstats->total_multicast_bytes_received_lo);  in bnx2x_storm_stats_update()
    985  ADD_64(qstats->total_bytes_received_hi,  in bnx2x_storm_stats_update()
    [all …]
/Linux-v4.19/include/net/
sch_generic.h
    101  struct gnet_stats_queue qstats;  (member)
    242  struct gnet_stats_queue *qstats;  (member)
    399  __u32 qlen = q->qstats.qlen;  in qdisc_qlen_sum()
    758  sch->qstats.backlog -= qdisc_pkt_len(skb);  in qdisc_qstats_backlog_dec()
    770  sch->qstats.backlog += qdisc_pkt_len(skb);  in qdisc_qstats_backlog_inc()
    796  sch->qstats.drops += count;  in __qdisc_qstats_drop()
    799  static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)  in qstats_drop_inc()  (argument)
    801  qstats->drops++;  in qstats_drop_inc()
    804  static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)  in qstats_overlimit_inc()  (argument)
    806  qstats->overlimits++;  in qstats_overlimit_inc()
    [all …]
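sch_generic.h wraps the recurring qstats updates in small inline helpers: the backlog moves by the packet length, drops and overlimits are plain increments. A header-style sketch that mirrors those helpers against local stand-in types so it compiles on its own; the demo_* names are invented, and the real helpers take struct Qdisc and read the length from the skb.

    /* Local stand-ins for struct Qdisc and its embedded gnet_stats_queue. */
    struct demo_stats_queue {
        unsigned int qlen;
        unsigned int backlog;
        unsigned int drops;
        unsigned int requeues;
        unsigned int overlimits;
    };

    struct demo_qdisc {
        struct demo_stats_queue qstats;
    };

    static inline void demo_qstats_backlog_inc(struct demo_qdisc *sch,
                                               unsigned int pkt_len)
    {
        sch->qstats.backlog += pkt_len;         /* cf. qdisc_qstats_backlog_inc() */
    }

    static inline void demo_qstats_backlog_dec(struct demo_qdisc *sch,
                                               unsigned int pkt_len)
    {
        sch->qstats.backlog -= pkt_len;         /* cf. qdisc_qstats_backlog_dec() */
    }

    static inline void demo_qstats_drop(struct demo_qdisc *sch, unsigned int count)
    {
        sch->qstats.drops += count;             /* cf. __qdisc_qstats_drop() */
    }

    static inline void demo_qstats_overlimit(struct demo_qdisc *sch)
    {
        sch->qstats.overlimits++;               /* cf. qstats_overlimit_inc() */
    }

    int main(void)
    {
        struct demo_qdisc sch = { { 0 } };

        demo_qstats_backlog_inc(&sch, 1500);    /* packet queued */
        demo_qstats_backlog_dec(&sch, 1500);    /* packet sent */
        demo_qstats_drop(&sch, 1);              /* packet dropped */
        demo_qstats_overlimit(&sch);
        return 0;
    }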
gen_stats.h
    52   void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
/Linux-v4.19/drivers/infiniband/hw/hfi1/
vnic_main.c
    171  #define SUM_GRP_COUNTERS(stats, qstats, x_grp) do { \  (argument)
    173  for (src64 = &qstats->x_grp.unicast, \
    189  struct opa_vnic_stats *qstats = &vinfo->stats[i];  in hfi1_vnic_update_stats()  (local)
    194  stats->tx_drop_state += qstats->tx_drop_state;  in hfi1_vnic_update_stats()
    195  stats->tx_dlid_zero += qstats->tx_dlid_zero;  in hfi1_vnic_update_stats()
    197  SUM_GRP_COUNTERS(stats, qstats, tx_grp);  in hfi1_vnic_update_stats()
    204  struct opa_vnic_stats *qstats = &vinfo->stats[i];  in hfi1_vnic_update_stats()  (local)
    209  stats->rx_drop_state += qstats->rx_drop_state;  in hfi1_vnic_update_stats()
    210  stats->rx_oversize += qstats->rx_oversize;  in hfi1_vnic_update_stats()
    211  stats->rx_runt += qstats->rx_runt;  in hfi1_vnic_update_stats()
    [all …]
/Linux-v4.19/drivers/net/ethernet/mellanox/mlxsw/
spectrum_qdisc.c
    353  p->qstats->backlog -= backlog;  in mlxsw_sp_qdisc_red_unoffload()
    410  stats_ptr->qstats->overlimits += overlimits;  in mlxsw_sp_qdisc_get_red_stats()
    411  stats_ptr->qstats->drops += drops;  in mlxsw_sp_qdisc_get_red_stats()
    412  stats_ptr->qstats->backlog +=  in mlxsw_sp_qdisc_get_red_stats()
    556  p->qstats->backlog -= backlog;  in mlxsw_sp_qdisc_prio_unoffload()
    585  stats_ptr->qstats->drops += drops;  in mlxsw_sp_qdisc_get_prio_stats()
    586  stats_ptr->qstats->backlog +=  in mlxsw_sp_qdisc_get_prio_stats()
/Linux-v4.19/Documentation/networking/
gen_stats.txt
    20   struct gnet_stats_queue qstats;
    26   mystruct->qstats.backlog += skb->pkt_len;
    41   gnet_stats_copy_queue(&dump, &mystruct->qstats) < 0 ||
/Linux-v4.19/drivers/net/ethernet/netronome/nfp/abm/
main.c
    189  opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts;  in nfp_abm_red_replace()
    190  opt->set.qstats->backlog -=  in nfp_abm_red_replace()
    204  stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts;  in nfp_abm_update_stats()
    205  stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes;  in nfp_abm_update_stats()
    206  stats->qstats->overlimits += new->overlimits - old->overlimits;  in nfp_abm_update_stats()
    207  stats->qstats->drops += new->drops - old->drops;  in nfp_abm_update_stats()
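The NFP abm driver refreshes the software qstats from absolute hardware counters by folding in only the change since the previous snapshot (new minus old). A self-contained sketch of that delta accumulation; the struct names, the cur/prev parameters and the snapshot handling in main() are illustrative assumptions rather than the driver's exact code.

    #include <stdint.h>
    #include <stdio.h>

    /* Software-visible queue stats (the gnet_stats_queue fields used here). */
    struct demo_stats_queue {
        uint32_t qlen;
        uint32_t backlog;
        uint32_t overlimits;
        uint32_t drops;
    };

    /* One snapshot of the absolute counters reported by the hardware. */
    struct demo_hw_snapshot {
        uint64_t backlog_pkts;
        uint64_t backlog_bytes;
        uint64_t overlimits;
        uint64_t drops;
    };

    /* Fold in only the change since the previous snapshot, mirroring the
     * new-minus-old updates in nfp_abm_update_stats(); unsigned arithmetic
     * keeps this consistent even when a gauge such as backlog shrinks. */
    static void demo_update_stats(struct demo_stats_queue *qstats,
                                  const struct demo_hw_snapshot *cur,
                                  struct demo_hw_snapshot *prev)
    {
        qstats->qlen       += cur->backlog_pkts  - prev->backlog_pkts;
        qstats->backlog    += cur->backlog_bytes - prev->backlog_bytes;
        qstats->overlimits += cur->overlimits    - prev->overlimits;
        qstats->drops      += cur->drops         - prev->drops;
        *prev = *cur;           /* remember what has been accounted for */
    }

    int main(void)
    {
        struct demo_stats_queue q = { 0 };
        struct demo_hw_snapshot prev = { 0 };
        struct demo_hw_snapshot s1 = { .backlog_pkts = 3, .backlog_bytes = 4500, .drops = 1 };
        struct demo_hw_snapshot s2 = { .backlog_pkts = 5, .backlog_bytes = 7500, .drops = 4 };

        demo_update_stats(&q, &s1, &prev);
        demo_update_stats(&q, &s2, &prev);
        printf("qlen=%u backlog=%u drops=%u\n",
               (unsigned int)q.qlen, (unsigned int)q.backlog, (unsigned int)q.drops);
        return 0;
    }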
/Linux-v4.19/drivers/net/hyperv/
netvsc_drv.c
    1336  const struct netvsc_stats *qstats;  in netvsc_get_ethtool_stats()  (local)
    1354  qstats = &nvdev->chan_table[j].tx_stats;  in netvsc_get_ethtool_stats()
    1357  start = u64_stats_fetch_begin_irq(&qstats->syncp);  in netvsc_get_ethtool_stats()
    1358  packets = qstats->packets;  in netvsc_get_ethtool_stats()
    1359  bytes = qstats->bytes;  in netvsc_get_ethtool_stats()
    1360  } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));  in netvsc_get_ethtool_stats()
    1364  qstats = &nvdev->chan_table[j].rx_stats;  in netvsc_get_ethtool_stats()
    1366  start = u64_stats_fetch_begin_irq(&qstats->syncp);  in netvsc_get_ethtool_stats()
    1367  packets = qstats->packets;  in netvsc_get_ethtool_stats()
    1368  bytes = qstats->bytes;  in netvsc_get_ethtool_stats()
    [all …]