/Linux-v5.4/drivers/crypto/ccp/

ccp-crypto-main.c
      55  struct list_head *backlog;  member
      97  struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)  in ccp_crypto_cmd_complete() argument
     102  *backlog = NULL;  in ccp_crypto_cmd_complete()
     121  if (req_queue.backlog != &req_queue.cmds) {  in ccp_crypto_cmd_complete()
     123  if (req_queue.backlog == &crypto_cmd->entry)  in ccp_crypto_cmd_complete()
     124  req_queue.backlog = crypto_cmd->entry.next;  in ccp_crypto_cmd_complete()
     126  *backlog = container_of(req_queue.backlog,  in ccp_crypto_cmd_complete()
     128  req_queue.backlog = req_queue.backlog->next;  in ccp_crypto_cmd_complete()
     131  if (req_queue.backlog == &crypto_cmd->entry)  in ccp_crypto_cmd_complete()
     132  req_queue.backlog = crypto_cmd->entry.next;  in ccp_crypto_cmd_complete()
    [all …]

ccp-dev.c
     315  list_add_tail(&cmd->entry, &ccp->backlog);  in ccp_enqueue_cmd()
     378  struct ccp_cmd *backlog = NULL;  in ccp_dequeue_cmd() local
     403  if (!list_empty(&ccp->backlog)) {  in ccp_dequeue_cmd()
     404  backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,  in ccp_dequeue_cmd()
     406  list_del(&backlog->entry);  in ccp_dequeue_cmd()
     411  if (backlog) {  in ccp_dequeue_cmd()
     412  INIT_WORK(&backlog->work, ccp_do_cmd_backlog);  in ccp_dequeue_cmd()
     413  schedule_work(&backlog->work);  in ccp_dequeue_cmd()
     488  INIT_LIST_HEAD(&ccp->backlog);  in ccp_alloc_struct()
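
The two ccp files show the driver half of the kernel crypto backlog convention: when the hardware queue is full, a backloggable command is parked on ccp->backlog by ccp_enqueue_cmd(), and ccp_dequeue_cmd() later pulls the oldest entry off that list and schedules deferred work (ccp_do_cmd_backlog) to resubmit it. Below is a minimal userspace model of that park-and-drain pattern; it keeps only the list handling, while the real driver uses a workqueue and spinlocks:

    #include <stdio.h>
    #include <stdlib.h>

    struct cmd {
        int id;
        struct cmd *next;
    };

    /* A device with a bounded "hardware" queue plus an overflow backlog list. */
    struct dev {
        int hw_free;               /* free hardware slots */
        struct cmd *backlog;       /* oldest-first singly linked list */
        struct cmd **backlog_tail;
    };

    static void enqueue(struct dev *d, struct cmd *c)
    {
        if (d->hw_free > 0) {
            d->hw_free--;
            printf("cmd %d submitted to hardware\n", c->id);
            free(c);
            return;
        }
        /* Queue full: park it, like list_add_tail(&cmd->entry, &ccp->backlog). */
        c->next = NULL;
        *d->backlog_tail = c;
        d->backlog_tail = &c->next;
        printf("cmd %d backlogged\n", c->id);
    }

    /* Called when a hardware slot frees up, like ccp_dequeue_cmd(). */
    static void dequeue(struct dev *d)
    {
        struct cmd *c = d->backlog;

        d->hw_free++;
        if (!c)
            return;
        d->backlog = c->next;            /* list_del() of the first entry */
        if (!d->backlog)
            d->backlog_tail = &d->backlog;
        enqueue(d, c);                   /* stands in for ccp_do_cmd_backlog() */
    }

    int main(void)
    {
        struct dev d = { .hw_free = 1, .backlog = NULL, .backlog_tail = &d.backlog };

        for (int i = 0; i < 3; i++) {
            struct cmd *c = malloc(sizeof(*c));
            c->id = i;
            enqueue(&d, c);
        }
        dequeue(&d);  /* drains cmd 1 from the backlog */
        return 0;
    }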

/Linux-v5.4/drivers/net/ethernet/mellanox/mlxsw/

spectrum_qdisc.c
      59  u64 backlog;  member
     277  stats_base->backlog = 0;  in mlxsw_sp_setup_tc_qdisc_red_clean_stats()
     287  root_qdisc->stats_base.backlog -=  in mlxsw_sp_qdisc_red_destroy()
     288  mlxsw_sp_qdisc->stats_base.backlog;  in mlxsw_sp_qdisc_red_destroy()
     349  u64 backlog;  in mlxsw_sp_qdisc_red_unoffload() local
     351  backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,  in mlxsw_sp_qdisc_red_unoffload()
     352  mlxsw_sp_qdisc->stats_base.backlog);  in mlxsw_sp_qdisc_red_unoffload()
     353  p->qstats->backlog -= backlog;  in mlxsw_sp_qdisc_red_unoffload()
     354  mlxsw_sp_qdisc->stats_base.backlog = 0;  in mlxsw_sp_qdisc_red_unoffload()
     389  u64 tx_bytes, tx_packets, overlimits, drops, backlog;  in mlxsw_sp_qdisc_get_red_stats() local
    [all …]
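
The offloaded RED qdisc keeps stats_base.backlog in the switch's buffer-cell units and converts it to bytes with mlxsw_sp_cells_bytes() before debiting the kernel's byte-based p->qstats->backlog. A toy version of that unit handling; the 96-byte cell size is an assumption for illustration, since the real driver queries it from the ASIC:

    #include <stdint.h>
    #include <stdio.h>

    #define CELL_BYTES 96  /* assumed; mlxsw queries the real cell size */

    static uint64_t cells_to_bytes(uint64_t cells)
    {
        return cells * CELL_BYTES;  /* cf. mlxsw_sp_cells_bytes() */
    }

    int main(void)
    {
        uint64_t stats_base_backlog = 128;  /* device backlog, in cells */
        uint64_t qstats_backlog = 50000;    /* kernel backlog, in bytes */

        /* On unoffload, remove the device-held share from the byte stats,
         * as mlxsw_sp_qdisc_red_unoffload() does on lines 351-354. */
        qstats_backlog -= cells_to_bytes(stats_base_backlog);
        stats_base_backlog = 0;

        printf("remaining byte backlog: %llu\n",
               (unsigned long long)qstats_backlog);
        return 0;
    }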

/Linux-v5.4/tools/perf/ui/tui/

helpline.c
      34  static int backlog;  in tui_helpline__show() local
      37  ret = vscnprintf(ui_helpline__last_msg + backlog,  in tui_helpline__show()
      38  sizeof(ui_helpline__last_msg) - backlog, format, ap);  in tui_helpline__show()
      39  backlog += ret;  in tui_helpline__show()
      43  if (ui_helpline__last_msg[backlog - 1] == '\n') {  in tui_helpline__show()
      46  backlog = 0;  in tui_helpline__show()

/Linux-v5.4/tools/perf/ui/gtk/

helpline.c
      32  static int backlog;  in gtk_helpline_show() local
      34  ret = vscnprintf(ui_helpline__current + backlog,  in gtk_helpline_show()
      35  sizeof(ui_helpline__current) - backlog, fmt, ap);  in gtk_helpline_show()
      36  backlog += ret;  in gtk_helpline_show()
      40  if (ptr && (ptr - ui_helpline__current) <= backlog) {  in gtk_helpline_show()
      43  backlog = 0;  in gtk_helpline_show()
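
Both perf helplines use backlog as a write offset into a static message buffer: vscnprintf() appends each formatted fragment at that offset, the offset advances by the amount actually written, and it resets to zero once a newline completes the message (the TUI checks the last byte; the GTK version searches the accumulated text). A compilable sketch of the accumulate-until-newline pattern:

    #include <stdarg.h>
    #include <stdio.h>

    static char last_msg[512];
    static int backlog;  /* write offset into last_msg */

    static void helpline_show(const char *fmt, ...)
    {
        int avail = (int)sizeof(last_msg) - backlog;
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = vsnprintf(last_msg + backlog, avail, fmt, ap);
        va_end(ap);
        if (ret >= avail)
            ret = avail - 1;  /* vscnprintf() clamps to what actually fit */
        backlog += ret;

        /* Flush and reset only once the caller has completed a line. */
        if (backlog && last_msg[backlog - 1] == '\n') {
            fputs(last_msg, stdout);
            backlog = 0;
        }
    }

    int main(void)
    {
        helpline_show("loading ");
        helpline_show("%d events", 42);
        helpline_show("... done\n");  /* the newline triggers the flush */
        return 0;
    }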

/Linux-v5.4/include/net/

fq_impl.h
      22  flow->backlog -= skb->len;  in fq_adjust_removal()
      23  fq->backlog--;  in fq_adjust_removal()
      31  if (flow->backlog == 0) {  in fq_rejigger_backlog()
      37  if (i->backlog < flow->backlog)  in fq_rejigger_backlog()
     150  if (i->backlog > flow->backlog)  in fq_recalc_backlog()
     170  flow->backlog += skb->len;  in fq_tin_enqueue()
     174  fq->backlog++;  in fq_tin_enqueue()
     186  while (fq->backlog > fq->limit || oom) {  in fq_tin_enqueue()
     264  WARN_ON_ONCE(flow->backlog);  in fq_flow_reset()
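
fq_impl.h tracks two units under one name: flow->backlog is bytes (adjusted by skb->len) while fq->backlog counts packets, and fq_tin_enqueue() keeps dropping from the fattest flow while the packet total exceeds fq->limit. A simplified, compilable model of the dual accounting and the drop loop; the real code scans a sorted backlogs list and picks flows more carefully:

    #include <stdio.h>

    #define NFLOWS 2
    #define MAXPKT 16

    struct flow {
        unsigned int len[MAXPKT];  /* queued packet sizes, FIFO ring */
        unsigned int head, tail;
        unsigned int backlog;      /* bytes queued in this flow */
    };

    struct fq {
        struct flow flows[NFLOWS];
        unsigned int backlog;      /* packets queued across all flows */
        unsigned int limit;        /* packet limit, like fq->limit */
    };

    static void fq_enqueue(struct fq *fq, unsigned int idx, unsigned int bytes)
    {
        struct flow *f = &fq->flows[idx];

        f->len[f->tail++ % MAXPKT] = bytes;
        f->backlog += bytes;  /* bytes, like flow->backlog += skb->len */
        fq->backlog++;        /* packets, like fq->backlog++ */

        /* Over the global packet limit: head-drop from the fattest flow. */
        while (fq->backlog > fq->limit) {
            struct flow *fat = &fq->flows[0];

            for (unsigned int i = 1; i < NFLOWS; i++)
                if (fq->flows[i].backlog > fat->backlog)
                    fat = &fq->flows[i];
            unsigned int dropped = fat->len[fat->head++ % MAXPKT];
            fat->backlog -= dropped;
            fq->backlog--;
            printf("dropped a %u-byte packet\n", dropped);
        }
    }

    int main(void)
    {
        struct fq fq = { .limit = 3 };

        fq_enqueue(&fq, 0, 1500);
        fq_enqueue(&fq, 0, 1500);
        fq_enqueue(&fq, 1, 100);
        fq_enqueue(&fq, 1, 100);  /* 4th packet: flow 0 loses its head packet */
        return 0;
    }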

codel_impl.h
     107  u32 *backlog,  in codel_should_drop() argument
     125  *backlog <= params->mtu) {  in codel_should_drop()
     143  u32 *backlog,  in codel_dequeue() argument
     162  skb_len_func, skb_time_func, backlog, now);  in codel_dequeue()
     198  backlog, now)) {  in codel_dequeue()
     223  skb_time_func, backlog, now);  in codel_dequeue()
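
codel_impl.h passes the queue's byte backlog down into codel_should_drop() because CoDel refuses to drop while no more than one MTU's worth of bytes is queued, however long the head packet has waited. A minimal standalone version of that exit condition; the 5 ms target is CoDel's usual default, and the real code also tracks first_above_time and the control-law next-drop time:

    #include <stdbool.h>
    #include <stdint.h>

    #define MS(x) ((x) * 1000u)  /* milliseconds -> microseconds */

    struct codel_params {
        uint32_t target;  /* acceptable sojourn time in us (5 ms default) */
        uint32_t mtu;     /* one MTU's worth of bytes */
    };

    /* Cut-down codel_should_drop(): never drop while the packet's sojourn
     * time is under target, or while at most one MTU is still backlogged. */
    static bool codel_should_drop(const struct codel_params *p,
                                  uint32_t sojourn_us, const uint32_t *backlog)
    {
        return sojourn_us >= p->target && *backlog > p->mtu;
    }

    int main(void)
    {
        struct codel_params p = { .target = MS(5), .mtu = 1514 };
        uint32_t backlog = 1000;  /* less than one MTU queued */

        /* Long sojourn but near-empty queue: CoDel stays out of drop state. */
        return codel_should_drop(&p, MS(20), &backlog);  /* exits 0 (false) */
    }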

sch_generic.h
     854  sch->qstats.backlog -= qdisc_pkt_len(skb);  in qdisc_qstats_backlog_dec()
     860  this_cpu_sub(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));  in qdisc_qstats_cpu_backlog_dec()
     866  sch->qstats.backlog += qdisc_pkt_len(skb);  in qdisc_qstats_backlog_inc()
     872  this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));  in qdisc_qstats_cpu_backlog_inc()
     928  __u32 *backlog)  in qdisc_qstats_qlen_backlog() argument
     935  *backlog = qstats.backlog;  in qdisc_qstats_qlen_backlog()
     940  __u32 qlen, backlog;  in qdisc_tree_flush_backlog() local
     942  qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);  in qdisc_tree_flush_backlog()
     943  qdisc_tree_reduce_backlog(sch, qlen, backlog);  in qdisc_tree_flush_backlog()
     948  __u32 qlen, backlog;  in qdisc_purge_queue() local
    [all …]
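
sch_generic.h pairs every queue-length change with a byte change: qdisc_qstats_backlog_inc()/_dec() add or subtract qdisc_pkt_len(skb), the this_cpu_add()/this_cpu_sub() variants do the same on per-CPU counters, and qdisc_tree_flush_backlog() snapshots both numbers so qdisc_tree_reduce_backlog() can propagate the reduction up the qdisc tree. A compact model of the paired accounting:

    #include <stdio.h>

    struct qstats {
        unsigned int qlen;     /* packets */
        unsigned int backlog;  /* bytes */
    };

    struct sk_buff { unsigned int len; };

    static void backlog_inc(struct qstats *q, const struct sk_buff *skb)
    {
        q->qlen++;
        q->backlog += skb->len;   /* like qdisc_qstats_backlog_inc() */
    }

    static void backlog_dec(struct qstats *q, const struct sk_buff *skb)
    {
        q->qlen--;
        q->backlog -= skb->len;   /* like qdisc_qstats_backlog_dec() */
    }

    /* Like qdisc_tree_flush_backlog(): snapshot both counters, then report
     * the whole amount as removed so parent qdiscs stay consistent. */
    static void tree_flush_backlog(struct qstats *q)
    {
        unsigned int qlen = q->qlen, backlog = q->backlog;

        printf("reduce parents by %u packets / %u bytes\n", qlen, backlog);
        q->qlen = 0;
        q->backlog = 0;
    }

    int main(void)
    {
        struct qstats q = { 0, 0 };
        struct sk_buff a = { 1500 }, b = { 60 };

        backlog_inc(&q, &a);
        backlog_inc(&q, &b);
        backlog_dec(&q, &a);
        tree_flush_backlog(&q);  /* 1 packet / 60 bytes */
        return 0;
    }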

fq.h
      34  u32 backlog;  member
      77  u32 backlog;  member

red.h
     292  unsigned int backlog)  in red_calc_qavg_no_idle_time() argument
     303  return v->qavg + (backlog - (v->qavg >> p->Wlog));  in red_calc_qavg_no_idle_time()
     308  unsigned int backlog)  in red_calc_qavg() argument
     311  return red_calc_qavg_no_idle_time(p, v, backlog);  in red_calc_qavg()
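
red.h's line 303 is RED's exponentially weighted moving average in fixed point: v->qavg stores the average scaled by 2^Wlog, so qavg + (backlog - (qavg >> Wlog)) computes avg = (1 - 2^-Wlog)*avg + 2^-Wlog*backlog with a shift instead of a multiply. A standalone demonstration of the update:

    #include <stdio.h>

    /* RED keeps qavg in fixed point, scaled by 2^Wlog, so one add and a
     * shift implement avg += W * (backlog - avg) with W = 2^-Wlog, exactly
     * the expression on red.h line 303. */
    static unsigned long red_qavg_step(unsigned long qavg, unsigned int backlog,
                                       unsigned int Wlog)
    {
        return qavg + (backlog - (qavg >> Wlog));
    }

    int main(void)
    {
        unsigned long qavg = 0;  /* scaled average queue size */
        unsigned int Wlog = 9;   /* W = 1/512, a common RED weight */

        for (int i = 0; i < 5; i++) {
            qavg = red_qavg_step(qavg, 3000, Wlog);
            printf("avg ~ %lu bytes\n", qavg >> Wlog);  /* unscale to display */
        }
        return 0;
    }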

/Linux-v5.4/net/sched/

sch_gred.c
      39  u32 backlog;  /* bytes on the virtualQ */  member
     117  return sch->qstats.backlog;  in gred_backlog()
     119  return q->backlog;  in gred_backlog()
     181  if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=  in gred_enqueue()
     247  q->backlog += qdisc_pkt_len(skb);  in gred_enqueue()
     275  q->backlog -= qdisc_pkt_len(skb);  in gred_dequeue()
     278  if (!sch->qstats.backlog)  in gred_dequeue()
     281  if (!q->backlog)  in gred_dequeue()
     306  q->backlog = 0;  in gred_reset()
     344  opt.set.tab[i].backlog = &q->backlog;  in gred_offload()
    [all …]

sch_sfq.c
     109  unsigned int backlog;  member
     308  slot->backlog -= len;  in sfq_drop()
     374  slot->backlog = 0; /* should already be 0 anyway... */  in sfq_enqueue()
     381  slot->backlog);  in sfq_enqueue()
     432  sch->qstats.backlog -= delta;  in sfq_enqueue()
     433  slot->backlog -= delta;  in sfq_enqueue()
     443  slot->backlog += qdisc_pkt_len(skb);  in sfq_enqueue()
     504  slot->backlog -= qdisc_pkt_len(skb);  in sfq_dequeue()
     556  slot->backlog = 0;  in sfq_rehash()
     587  slot->backlog);  in sfq_rehash()
    [all …]

sch_skbprio.c
      85  q->qstats[prio].backlog += qdisc_pkt_len(skb);  in skbprio_enqueue()
     108  q->qstats[prio].backlog += qdisc_pkt_len(skb);  in skbprio_enqueue()
     117  q->qstats[lp].backlog -= qdisc_pkt_len(to_drop);  in skbprio_enqueue()
     152  q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb);  in skbprio_dequeue()
     213  sch->qstats.backlog = 0;  in skbprio_reset()
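
sch_skbprio.c keeps one qstats entry per priority: an accepted packet's bytes are credited at its own priority, and when the qdisc is full a victim is taken from the lowest populated priority (lp), whose backlog is debited instead. A deliberately tiny model of that steal-from-lowest accounting; it allows at most one queued packet per priority, unlike the real per-priority queues:

    #include <stdio.h>

    #define PRIOS 4  /* higher index = higher priority in this toy */

    struct skbprio {
        unsigned int backlog[PRIOS]; /* bytes, like q->qstats[prio].backlog */
        unsigned int qlen, limit;
    };

    static void enqueue(struct skbprio *q, unsigned int prio, unsigned int len)
    {
        if (q->qlen >= q->limit) {
            /* Full: find the lowest populated priority. */
            unsigned int lp = 0;
            while (lp < PRIOS && !q->backlog[lp])
                lp++;
            if (lp >= prio)
                return;               /* new packet is no better: drop it */
            printf("dropping %u bytes at prio %u\n", q->backlog[lp], lp);
            q->backlog[lp] = 0;       /* like q->qstats[lp].backlog -= ... */
            q->qlen--;
        }
        q->backlog[prio] += len;      /* like q->qstats[prio].backlog += ... */
        q->qlen++;
    }

    int main(void)
    {
        struct skbprio q = { .limit = 2 };

        enqueue(&q, 1, 100);
        enqueue(&q, 2, 200);
        enqueue(&q, 3, 300);  /* evicts the prio-1 packet */
        return 0;
    }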

sch_fifo.c
      21  if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))  in bfifo_enqueue()
      44  prev_backlog = sch->qstats.backlog;  in pfifo_tail_enqueue()
      50  qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);  in pfifo_tail_enqueue()
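
sch_fifo.c is the byte-counting base case: bfifo_enqueue() admits a packet only if the current byte backlog plus the packet's length stays within sch->limit, and pfifo_tail_enqueue() later reports the byte delta (with a packet delta of zero, since it drops one packet per packet it admits) via qdisc_tree_reduce_backlog(). The admission check, modeled standalone:

    #include <stdbool.h>
    #include <stdio.h>

    struct bfifo {
        unsigned int backlog;  /* bytes queued */
        unsigned int limit;    /* byte limit, unlike pfifo's packet limit */
    };

    /* Like bfifo_enqueue(): admit only if the new byte total still fits. */
    static bool bfifo_enqueue(struct bfifo *q, unsigned int pkt_len)
    {
        if (q->backlog + pkt_len <= q->limit) {
            q->backlog += pkt_len;
            return true;
        }
        return false;  /* the kernel drops the skb here */
    }

    int main(void)
    {
        struct bfifo q = { .backlog = 0, .limit = 3000 };

        printf("%d\n", bfifo_enqueue(&q, 1500));  /* 1 */
        printf("%d\n", bfifo_enqueue(&q, 1500));  /* 1 */
        printf("%d\n", bfifo_enqueue(&q, 1));     /* 0: would exceed limit */
        return 0;
    }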

/Linux-v5.4/drivers/crypto/qce/

core.c
      75  struct crypto_async_request *async_req, *backlog;  in qce_handle_queue() local
      90  backlog = crypto_get_backlog(&qce->queue);  in qce_handle_queue()
     100  if (backlog) {  in qce_handle_queue()
     102  backlog->complete(backlog, -EINPROGRESS);  in qce_handle_queue()
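
qce's qce_handle_queue(), like crypto_pump_requests() in crypto_engine.c and the rockchip, marvell, mxs-dcp and omap entries below, follows one contract: a request that was rejected with -EBUSY but kept because the submitter set CRYPTO_TFM_REQ_MAY_BACKLOG gets its completion callback invoked once with -EINPROGRESS when it finally leaves the backlog; the final callback with the real status comes later. A userspace model of that two-phase completion:

    #include <stdio.h>

    #define EINPROGRESS 115

    struct async_req {
        void (*complete)(struct async_req *req, int err);
    };

    /* Submitter's callback: fires twice for a backlogged request. */
    static void my_complete(struct async_req *req, int err)
    {
        (void)req;
        if (err == -EINPROGRESS) {
            /* First notification: the request left the backlog and was
             * accepted by the engine; the submitter may queue more work. */
            printf("moved off the backlog\n");
            return;
        }
        printf("finished with status %d\n", err);
    }

    int main(void)
    {
        struct async_req req = { .complete = my_complete };

        /* At submit time the queue was full, so the driver returned -EBUSY
         * and parked the request (MAY_BACKLOG semantics, not modeled). */

        /* Later, in the pump loop (cf. crypto_pump_requests()): */
        struct async_req *backlog = &req;  /* stands in for crypto_get_backlog() */
        if (backlog)
            backlog->complete(backlog, -EINPROGRESS);

        /* And when the hardware finishes the request: */
        req.complete(&req, 0);
        return 0;
    }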

/Linux-v5.4/net/core/

gen_stats.c
     291  qstats->backlog += qcpu->backlog;  in __gnet_stats_copy_queue_cpu()
     307  qstats->backlog = q->backlog;  in __gnet_stats_copy_queue()
     343  d->tc_stats.backlog = qstats.backlog;  in gnet_stats_copy_queue()
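
gen_stats.c is the read side of the qstats shown above: with per-CPU counters, __gnet_stats_copy_queue_cpu() sums each CPU's backlog into one total; otherwise __gnet_stats_copy_queue() copies the single counter, and the result is exported as tc_stats.backlog. A sketch of the per-CPU fold:

    #include <stdio.h>

    #define NR_CPUS 4

    struct qstats { unsigned int backlog; };

    /* Like __gnet_stats_copy_queue_cpu(): fold per-CPU counters into one. */
    static void copy_queue_cpu(struct qstats *sum,
                               const struct qstats percpu[NR_CPUS])
    {
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            sum->backlog += percpu[cpu].backlog;
    }

    int main(void)
    {
        struct qstats percpu[NR_CPUS] = { {100}, {0}, {250}, {50} };
        struct qstats total = { 0 };

        copy_queue_cpu(&total, percpu);
        printf("backlog: %u bytes\n", total.backlog);  /* 400 */
        return 0;
    }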

/Linux-v5.4/net/sunrpc/

stats.c
     154  ktime_t backlog, execute, now;  in rpc_count_iostats_metrics() local
     170  backlog = 0;  in rpc_count_iostats_metrics()
     172  backlog = ktime_sub(req->rq_xtime, task->tk_start);  in rpc_count_iostats_metrics()
     173  op_metrics->om_queue = ktime_add(op_metrics->om_queue, backlog);  in rpc_count_iostats_metrics()
     185  trace_rpc_stats_latency(req->rq_task, backlog, req->rq_rtt, execute);  in rpc_count_iostats_metrics()
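
In net/sunrpc, backlog is a duration rather than a queue size: the delay between task creation (task->tk_start) and first transmission (req->rq_xtime), accumulated per operation into op_metrics->om_queue. A userspace model of that measurement, using CLOCK_MONOTONIC in place of ktime_get():

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    typedef int64_t ktime_ns;  /* stand-in for ktime_t */

    static ktime_ns now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (ktime_ns)ts.tv_sec * 1000000000 + ts.tv_nsec;
    }

    struct op_metrics { ktime_ns om_queue; };

    int main(void)
    {
        struct op_metrics m = { 0 };
        ktime_ns tk_start = now_ns();   /* RPC task created */
        /* ... task waits for a transport slot ... */
        ktime_ns rq_xtime = now_ns();   /* request actually transmitted */

        /* Like stats.c lines 172-173: queue delay = xmit time - start time. */
        ktime_ns backlog = rq_xtime - tk_start;
        m.om_queue += backlog;
        printf("queued for %lld ns\n", (long long)backlog);
        return 0;
    }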

/Linux-v5.4/crypto/

crypto_engine.c
      68  struct crypto_async_request *async_req, *backlog;  in crypto_pump_requests() local
     112  backlog = crypto_get_backlog(&engine->queue);  in crypto_pump_requests()
     118  if (backlog)  in crypto_pump_requests()
     119  backlog->complete(backlog, -EINPROGRESS);  in crypto_pump_requests()

/Linux-v5.4/drivers/crypto/rockchip/

rk3288_crypto.c
     206  struct crypto_async_request *async_req, *backlog;  in rk_crypto_queue_task_cb() local
     212  backlog = crypto_get_backlog(&dev->queue);  in rk_crypto_queue_task_cb()
     222  if (backlog) {  in rk_crypto_queue_task_cb()
     223  backlog->complete(backlog, -EINPROGRESS);  in rk_crypto_queue_task_cb()
     224  backlog = NULL;  in rk_crypto_queue_task_cb()

/Linux-v5.4/drivers/crypto/marvell/

tdma.c
     136  struct crypto_async_request *backlog = NULL;  in mv_cesa_tdma_process() local
     149  &backlog);  in mv_cesa_tdma_process()
     170  if (backlog)  in mv_cesa_tdma_process()
     171  backlog->complete(backlog, -EINPROGRESS);  in mv_cesa_tdma_process()

cesa.c
      39  struct crypto_async_request **backlog)  in mv_cesa_dequeue_req_locked() argument
      43  *backlog = crypto_get_backlog(&engine->queue);  in mv_cesa_dequeue_req_locked()
      54  struct crypto_async_request *req = NULL, *backlog = NULL;  in mv_cesa_rearm_engine() local
      60  req = mv_cesa_dequeue_req_locked(engine, &backlog);  in mv_cesa_rearm_engine()
      68  if (backlog)  in mv_cesa_rearm_engine()
      69  backlog->complete(backlog, -EINPROGRESS);  in mv_cesa_rearm_engine()

/Linux-v5.4/drivers/crypto/

mxs-dcp.c
     393  struct crypto_async_request *backlog;  in dcp_chan_thread_aes() local
     402  backlog = crypto_get_backlog(&sdcp->queue[chan]);  in dcp_chan_thread_aes()
     406  if (!backlog && !arq) {  in dcp_chan_thread_aes()
     413  if (backlog)  in dcp_chan_thread_aes()
     414  backlog->complete(backlog, -EINPROGRESS);  in dcp_chan_thread_aes()
     695  struct crypto_async_request *backlog;  in dcp_chan_thread_sha() local
     703  backlog = crypto_get_backlog(&sdcp->queue[chan]);  in dcp_chan_thread_sha()
     707  if (!backlog && !arq) {  in dcp_chan_thread_sha()
     714  if (backlog)  in dcp_chan_thread_sha()
     715  backlog->complete(backlog, -EINPROGRESS);  in dcp_chan_thread_sha()

omap-aes-gcm.c
     250  struct aead_request *backlog;  in omap_aes_gcm_handle_queue() local
     263  backlog = aead_get_backlog(&dd->aead_queue);  in omap_aes_gcm_handle_queue()
     272  if (backlog)  in omap_aes_gcm_handle_queue()
     273  backlog->base.complete(&backlog->base, -EINPROGRESS);  in omap_aes_gcm_handle_queue()

/Linux-v5.4/drivers/net/ipvlan/

ipvlan_core.c
     240  spin_lock_bh(&port->backlog.lock);  in ipvlan_process_multicast()
     241  skb_queue_splice_tail_init(&port->backlog, &list);  in ipvlan_process_multicast()
     242  spin_unlock_bh(&port->backlog.lock);  in ipvlan_process_multicast()
     547  spin_lock(&port->backlog.lock);  in ipvlan_multicast_enqueue()
     548  if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {  in ipvlan_multicast_enqueue()
     551  __skb_queue_tail(&port->backlog, skb);  in ipvlan_multicast_enqueue()
     552  spin_unlock(&port->backlog.lock);  in ipvlan_multicast_enqueue()
     555  spin_unlock(&port->backlog.lock);  in ipvlan_multicast_enqueue()
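
ipvlan_core.c uses a bounded sk_buff queue as its multicast backlog: producers take backlog.lock and drop once IPVLAN_QBACKLOG_LIMIT packets are queued, while the worker splices the entire queue out under the lock and then processes it lock-free. A pthread model of the bounded-enqueue/splice-and-drain pattern; the limit of 3 is only for the demo, and the toy list is LIFO where the kernel queues at the tail:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define QBACKLOG_LIMIT 3  /* demo value, not the kernel constant */

    struct pkt { struct pkt *next; int id; };

    static struct pkt *backlog;
    static int backlog_len;
    static pthread_mutex_t backlog_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Like ipvlan_multicast_enqueue(): bounded add under the lock. */
    static int multicast_enqueue(struct pkt *p)
    {
        int ok = 0;

        pthread_mutex_lock(&backlog_lock);
        if (backlog_len < QBACKLOG_LIMIT) {
            p->next = backlog;  /* LIFO here; the kernel queues at the tail */
            backlog = p;
            backlog_len++;
            ok = 1;
        }
        pthread_mutex_unlock(&backlog_lock);
        return ok;              /* 0 means drop, like the kernel path */
    }

    /* Like ipvlan_process_multicast(): splice out, then work unlocked. */
    static void process_multicast(void)
    {
        pthread_mutex_lock(&backlog_lock);
        struct pkt *list = backlog;
        backlog = NULL;
        backlog_len = 0;
        pthread_mutex_unlock(&backlog_lock);

        while (list) {
            struct pkt *p = list;
            list = p->next;
            printf("processing pkt %d\n", p->id);
            free(p);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++) {
            struct pkt *p = malloc(sizeof(*p));
            p->id = i;
            if (!multicast_enqueue(p)) {
                printf("dropped pkt %d (backlog full)\n", i);
                free(p);
            }
        }
        process_multicast();
        return 0;
    }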

/Linux-v5.4/net/tipc/

link.c
     164  } backlog[5];  member
     864  avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;  in link_prepare_wakeup()
     907  l->backlog[imp].len = 0;  in tipc_link_reset()
     908  l->backlog[imp].target_bskb = NULL;  in tipc_link_reset()
     964  if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {  in tipc_link_xmit()
    1003  tskb = &l->backlog[imp].target_bskb;  in tipc_link_xmit()
    1012  l->backlog[imp].len++;  in tipc_link_xmit()
    1017  l->backlog[imp].target_bskb = NULL;  in tipc_link_xmit()
    1018  l->backlog[imp].len += skb_queue_len(list);  in tipc_link_xmit()
    1045  l->backlog[imp].len--;  in tipc_link_advance_backlog()
    [all …]
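
tipc's link keeps five backlog queues, one per message importance level, each with its own limit: tipc_link_xmit() refuses a message whose level is already at its limit, and link_prepare_wakeup() computes limit - len per level to decide how many blocked senders to wake. A minimal model of the per-importance admission check and headroom calculation; the limits in main() are made up for the demo:

    #include <stdio.h>

    enum { TIPC_LOW, TIPC_MEDIUM, TIPC_HIGH, TIPC_CRITICAL, TIPC_SYSTEM,
           TIPC_LEVELS };

    struct link {
        struct {
            unsigned int len;    /* messages queued at this importance */
            unsigned int limit;  /* per-importance cap */
        } backlog[TIPC_LEVELS];
    };

    /* Like the check in tipc_link_xmit(): a full level rejects the message. */
    static int link_xmit(struct link *l, int imp)
    {
        if (l->backlog[imp].len >= l->backlog[imp].limit)
            return -1;  /* the kernel returns -ENOBUFS here */
        l->backlog[imp].len++;
        return 0;
    }

    /* Like link_prepare_wakeup(): headroom per importance level. */
    static void prepare_wakeup(const struct link *l)
    {
        for (int imp = 0; imp < TIPC_LEVELS; imp++)
            printf("imp %d: %u slots free\n", imp,
                   l->backlog[imp].limit - l->backlog[imp].len);
    }

    int main(void)
    {
        struct link l = { .backlog = {
            [TIPC_LOW] = { .limit = 2 }, [TIPC_MEDIUM] = { .limit = 4 },
            [TIPC_HIGH] = { .limit = 6 }, [TIPC_CRITICAL] = { .limit = 8 },
            [TIPC_SYSTEM] = { .limit = 8 } } };

        link_xmit(&l, TIPC_LOW);
        link_xmit(&l, TIPC_LOW);
        if (link_xmit(&l, TIPC_LOW))
            printf("low-importance backlog full\n");
        prepare_wakeup(&l);
        return 0;
    }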