/Linux-v5.15/include/net/ |
D | fq_impl.h |
    16  __fq_adjust_removal(struct fq *fq, struct fq_flow *flow, unsigned int packets,  in __fq_adjust_removal() argument
    25  fq->backlog -= packets;  in __fq_adjust_removal()
    26  fq->memory_usage -= truesize;  in __fq_adjust_removal()
    36  idx = flow - fq->flows;  in __fq_adjust_removal()
    37  __clear_bit(idx, fq->flows_bitmap);  in __fq_adjust_removal()
    40  static void fq_adjust_removal(struct fq *fq,  in fq_adjust_removal() argument
    44  __fq_adjust_removal(fq, flow, 1, skb->len, skb->truesize);  in fq_adjust_removal()
    47  static struct sk_buff *fq_flow_dequeue(struct fq *fq,  in fq_flow_dequeue() argument
    52  lockdep_assert_held(&fq->lock);  in fq_flow_dequeue()
    58  fq_adjust_removal(fq, flow, skb);  in fq_flow_dequeue()
    [all …]
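These hits come from the generic fair-queuing core shared by mac80211 (see net/mac80211/tx.c further down). A minimal user-space model of the removal accounting, assuming the simplified fields below — field names follow the hits, the per-flow byte accounting is elided, and everything else is illustrative:

    #include <stddef.h>

    #define FLOWS_NR 4096
    #define BITS_PER_LONG_ (8 * sizeof(unsigned long))

    struct flow { unsigned int backlog; };   /* simplified: packets only */

    struct fq_model {
        struct flow flows[FLOWS_NR];
        unsigned long flows_bitmap[FLOWS_NR / BITS_PER_LONG_];
        unsigned int backlog;       /* total packets queued across flows */
        unsigned int memory_usage;  /* sum of skb->truesize */
    };

    /* mirror of __fq_adjust_removal(): shrink the per-fq counters and
     * clear the flow's bitmap bit once it goes empty */
    static void adjust_removal(struct fq_model *fq, struct flow *flow,
                               unsigned int packets, unsigned int truesize)
    {
        size_t idx = flow - fq->flows;   /* pointer arithmetic, as at line 36 */

        flow->backlog -= packets;
        fq->backlog -= packets;
        fq->memory_usage -= truesize;

        if (!flow->backlog)
            fq->flows_bitmap[idx / BITS_PER_LONG_] &=
                ~(1UL << (idx % BITS_PER_LONG_));
    }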
|
D | ipv6_frag.h |
    33  struct frag_queue *fq = container_of(q, struct frag_queue, q);  in ip6frag_init() local
    37  fq->ecn = 0;  in ip6frag_init()
    48  const struct inet_frag_queue *fq = data;  in ip6frag_obj_hashfn() local
    50  return jhash2((const u32 *)&fq->key.v6,  in ip6frag_obj_hashfn()
    58  const struct inet_frag_queue *fq = ptr;  in ip6frag_obj_cmpfn() local
    60  return !!memcmp(&fq->key, key, sizeof(*key));  in ip6frag_obj_cmpfn()
    64  ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)  in ip6frag_expire_frag_queue() argument
    70  if (fq->q.fqdir->dead)  in ip6frag_expire_frag_queue()
    72  spin_lock(&fq->q.lock);  in ip6frag_expire_frag_queue()
    74  if (fq->q.flags & INET_FRAG_COMPLETE)  in ip6frag_expire_frag_queue()
    [all …]
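ip6frag_obj_hashfn() and ip6frag_obj_cmpfn() are rhashtable callbacks for the IPv6 fragment table. Roughly how net/ipv6/reassembly.c wires them up — reproduced from memory, so treat the field values as a sketch rather than a verbatim quote:

    static const struct rhashtable_params ip6_rhash_params = {
        .head_offset         = offsetof(struct inet_frag_queue, node),
        .hashfn              = ip6frag_key_hashfn,   /* hashes a lookup key */
        .obj_hashfn          = ip6frag_obj_hashfn,   /* hashes a stored queue */
        .obj_cmpfn           = ip6frag_obj_cmpfn,    /* 0 on match, like memcmp */
        .automatic_shrinking = true,
    };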
|
D | fq.h |
    64  struct fq {  struct
    81  typedef struct sk_buff *fq_tin_dequeue_t(struct fq *,  argument
    85  typedef void fq_skb_free_t(struct fq *,
    91  typedef bool fq_skb_filter_t(struct fq *,
    97  typedef struct fq_flow *fq_flow_get_default_t(struct fq *,
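These typedefs are the hook points of the fq core: callers hand function pointers into the fq_impl.h helpers such as fq_tin_dequeue(). The signatures are truncated in the hits, so the trailing parameters below are filled in from the mac80211 usage further down; a sketch, not the authoritative API:

    /* dequeue hook: pull the next skb off a flow (see fq_impl.h above) */
    static struct sk_buff *my_dequeue(struct fq *fq, struct fq_tin *tin,
                                      struct fq_flow *flow)
    {
        return fq_flow_dequeue(fq, flow);
    }

    /* free hook: called when the core drops a queued skb */
    static void my_skb_free(struct fq *fq, struct fq_tin *tin,
                            struct fq_flow *flow, struct sk_buff *skb)
    {
        dev_kfree_skb(skb);
    }

    /* typical call site, with fq->lock held:
     *     skb = fq_tin_dequeue(fq, tin, my_dequeue);
     */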
|
/Linux-v5.15/block/ |
D | blk-flush.c |
    96  struct blk_flush_queue *fq, unsigned int flags);
    164  struct blk_flush_queue *fq,  in blk_flush_complete_seq() argument
    168  struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];  in blk_flush_complete_seq()
    185  fq->flush_pending_since = jiffies;  in blk_flush_complete_seq()
    190  list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);  in blk_flush_complete_seq()
    211  blk_kick_flush(q, fq, cmd_flags);  in blk_flush_complete_seq()
    220  struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);  in flush_end_io() local
    223  spin_lock_irqsave(&fq->mq_flush_lock, flags);  in flush_end_io()
    226  fq->rq_status = error;  in flush_end_io()
    227  spin_unlock_irqrestore(&fq->mq_flush_lock, flags);  in flush_end_io()
    [all …]
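blk_flush_complete_seq() advances each request through a fixed sequence of flush steps. A reduced model of the step selection, using the REQ_FSEQ_* flags from block/blk-flush.c — the helper itself is an illustration, not kernel code:

    enum {
        REQ_FSEQ_PREFLUSH  = (1 << 0),   /* flush before the data */
        REQ_FSEQ_DATA      = (1 << 1),   /* the data part itself */
        REQ_FSEQ_POSTFLUSH = (1 << 2),   /* flush after the data */
        REQ_FSEQ_DONE      = (1 << 3),
    };

    /* the lowest still-pending step is the next one to issue */
    static unsigned int next_flush_step(unsigned int policy, unsigned int done)
    {
        unsigned int pending = policy & ~done;

        return pending ? pending & -pending : REQ_FSEQ_DONE;
    }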
|
/Linux-v5.15/net/ipv6/netfilter/ |
D | nf_conntrack_reasm.c |
    126  static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
    137  struct frag_queue *fq;  in nf_ct_frag6_expire() local
    139  fq = container_of(frag, struct frag_queue, q);  in nf_ct_frag6_expire()
    141  ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);  in nf_ct_frag6_expire()
    166  static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,  in nf_ct_frag6_queue() argument
    175  if (fq->q.flags & INET_FRAG_COMPLETE) {  in nf_ct_frag6_queue()
    205  if (end < fq->q.len ||  in nf_ct_frag6_queue()
    206  ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) {  in nf_ct_frag6_queue()
    210  fq->q.flags |= INET_FRAG_LAST_IN;  in nf_ct_frag6_queue()
    211  fq->q.len = end;  in nf_ct_frag6_queue()
    [all …]
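Lines 205-211 are the last-fragment sanity check: a fragment carrying the final piece must not end before data already queued, and two "last" fragments must agree on the total length. A self-contained reduction of that check (INET_FRAG_LAST_IN's value is illustrative; the real one lives in include/net/inet_frag.h):

    #include <errno.h>

    #define INET_FRAG_LAST_IN 1   /* illustrative value */

    static int set_last_frag(unsigned int end, unsigned int *q_len,
                             unsigned int *q_flags)
    {
        if (end < *q_len ||
            ((*q_flags & INET_FRAG_LAST_IN) && end != *q_len))
            return -EINVAL;       /* contradicts fragments already seen */

        *q_flags |= INET_FRAG_LAST_IN;
        *q_len = end;
        return 0;
    }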
|
/Linux-v5.15/net/ieee802154/6lowpan/ |
D | reassembly.c |
    33  static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
    47  struct frag_queue *fq;  in lowpan_frag_expire() local
    49  fq = container_of(frag, struct frag_queue, q);  in lowpan_frag_expire()
    51  spin_lock(&fq->q.lock);  in lowpan_frag_expire()
    53  if (fq->q.flags & INET_FRAG_COMPLETE)  in lowpan_frag_expire()
    56  inet_frag_kill(&fq->q);  in lowpan_frag_expire()
    58  spin_unlock(&fq->q.lock);  in lowpan_frag_expire()
    59  inet_frag_put(&fq->q);  in lowpan_frag_expire()
    84  static int lowpan_frag_queue(struct lowpan_frag_queue *fq,  in lowpan_frag_queue() argument
    97  if (fq->q.flags & INET_FRAG_COMPLETE)  in lowpan_frag_queue()
    [all …]
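Lines 51-59 show the canonical fragment-queue expiry pattern, condensed below; the same shape recurs in the IPv6 and netfilter reassembly entries in this listing:

    static void frag_expire_common(struct frag_queue *fq)
    {
        spin_lock(&fq->q.lock);
        if (!(fq->q.flags & INET_FRAG_COMPLETE))
            inet_frag_kill(&fq->q);   /* unhash; accept no more fragments */
        spin_unlock(&fq->q.lock);
        inet_frag_put(&fq->q);        /* drop the expiring timer's reference */
    }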
|
/Linux-v5.15/net/ipv6/ |
D | reassembly.c |
    70  static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
    76  struct frag_queue *fq;  in ip6_frag_expire() local
    78  fq = container_of(frag, struct frag_queue, q);  in ip6_frag_expire()
    80  ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);  in ip6_frag_expire()
    106  static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,  in ip6_frag_queue() argument
    117  if (fq->q.flags & INET_FRAG_COMPLETE)  in ip6_frag_queue()
    147  if (end < fq->q.len ||  in ip6_frag_queue()
    148  ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))  in ip6_frag_queue()
    150  fq->q.flags |= INET_FRAG_LAST_IN;  in ip6_frag_queue()
    151  fq->q.len = end;  in ip6_frag_queue()
    [all …]
|
/Linux-v5.15/drivers/soc/fsl/qbman/ |
D | qman.c |
    268  struct qm_mcc_fq fq;  member
    957  static inline void fq_set(struct qman_fq *fq, u32 mask)  in fq_set() argument
    959  fq->flags |= mask;  in fq_set()
    962  static inline void fq_clear(struct qman_fq *fq, u32 mask)  in fq_clear() argument
    964  fq->flags &= ~mask;  in fq_clear()
    967  static inline int fq_isset(struct qman_fq *fq, u32 mask)  in fq_isset() argument
    969  return fq->flags & mask;  in fq_isset()
    972  static inline int fq_isclear(struct qman_fq *fq, u32 mask)  in fq_isclear() argument
    974  return !(fq->flags & mask);  in fq_isclear()
    1121  struct qman_fq *fq;  in idx_to_fq() local
    [all …]
|
D | qman_test_api.c |
    106  static int do_enqueues(struct qman_fq *fq)  in do_enqueues() argument
    112  if (qman_enqueue(fq, &fd)) {  in do_enqueues()
    126  struct qman_fq *fq = &fq_base;  in qman_test_api() local
    133  err = qman_create_fq(0, FQ_FLAGS, fq);  in qman_test_api()
    138  err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);  in qman_test_api()
    144  err = do_enqueues(fq);  in qman_test_api()
    149  err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);  in qman_test_api()
    154  err = do_enqueues(fq);  in qman_test_api()
    159  err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);  in qman_test_api()
    167  err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);  in qman_test_api()
    [all …]
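The test walks a frame queue through its normal lifecycle: create, initialize, enqueue, then pull frames back with a volatile dequeue. A condensed version keeping only the calls visible above — FQ_FLAGS, VDQCR_FLAGS, frmcnt and do_enqueues() are the test's own symbols, and the fqid-0 comment is an assumption:

    struct qman_fq fq_base = { };   /* .cb handlers omitted for brevity */
    struct qman_fq *fq = &fq_base;
    int err;

    err = qman_create_fq(0, FQ_FLAGS, fq);   /* fqid 0 + FQ_FLAGS presumably
                                              * requests a dynamic FQID */
    if (!err)
        err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
    if (!err)
        err = do_enqueues(fq);               /* qman_enqueue() loop */
    if (!err)
        err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);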
|
/Linux-v5.15/drivers/iommu/ |
D | iova.c |
    67  iovad->fq = NULL;  in init_iova_domain()
    78  return !!iovad->fq;  in has_iova_flush_queue()
    91  free_percpu(iovad->fq);  in free_iova_flush_queue()
    93  iovad->fq = NULL;  in free_iova_flush_queue()
    115  struct iova_fq *fq;  in init_iova_flush_queue() local
    117  fq = per_cpu_ptr(queue, cpu);  in init_iova_flush_queue()
    118  fq->head = 0;  in init_iova_flush_queue()
    119  fq->tail = 0;  in init_iova_flush_queue()
    121  spin_lock_init(&fq->lock);  in init_iova_flush_queue()
    124  iovad->fq = queue;  in init_iova_flush_queue()
    [all …]
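Lines 115-124 are the per-CPU ring setup inside init_iova_flush_queue(). Its shape, with the surrounding allocation filled in as an assumption from the free_percpu() call at line 91:

    struct iova_fq __percpu *queue;
    int cpu;

    queue = alloc_percpu(struct iova_fq);
    if (!queue)
        return -ENOMEM;

    for_each_possible_cpu(cpu) {
        struct iova_fq *fq = per_cpu_ptr(queue, cpu);

        fq->head = 0;                 /* empty ring: head == tail */
        fq->tail = 0;
        spin_lock_init(&fq->lock);
    }
    iovad->fq = queue;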
|
/Linux-v5.15/drivers/net/ethernet/freescale/dpaa2/ |
D | dpaa2-eth-debugfs.c |
    47  static char *fq_type_to_str(struct dpaa2_eth_fq *fq)  in fq_type_to_str() argument
    49  switch (fq->type) {  in fq_type_to_str()
    62  struct dpaa2_eth_fq *fq;  in dpaa2_dbg_fqs_show() local
    71  fq = &priv->fq[i];  in dpaa2_dbg_fqs_show()
    72  err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);  in dpaa2_dbg_fqs_show()
    77  if (!fq->stats.frames && !fcnt)  in dpaa2_dbg_fqs_show()
    81  fq->fqid,  in dpaa2_dbg_fqs_show()
    82  fq->target_cpu,  in dpaa2_dbg_fqs_show()
    83  fq->tc,  in dpaa2_dbg_fqs_show()
    84  fq_type_to_str(fq),  in dpaa2_dbg_fqs_show()
    [all …]
|
D | dpaa2-eth.c |
    254  struct dpaa2_eth_fq *fq,  in dpaa2_eth_xdp_flush() argument
    269  err = priv->enqueue(priv, fq, &fds[total_enqueued],  in dpaa2_eth_xdp_flush()
    284  struct dpaa2_eth_fq *fq)  in dpaa2_eth_xdp_tx_flush() argument
    293  enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);  in dpaa2_eth_xdp_tx_flush()
    297  fds = fq->xdp_tx_fds.fds;  in dpaa2_eth_xdp_tx_flush()
    302  for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {  in dpaa2_eth_xdp_tx_flush()
    307  fq->xdp_tx_fds.num = 0;  in dpaa2_eth_xdp_tx_flush()
    317  struct dpaa2_eth_fq *fq;  in dpaa2_eth_xdp_enqueue() local
    334  fq = &priv->fq[queue_id];  in dpaa2_eth_xdp_enqueue()
    335  dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];  in dpaa2_eth_xdp_enqueue()
    [all …]
|
D | dpaa2-switch.c |
    661  napi_enable(&ethsw->fq[i].napi);  in dpaa2_switch_enable_ctrl_if_napi()
    677  napi_disable(&ethsw->fq[i].napi);  in dpaa2_switch_disable_ctrl_if_napi()
    2375  static void dpaa2_switch_tx_conf(struct dpaa2_switch_fq *fq,  in dpaa2_switch_tx_conf() argument
    2378  dpaa2_switch_free_fd(fq->ethsw, fd);  in dpaa2_switch_tx_conf()
    2381  static void dpaa2_switch_rx(struct dpaa2_switch_fq *fq,  in dpaa2_switch_rx() argument
    2384  struct ethsw_core *ethsw = fq->ethsw;  in dpaa2_switch_rx()
    2468  ethsw->fq[i].fqid = ctrl_if_attr.rx_fqid;  in dpaa2_switch_setup_fqs()
    2469  ethsw->fq[i].ethsw = ethsw;  in dpaa2_switch_setup_fqs()
    2470  ethsw->fq[i++].type = DPSW_QUEUE_RX;  in dpaa2_switch_setup_fqs()
    2472  ethsw->fq[i].fqid = ctrl_if_attr.tx_err_conf_fqid;  in dpaa2_switch_setup_fqs()
    [all …]
|
/Linux-v5.15/drivers/net/ethernet/freescale/dpaa/ |
D | dpaa_eth_trace.h |
    58  struct qman_fq *fq,
    62  TP_ARGS(netdev, fq, fd),
    80  __entry->fqid = fq->fqid;
    104  struct qman_fq *fq,
    107  TP_ARGS(netdev, fq, fd)
    114  struct qman_fq *fq,
    117  TP_ARGS(netdev, fq, fd)
    124  struct qman_fq *fq,
    127  TP_ARGS(netdev, fq, fd)
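This header declares one tracepoint event class over (netdev, fq, fd) and stamps out several events from it, which is why the same TP_ARGS line matches repeatedly. A skeleton of that pattern — the class and event names here are illustrative, and most of the captured fields are omitted:

    DECLARE_EVENT_CLASS(dpaa_eth_fd,
        TP_PROTO(struct net_device *netdev, struct qman_fq *fq,
                 const struct qm_fd *fd),
        TP_ARGS(netdev, fq, fd),
        TP_STRUCT__entry(
            __field(u32, fqid)
        ),
        TP_fast_assign(
            __entry->fqid = fq->fqid;    /* as at line 80 */
        ),
        TP_printk("fqid=%u", __entry->fqid)
    );

    /* each concrete tracepoint reuses the class layout */
    DEFINE_EVENT(dpaa_eth_fd, dpaa_tx_fd,
        TP_PROTO(struct net_device *netdev, struct qman_fq *fq,
                 const struct qm_fd *fd),
        TP_ARGS(netdev, fq, fd)
    );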
|
D | dpaa_eth_sysfs.c |
    59  struct dpaa_fq *fq;  in dpaa_eth_show_fqids() local
    66  list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) {  in dpaa_eth_show_fqids()
    67  switch (fq->fq_type) {  in dpaa_eth_show_fqids()
    93  if (prev && (abs(fq->fqid - prev->fqid) != 1 ||  in dpaa_eth_show_fqids()
    104  if (prev && abs(fq->fqid - prev->fqid) == 1 &&  in dpaa_eth_show_fqids()
    106  last_fqid = fq->fqid;  in dpaa_eth_show_fqids()
    108  first_fqid = fq->fqid;  in dpaa_eth_show_fqids()
    109  last_fqid = fq->fqid;  in dpaa_eth_show_fqids()
    112  prev = fq;  in dpaa_eth_show_fqids()
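The checks at lines 93-112 coalesce consecutive FQIDs into "first - last" ranges when printing the sysfs attribute. A self-contained user-space model of that logic (the kernel version additionally groups by fq_type, which is elided here):

    #include <stdio.h>

    static void print_fqid_ranges(const unsigned int *fqid, int n)
    {
        int i, first = 0;

        for (i = 1; i <= n; i++) {
            /* a run ends at the array end or a non-consecutive id */
            if (i == n || fqid[i] != fqid[i - 1] + 1) {
                if (first == i - 1)
                    printf("%u\n", fqid[first]);
                else
                    printf("%u - %u\n", fqid[first], fqid[i - 1]);
                first = i;
            }
        }
    }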
|
D | dpaa_eth.c |
    640  static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)  in dpaa_assign_wq() argument
    642  switch (fq->fq_type) {  in dpaa_assign_wq()
    645  fq->wq = 1;  in dpaa_assign_wq()
    649  fq->wq = 5;  in dpaa_assign_wq()
    653  fq->wq = 6;  in dpaa_assign_wq()
    659  fq->wq = 6;  in dpaa_assign_wq()
    663  fq->wq = 2;  in dpaa_assign_wq()
    667  fq->wq = 1;  in dpaa_assign_wq()
    671  fq->wq = 0;  in dpaa_assign_wq()
    680  fq->fq_type, fq->fqid);  in dpaa_assign_wq()
    [all …]
|
/Linux-v5.15/drivers/crypto/caam/ |
D | qi.c |
    135  static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,  in caam_fq_ern_cb() argument
    218  static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)  in empty_retired_fq() argument
    222  ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |  in empty_retired_fq()
    227  dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);  in empty_retired_fq()
    236  } while (fq->flags & QMAN_FQ_STATE_NE);  in empty_retired_fq()
    241  static int kill_fq(struct device *qidev, struct qman_fq *fq)  in kill_fq() argument
    246  ret = qman_retire_fq(fq, &flags);  in kill_fq()
    260  } while (fq->state != qman_fq_state_retired);  in kill_fq()
    262  WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);  in kill_fq()
    263  WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);  in kill_fq()
    [all …]
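kill_fq() and empty_retired_fq() together implement FQ teardown: retire the queue, drain a retired-but-non-empty queue with volatile dequeues, then move it out of service. A condensed sketch using the qman.h API from the entry below — waiting and error paths are trimmed, and the exact flag combination is an approximation of the hits:

    static int kill_fq_sketch(struct qman_fq *fq)
    {
        u32 flags;
        int ret;

        ret = qman_retire_fq(fq, &flags);
        if (ret < 0)
            return ret;

        /* hardware may still hold frames on a retired queue */
        while (fq->flags & QMAN_FQ_STATE_NE) {
            ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT,
                                        QM_VDQCR_NUMFRAMES_TILLEMPTY);
            if (ret)
                return ret;
        }

        return qman_oos_fq(fq);   /* out of service; qman_destroy_fq() may follow */
    }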
|
/Linux-v5.15/net/ipv4/ |
D | inet_fragment.c |
    130  struct inet_frag_queue *fq = ptr;  in inet_frags_free_cb() local
    133  count = del_timer_sync(&fq->timer) ? 1 : 0;  in inet_frags_free_cb()
    135  spin_lock_bh(&fq->lock);  in inet_frags_free_cb()
    136  if (!(fq->flags & INET_FRAG_COMPLETE)) {  in inet_frags_free_cb()
    137  fq->flags |= INET_FRAG_COMPLETE;  in inet_frags_free_cb()
    139  } else if (fq->flags & INET_FRAG_HASH_DEAD) {  in inet_frags_free_cb()
    142  spin_unlock_bh(&fq->lock);  in inet_frags_free_cb()
    144  if (refcount_sub_and_test(count, &fq->refcnt))  in inet_frags_free_cb()
    145  inet_frag_destroy(fq);  in inet_frags_free_cb()
    225  void inet_frag_kill(struct inet_frag_queue *fq)  in inet_frag_kill() argument
    [all …]
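inet_frags_free_cb() has to reconcile the pending timer's reference with the one held on a still-live queue. A reduction of lines 130-145 (the INET_FRAG_HASH_DEAD branch is elided, and the comment on the second reference is an interpretation):

    static void frags_free_one(struct inet_frag_queue *fq)
    {
        /* a successfully cancelled timer owned one reference */
        int count = del_timer_sync(&fq->timer) ? 1 : 0;

        spin_lock_bh(&fq->lock);
        if (!(fq->flags & INET_FRAG_COMPLETE)) {
            fq->flags |= INET_FRAG_COMPLETE;
            count++;        /* plus the reference a live queue holds (assumed) */
        }
        spin_unlock_bh(&fq->lock);

        if (refcount_sub_and_test(count, &fq->refcnt))
            inet_frag_destroy(fq);
    }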
|
/Linux-v5.15/net/mac80211/ |
D | debugfs.c |
    81  struct fq *fq = &local->fq;  in aqm_read() local
    85  spin_lock_bh(&local->fq.lock);  in aqm_read()
    99  fq->flows_cnt,  in aqm_read()
    100  fq->backlog,  in aqm_read()
    101  fq->overmemory,  in aqm_read()
    102  fq->overlimit,  in aqm_read()
    103  fq->collisions,  in aqm_read()
    104  fq->memory_usage,  in aqm_read()
    105  fq->memory_limit,  in aqm_read()
    106  fq->limit,  in aqm_read()
    [all …]
|
D | tx.c |
    1335  struct fq *fq;  in codel_dequeue_func() local
    1340  fq = &local->fq;  in codel_dequeue_func()
    1345  flow = &fq->flows[cvars - local->cvars];  in codel_dequeue_func()
    1347  return fq_flow_dequeue(fq, flow);  in codel_dequeue_func()
    1364  static struct sk_buff *fq_tin_dequeue_func(struct fq *fq,  in fq_tin_dequeue_func() argument
    1374  local = container_of(fq, struct ieee80211_local, fq);  in fq_tin_dequeue_func()
    1389  cvars = &local->cvars[flow - fq->flows];  in fq_tin_dequeue_func()
    1402  static void fq_skb_free_func(struct fq *fq,  in fq_skb_free_func() argument
    1409  local = container_of(fq, struct ieee80211_local, fq);  in fq_skb_free_func()
    1417  struct fq *fq = &local->fq;  in ieee80211_txq_enqueue() local
    [all …]
|
/Linux-v5.15/include/soc/fsl/ |
D | qman.h |
    300  } __packed fq;  /* FQRN/FQRNI/FQRL/FQPN */  member
    691  struct qman_fq *fq,
    699  typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
    969  int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
    979  void qman_destroy_fq(struct qman_fq *fq);
    985  u32 qman_fq_fqid(struct qman_fq *fq);
    1021  int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
    1030  int qman_schedule_fq(struct qman_fq *fq);
    1050  int qman_retire_fq(struct qman_fq *fq, u32 *flags);
    1059  int qman_oos_fq(struct qman_fq *fq);
    [all …]
|
/Linux-v5.15/net/xdp/ |
D | xsk_buff_pool.c |
    77  pool->fq = xs->fq_tmp;  in xp_create_and_assign_umem()
    200  if (!pool->fq || !pool->cq)  in xp_assign_dev_shared()
    230  if (pool->fq) {  in xp_release_deferred()
    231  xskq_destroy(pool->fq);  in xp_release_deferred()
    232  pool->fq = NULL;  in xp_release_deferred()
    454  if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {  in __xp_alloc()
    455  pool->fq->queue_empty_descs++;  in __xp_alloc()
    463  pool->fq->invalid_descs++;  in __xp_alloc()
    464  xskq_cons_release(pool->fq);  in __xp_alloc()
    469  xskq_cons_release(pool->fq);  in __xp_alloc()
    [all …]
|
/Linux-v5.15/samples/bpf/ |
D | xdpsock_user.c |
    139  struct xsk_ring_prod fq;  member
    826  ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq,  in xsk_configure_umem()
    840  ret = xsk_ring_prod__reserve(&umem->fq,  in xsk_populate_fill_ring()
    845  *xsk_ring_prod__fill_addr(&umem->fq, idx++) =  in xsk_populate_fill_ring()
    847  xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS * 2);  in xsk_populate_fill_ring()
    1154  ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);  in complete_tx_l2fwd()
    1158  if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&umem->fq)) {  in complete_tx_l2fwd()
    1163  ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);  in complete_tx_l2fwd()
    1167  *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) =  in complete_tx_l2fwd()
    1170  xsk_ring_prod__submit(&xsk->umem->fq, rcvd);  in complete_tx_l2fwd()
    [all …]
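xsk_populate_fill_ring() primes the fill ring so the kernel has UMEM buffers to place received packets into. A condensed version built on the libbpf xsk helpers seen above — the function name, frame count, and sizing parameters are assumptions:

    #include <bpf/xsk.h>    /* libbpf AF_XDP helpers, as shipped with v5.15 */

    static int populate_fill_ring(struct xsk_ring_prod *fq, unsigned int frames,
                                  unsigned int frame_size)
    {
        __u32 idx;
        unsigned int i;

        /* reserve descriptors; fails if the ring holds fewer than `frames` */
        if (xsk_ring_prod__reserve(fq, frames, &idx) != frames)
            return -1;

        /* hand the kernel one UMEM offset per frame */
        for (i = 0; i < frames; i++)
            *xsk_ring_prod__fill_addr(fq, idx++) = (__u64)i * frame_size;

        xsk_ring_prod__submit(fq, frames);   /* publish to the kernel */
        return 0;
    }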
|
/Linux-v5.15/tools/testing/selftests/netfilter/ |
D | nft_trans_stress.sh |
    56  ip netns exec "$testns" taskset $mask ping -4 127.0.0.1 -fq > /dev/null &
    57  ip netns exec "$testns" taskset $mask ping -6 ::1 -fq > /dev/null &
|
/Linux-v5.15/tools/testing/selftests/bpf/ |
D | xdpxceiver.c |
    255  &umem->fq, &umem->cq, &cfg);  in xsk_configure_umem()
    269  ret = xsk_ring_prod__reserve(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS, &idx);  in xsk_populate_fill_ring()
    273  *xsk_ring_prod__fill_addr(&umem->fq, idx++) = i * XSK_UMEM__DEFAULT_FRAME_SIZE;  in xsk_populate_fill_ring()
    274  xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS);  in xsk_populate_fill_ring()
    585  if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {  in receive_pkts()
    593  ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);  in receive_pkts()
    597  if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {  in receive_pkts()
    602  ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);  in receive_pkts()
    614  *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig;  in receive_pkts()
    618  xsk_ring_prod__submit(&xsk->umem->fq, rcvd);  in receive_pkts()
|