/Linux-v5.4/include/net/

fq_impl.h
      14  static void fq_adjust_removal(struct fq *fq,    in fq_adjust_removal() argument
      23  fq->backlog--;    in fq_adjust_removal()
      24  fq->memory_usage -= skb->truesize;    in fq_adjust_removal()
      27  static void fq_rejigger_backlog(struct fq *fq, struct fq_flow *flow)    in fq_rejigger_backlog() argument
      36  list_for_each_entry_continue(i, &fq->backlogs, backlogchain)    in fq_rejigger_backlog()
      45  static struct sk_buff *fq_flow_dequeue(struct fq *fq,    in fq_flow_dequeue() argument
      50  lockdep_assert_held(&fq->lock);    in fq_flow_dequeue()
      56  fq_adjust_removal(fq, flow, skb);    in fq_flow_dequeue()
      57  fq_rejigger_backlog(fq, flow);    in fq_flow_dequeue()
      62  static struct sk_buff *fq_tin_dequeue(struct fq *fq,    in fq_tin_dequeue() argument
    [all …]

ipv6_frag.h
      33  struct frag_queue *fq = container_of(q, struct frag_queue, q);    in ip6frag_init() local
      37  fq->ecn = 0;    in ip6frag_init()
      48  const struct inet_frag_queue *fq = data;    in ip6frag_obj_hashfn() local
      50  return jhash2((const u32 *)&fq->key.v6,    in ip6frag_obj_hashfn()
      58  const struct inet_frag_queue *fq = ptr;    in ip6frag_obj_cmpfn() local
      60  return !!memcmp(&fq->key, key, sizeof(*key));    in ip6frag_obj_cmpfn()
      64  ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)    in ip6frag_expire_frag_queue() argument
      70  if (fq->q.fqdir->dead)    in ip6frag_expire_frag_queue()
      72  spin_lock(&fq->q.lock);    in ip6frag_expire_frag_queue()
      74  if (fq->q.flags & INET_FRAG_COMPLETE)    in ip6frag_expire_frag_queue()
    [all …]

fq.h
      67  struct fq {    struct
      83  typedef struct sk_buff *fq_tin_dequeue_t(struct fq *,    argument
      87  typedef void fq_skb_free_t(struct fq *,
      93  typedef bool fq_skb_filter_t(struct fq *,
      99  typedef struct fq_flow *fq_flow_get_default_t(struct fq *,

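The fq.h typedefs above are the hooks a consumer of the fair-queuing core fills in, while fq_impl.h supplies the inline machinery (fq_flow_dequeue(), fq_tin_dequeue()) that calls back into them; mac80211's tx.c further down is the in-tree user. A minimal sketch of how a hypothetical consumer might wire the two together — the my_* names and callback bodies are illustrative, only the fq_* calls and the locking rule come from these headers:

    #include <net/fq.h>
    #include <net/fq_impl.h>    /* inline implementation of the fq_* helpers */

    /* fq_tin_dequeue_t callback: choose the next skb from the selected flow. */
    static struct sk_buff *my_tin_dequeue(struct fq *fq, struct fq_tin *tin,
                                          struct fq_flow *flow)
    {
            return fq_flow_dequeue(fq, flow);   /* asserts fq->lock is held */
    }

    /* Dequeue path: fq_tin_dequeue() must run under fq->lock. */
    static struct sk_buff *my_dequeue(struct fq *fq, struct fq_tin *tin)
    {
            struct sk_buff *skb;

            spin_lock_bh(&fq->lock);
            skb = fq_tin_dequeue(fq, tin, my_tin_dequeue);
            spin_unlock_bh(&fq->lock);
            return skb;
    }
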
/Linux-v5.4/block/

blk-flush.c
      96  struct blk_flush_queue *fq, unsigned int flags);
     156  struct blk_flush_queue *fq,    in blk_flush_complete_seq() argument
     160  struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];    in blk_flush_complete_seq()
     177  fq->flush_pending_since = jiffies;    in blk_flush_complete_seq()
     182  list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);    in blk_flush_complete_seq()
     203  blk_kick_flush(q, fq, cmd_flags);    in blk_flush_complete_seq()
     212  struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);    in flush_end_io() local
     216  spin_lock_irqsave(&fq->mq_flush_lock, flags);    in flush_end_io()
     219  fq->rq_status = error;    in flush_end_io()
     220  spin_unlock_irqrestore(&fq->mq_flush_lock, flags);    in flush_end_io()
    [all …]

/Linux-v5.4/net/ipv6/netfilter/

nf_conntrack_reasm.c
     130  static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
     141  struct frag_queue *fq;    in nf_ct_frag6_expire() local
     143  fq = container_of(frag, struct frag_queue, q);    in nf_ct_frag6_expire()
     145  ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);    in nf_ct_frag6_expire()
     169  static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,    in nf_ct_frag6_queue() argument
     178  if (fq->q.flags & INET_FRAG_COMPLETE) {    in nf_ct_frag6_queue()
     208  if (end < fq->q.len ||    in nf_ct_frag6_queue()
     209  ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) {    in nf_ct_frag6_queue()
     213  fq->q.flags |= INET_FRAG_LAST_IN;    in nf_ct_frag6_queue()
     214  fq->q.len = end;    in nf_ct_frag6_queue()
    [all …]

/Linux-v5.4/net/ieee802154/6lowpan/

reassembly.c
      33  static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
      47  struct frag_queue *fq;    in lowpan_frag_expire() local
      49  fq = container_of(frag, struct frag_queue, q);    in lowpan_frag_expire()
      51  spin_lock(&fq->q.lock);    in lowpan_frag_expire()
      53  if (fq->q.flags & INET_FRAG_COMPLETE)    in lowpan_frag_expire()
      56  inet_frag_kill(&fq->q);    in lowpan_frag_expire()
      58  spin_unlock(&fq->q.lock);    in lowpan_frag_expire()
      59  inet_frag_put(&fq->q);    in lowpan_frag_expire()
      84  static int lowpan_frag_queue(struct lowpan_frag_queue *fq,    in lowpan_frag_queue() argument
      97  if (fq->q.flags & INET_FRAG_COMPLETE)    in lowpan_frag_queue()
    [all …]

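The lowpan_frag_expire() lines above show the expire-timer pattern the reassembly paths share: take the queue lock, bail out if reassembly already completed, kill the queue, then drop the timer's reference. Condensed into a sketch (the handler name is hypothetical; inet_frag_kill()/inet_frag_put() are the real calls from the excerpts):

    #include <net/ipv6_frag.h>  /* struct frag_queue, inet_frag_* helpers */

    /* Hypothetical expire handler following lowpan_frag_expire(). */
    static void my_frag_expire(struct frag_queue *fq)
    {
            spin_lock(&fq->q.lock);

            /* Reassembly finished before the timer fired: nothing to kill. */
            if (fq->q.flags & INET_FRAG_COMPLETE)
                    goto out;

            inet_frag_kill(&fq->q);     /* unhash; no new fragments accepted */
    out:
            spin_unlock(&fq->q.lock);
            inet_frag_put(&fq->q);      /* release the timer's reference */
    }
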
/Linux-v5.4/net/ipv6/

reassembly.c
      68  static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
      74  struct frag_queue *fq;    in ip6_frag_expire() local
      76  fq = container_of(frag, struct frag_queue, q);    in ip6_frag_expire()
      78  ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);    in ip6_frag_expire()
     104  static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,    in ip6_frag_queue() argument
     115  if (fq->q.flags & INET_FRAG_COMPLETE)    in ip6_frag_queue()
     145  if (end < fq->q.len ||    in ip6_frag_queue()
     146  ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))    in ip6_frag_queue()
     148  fq->q.flags |= INET_FRAG_LAST_IN;    in ip6_frag_queue()
     149  fq->q.len = end;    in ip6_frag_queue()
    [all …]

/Linux-v5.4/drivers/soc/fsl/qbman/

qman.c
     268  struct qm_mcc_fq fq;    member
     962  static inline void fq_set(struct qman_fq *fq, u32 mask)    in fq_set() argument
     964  fq->flags |= mask;    in fq_set()
     967  static inline void fq_clear(struct qman_fq *fq, u32 mask)    in fq_clear() argument
     969  fq->flags &= ~mask;    in fq_clear()
     972  static inline int fq_isset(struct qman_fq *fq, u32 mask)    in fq_isset() argument
     974  return fq->flags & mask;    in fq_isset()
     977  static inline int fq_isclear(struct qman_fq *fq, u32 mask)    in fq_isclear() argument
     979  return !(fq->flags & mask);    in fq_isclear()
    1126  struct qman_fq *fq;    in idx_to_fq() local
    [all …]

qman_test_api.c
     105  static int do_enqueues(struct qman_fq *fq)    in do_enqueues() argument
     111  if (qman_enqueue(fq, &fd)) {    in do_enqueues()
     125  struct qman_fq *fq = &fq_base;    in qman_test_api() local
     132  err = qman_create_fq(0, FQ_FLAGS, fq);    in qman_test_api()
     137  err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);    in qman_test_api()
     143  err = do_enqueues(fq);    in qman_test_api()
     148  err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);    in qman_test_api()
     153  err = do_enqueues(fq);    in qman_test_api()
     158  err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);    in qman_test_api()
     166  err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);    in qman_test_api()
    [all …]

qman_test_stash.c
     277  struct qman_fq *fq,    in normal_dqrr() argument
     280  struct hp_handler *handler = (struct hp_handler *)fq;    in normal_dqrr()
     295  struct qman_fq *fq,    in special_dqrr() argument
     298  struct hp_handler *handler = (struct hp_handler *)fq;    in special_dqrr()

/Linux-v5.4/drivers/iommu/

iova.c
      49  iovad->fq = NULL;    in init_iova_domain()
      59  return !!iovad->fq;    in has_iova_flush_queue()
      72  free_percpu(iovad->fq);    in free_iova_flush_queue()
      74  iovad->fq = NULL;    in free_iova_flush_queue()
      96  struct iova_fq *fq;    in init_iova_flush_queue() local
      98  fq = per_cpu_ptr(queue, cpu);    in init_iova_flush_queue()
      99  fq->head = 0;    in init_iova_flush_queue()
     100  fq->tail = 0;    in init_iova_flush_queue()
     102  spin_lock_init(&fq->lock);    in init_iova_flush_queue()
     107  iovad->fq = queue;    in init_iova_flush_queue()
    [all …]

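init_iova_flush_queue() above walks every CPU's copy of the flush queue and resets its ring indices before publishing the pointer in iovad->fq. The per-CPU allocate-then-initialize idiom it relies on, reduced to the fields visible in the excerpt (struct and function names here are illustrative):

    #include <linux/percpu.h>
    #include <linux/spinlock.h>

    /* Only the fields visible in the iova.c excerpt. */
    struct my_fq {
            unsigned int head, tail;    /* consumer/producer ring indices */
            spinlock_t lock;
    };

    static struct my_fq __percpu *my_fq_alloc(void)
    {
            struct my_fq __percpu *queue;
            int cpu;

            queue = alloc_percpu(struct my_fq);
            if (!queue)
                    return NULL;

            /* Initialize every CPU's private copy before publishing it. */
            for_each_possible_cpu(cpu) {
                    struct my_fq *fq = per_cpu_ptr(queue, cpu);

                    fq->head = 0;
                    fq->tail = 0;
                    spin_lock_init(&fq->lock);
            }
            return queue;
    }
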
/Linux-v5.4/drivers/net/ethernet/freescale/dpaa/

dpaa_eth_trace.h
      58  struct qman_fq *fq,
      62  TP_ARGS(netdev, fq, fd),
      80  __entry->fqid = fq->fqid;
     104  struct qman_fq *fq,
     107  TP_ARGS(netdev, fq, fd)
     114  struct qman_fq *fq,
     117  TP_ARGS(netdev, fq, fd)
     124  struct qman_fq *fq,
     127  TP_ARGS(netdev, fq, fd)

dpaa_eth_sysfs.c
      59  struct dpaa_fq *fq;    in dpaa_eth_show_fqids() local
      66  list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) {    in dpaa_eth_show_fqids()
      67  switch (fq->fq_type) {    in dpaa_eth_show_fqids()
      93  if (prev && (abs(fq->fqid - prev->fqid) != 1 ||    in dpaa_eth_show_fqids()
     104  if (prev && abs(fq->fqid - prev->fqid) == 1 &&    in dpaa_eth_show_fqids()
     106  last_fqid = fq->fqid;    in dpaa_eth_show_fqids()
     108  first_fqid = fq->fqid;    in dpaa_eth_show_fqids()
     109  last_fqid = fq->fqid;    in dpaa_eth_show_fqids()
     112  prev = fq;    in dpaa_eth_show_fqids()

dpaa_eth.c
     618  static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)    in dpaa_assign_wq() argument
     620  switch (fq->fq_type) {    in dpaa_assign_wq()
     623  fq->wq = 1;    in dpaa_assign_wq()
     627  fq->wq = 5;    in dpaa_assign_wq()
     631  fq->wq = 6;    in dpaa_assign_wq()
     637  fq->wq = 6;    in dpaa_assign_wq()
     641  fq->wq = 2;    in dpaa_assign_wq()
     645  fq->wq = 1;    in dpaa_assign_wq()
     649  fq->wq = 0;    in dpaa_assign_wq()
     658  fq->fq_type, fq->fqid);    in dpaa_assign_wq()
    [all …]

/Linux-v5.4/drivers/crypto/caam/

qi.c
     141  static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,    in caam_fq_ern_cb() argument
     222  static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)    in empty_retired_fq() argument
     226  ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |    in empty_retired_fq()
     231  dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);    in empty_retired_fq()
     240  } while (fq->flags & QMAN_FQ_STATE_NE);    in empty_retired_fq()
     245  static int kill_fq(struct device *qidev, struct qman_fq *fq)    in kill_fq() argument
     250  ret = qman_retire_fq(fq, &flags);    in kill_fq()
     264  } while (fq->state != qman_fq_state_retired);    in kill_fq()
     266  WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);    in kill_fq()
     267  WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);    in kill_fq()
    [all …]

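kill_fq() and empty_retired_fq() above capture QMan's teardown rule: a frame queue must be retired first, drained with volatile dequeues while frames remain queued, and only then taken out of service. A hedged sketch of that sequence with the error handling and retirement-completion wait trimmed (the qman_* calls and flags are the real API; see qman.h below):

    #include <soc/fsl/qman.h>

    /* Sketch of the retire-and-drain teardown seen in caam/qi.c. */
    static int my_kill_fq(struct qman_fq *fq)
    {
            u32 flags;
            int ret;

            ret = qman_retire_fq(fq, &flags);
            if (ret < 0)
                    return ret;

            /* Retired but not empty: drain leftovers before out-of-service. */
            while (fq->flags & QMAN_FQ_STATE_NE) {
                    ret = qman_volatile_dequeue(fq,
                                                QMAN_VOLATILE_FLAG_WAIT_INT |
                                                QMAN_VOLATILE_FLAG_FINISH,
                                                QM_VDQCR_NUMFRAMES_TILLEMPTY);
                    if (ret)
                            return ret;
            }

            ret = qman_oos_fq(fq);
            if (!ret)
                    qman_destroy_fq(fq);
            return ret;
    }
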
/Linux-v5.4/drivers/net/ethernet/freescale/dpaa2/

dpaa2-eth-debugfs.c
      64  static char *fq_type_to_str(struct dpaa2_eth_fq *fq)    in fq_type_to_str() argument
      66  switch (fq->type) {    in fq_type_to_str()
      79  struct dpaa2_eth_fq *fq;    in dpaa2_dbg_fqs_show() local
      88  fq = &priv->fq[i];    in dpaa2_dbg_fqs_show()
      89  err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);    in dpaa2_dbg_fqs_show()
      94  fq->fqid,    in dpaa2_dbg_fqs_show()
      95  fq->target_cpu,    in dpaa2_dbg_fqs_show()
      96  fq_type_to_str(fq),    in dpaa2_dbg_fqs_show()
      97  fq->stats.frames,    in dpaa2_dbg_fqs_show()

dpaa2-eth.c
     246  struct dpaa2_eth_fq *fq;    in xdp_enqueue() local
     265  fq = &priv->fq[queue_id];    in xdp_enqueue()
     267  err = priv->enqueue(priv, fq, fd, 0);    in xdp_enqueue()
     355  struct dpaa2_eth_fq *fq)    in dpaa2_eth_rx() argument
     385  xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);    in dpaa2_eth_rx()
     433  skb_record_rx_queue(skb, fq->flowid);    in dpaa2_eth_rx()
     458  struct dpaa2_eth_fq *fq = NULL;    in consume_frames() local
     476  fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);    in consume_frames()
     478  fq->consume(priv, ch, fd, fq);    in consume_frames()
     485  fq->stats.frames += cleaned;    in consume_frames()
    [all …]

/Linux-v5.4/net/ipv4/

inet_fragment.c
     130  struct inet_frag_queue *fq = ptr;    in inet_frags_free_cb() local
     133  count = del_timer_sync(&fq->timer) ? 1 : 0;    in inet_frags_free_cb()
     135  spin_lock_bh(&fq->lock);    in inet_frags_free_cb()
     136  if (!(fq->flags & INET_FRAG_COMPLETE)) {    in inet_frags_free_cb()
     137  fq->flags |= INET_FRAG_COMPLETE;    in inet_frags_free_cb()
     139  } else if (fq->flags & INET_FRAG_HASH_DEAD) {    in inet_frags_free_cb()
     142  spin_unlock_bh(&fq->lock);    in inet_frags_free_cb()
     144  if (refcount_sub_and_test(count, &fq->refcnt))    in inet_frags_free_cb()
     145  inet_frag_destroy(fq);    in inet_frags_free_cb()
     194  void inet_frag_kill(struct inet_frag_queue *fq)    in inet_frag_kill() argument
    [all …]

/Linux-v5.4/net/mac80211/

debugfs.c
      79  struct fq *fq = &local->fq;    in aqm_read() local
      83  spin_lock_bh(&local->fq.lock);    in aqm_read()
      97  fq->flows_cnt,    in aqm_read()
      98  fq->backlog,    in aqm_read()
      99  fq->overmemory,    in aqm_read()
     100  fq->overlimit,    in aqm_read()
     101  fq->collisions,    in aqm_read()
     102  fq->memory_usage,    in aqm_read()
     103  fq->memory_limit,    in aqm_read()
     104  fq->limit,    in aqm_read()
    [all …]

tx.c
    1304  struct fq *fq;    in codel_dequeue_func() local
    1309  fq = &local->fq;    in codel_dequeue_func()
    1314  flow = &fq->flows[cvars - local->cvars];    in codel_dequeue_func()
    1316  return fq_flow_dequeue(fq, flow);    in codel_dequeue_func()
    1333  static struct sk_buff *fq_tin_dequeue_func(struct fq *fq,    in fq_tin_dequeue_func() argument
    1343  local = container_of(fq, struct ieee80211_local, fq);    in fq_tin_dequeue_func()
    1358  cvars = &local->cvars[flow - fq->flows];    in fq_tin_dequeue_func()
    1371  static void fq_skb_free_func(struct fq *fq,    in fq_skb_free_func() argument
    1378  local = container_of(fq, struct ieee80211_local, fq);    in fq_skb_free_func()
    1382  static struct fq_flow *fq_flow_get_default_func(struct fq *fq,    in fq_flow_get_default_func() argument
    [all …]

/Linux-v5.4/samples/bpf/

xdpsock_user.c
      81  struct xsk_ring_prod fq;    member
     300  ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq,    in xsk_configure_umem()
     337  ret = xsk_ring_prod__reserve(&xsk->umem->fq,    in xsk_configure_socket()
     343  *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx++) =    in xsk_configure_socket()
     345  xsk_ring_prod__submit(&xsk->umem->fq,    in xsk_configure_socket()
     509  ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);    in complete_tx_l2fwd()
     513  if (xsk_ring_prod__needs_wakeup(&umem->fq))    in complete_tx_l2fwd()
     515  ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);    in complete_tx_l2fwd()
     519  *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) =    in complete_tx_l2fwd()
     522  xsk_ring_prod__submit(&xsk->umem->fq, rcvd);    in complete_tx_l2fwd()
    [all …]

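xdpsock_user.c keeps receive buffers flowing by reserving fill-ring slots, writing UMEM frame addresses into them, and submitting the batch (lines 337–345 and 509–522 above). The same idiom as a standalone user-space sketch using the libbpf helpers the sample uses (the function name, frame count, and layout are arbitrary here):

    #include <errno.h>
    #include <linux/types.h>
    #include <bpf/xsk.h>

    #define NUM_FRAMES 4096

    /* Pre-fill the fill ring so the kernel has buffers to receive into. */
    static int populate_fill_ring(struct xsk_ring_prod *fq)
    {
            __u32 idx;
            int i;

            /* Reserve NUM_FRAMES slots; returns the count actually reserved. */
            if (xsk_ring_prod__reserve(fq, NUM_FRAMES, &idx) != NUM_FRAMES)
                    return -ENOSPC;

            /* Hand one UMEM frame address to each reserved slot. */
            for (i = 0; i < NUM_FRAMES; i++)
                    *xsk_ring_prod__fill_addr(fq, idx++) =
                            (__u64)i * XSK_UMEM__DEFAULT_FRAME_SIZE;

            /* Publish the descriptors to the kernel. */
            xsk_ring_prod__submit(fq, NUM_FRAMES);
            return 0;
    }
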
/Linux-v5.4/include/soc/fsl/

qman.h
     299  } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */    member
     690  struct qman_fq *fq,
     697  typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
     957  int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
     967  void qman_destroy_fq(struct qman_fq *fq);
     973  u32 qman_fq_fqid(struct qman_fq *fq);
    1009  int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
    1018  int qman_schedule_fq(struct qman_fq *fq);
    1038  int qman_retire_fq(struct qman_fq *fq, u32 *flags);
    1047  int qman_oos_fq(struct qman_fq *fq);
    [all …]

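The prototypes above trace the whole frame-queue lifecycle, and qman_test_api.c earlier exercises it end to end: create with a dynamically allocated FQID, initialize, enqueue, then retire and destroy. A condensed, hedged version of that flow (the frame-descriptor contents are a placeholder and the caller is assumed to have set fq->cb beforehand; the calls and flags come from this header):

    #include <linux/string.h>
    #include <soc/fsl/qman.h>

    /* Lifecycle sketch modeled on qman_test_api.c. */
    static int my_fq_roundtrip(struct qman_fq *fq)
    {
            struct qm_fd fd;
            u32 flags;
            int err;

            /* fqid 0 + DYNAMIC_FQID lets QMan pick the FQID. */
            err = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
            if (err)
                    return err;

            err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
            if (err)
                    goto out_destroy;

            memset(&fd, 0, sizeof(fd));     /* placeholder frame descriptor */
            err = qman_enqueue(fq, &fd);

            qman_retire_fq(fq, &flags);     /* then drain + OOS, as in qi.c */
            qman_oos_fq(fq);
    out_destroy:
            qman_destroy_fq(fq);
            return err;
    }
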
/Linux-v5.4/net/xdp/

xsk.c
      37  READ_ONCE(xs->umem->fq);    in xsk_is_setup_for_bpf_map()
      42  return xskq_has_addrs(umem->fq, cnt);    in xsk_umem_has_addrs()
      48  return xskq_peek_addr(umem->fq, addr, umem);    in xsk_umem_peek_addr()
      54  xskq_discard_addr(umem->fq);    in xsk_umem_discard_addr()
      63  umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;    in xsk_set_rx_need_wakeup()
      90  umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;    in xsk_clear_rx_need_wakeup()
     149  if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) ||    in __xsk_rcv()
     170  xskq_discard_addr(xs->umem->fq);    in __xsk_rcv()
     237  if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) ||    in xsk_generic_rcv()
     252  xskq_discard_addr(xs->umem->fq);    in xsk_generic_rcv()
    [all …]

xdp_umem.c
     237  if (umem->fq) {    in xdp_umem_release()
     238  xskq_destroy(umem->fq);    in xdp_umem_release()
     239  umem->fq = NULL;    in xdp_umem_release()
     459  return umem->fq && umem->cq;    in xdp_umem_validate_queues()

/Linux-v5.4/tools/testing/selftests/netfilter/

nft_trans_stress.sh
      56  ip netns exec "$testns" taskset $mask ping -4 127.0.0.1 -fq > /dev/null &
      57  ip netns exec "$testns" taskset $mask ping -6 ::1 -fq > /dev/null &