Searched refs:fq (Results 1 – 25 of 46) sorted by relevance

/Linux-v4.19/include/net/
fq_impl.h
15 static void fq_adjust_removal(struct fq *fq, in fq_adjust_removal() argument
24 fq->backlog--; in fq_adjust_removal()
25 fq->memory_usage -= skb->truesize; in fq_adjust_removal()
28 static void fq_rejigger_backlog(struct fq *fq, struct fq_flow *flow) in fq_rejigger_backlog() argument
37 list_for_each_entry_continue(i, &fq->backlogs, backlogchain) in fq_rejigger_backlog()
46 static struct sk_buff *fq_flow_dequeue(struct fq *fq, in fq_flow_dequeue() argument
51 lockdep_assert_held(&fq->lock); in fq_flow_dequeue()
57 fq_adjust_removal(fq, flow, skb); in fq_flow_dequeue()
58 fq_rejigger_backlog(fq, flow); in fq_flow_dequeue()
63 static struct sk_buff *fq_tin_dequeue(struct fq *fq, in fq_tin_dequeue() argument
[all …]
ipv6_frag.h
33 struct frag_queue *fq = container_of(q, struct frag_queue, q); in ip6frag_init() local
37 fq->ecn = 0; in ip6frag_init()
48 const struct inet_frag_queue *fq = data; in ip6frag_obj_hashfn() local
50 return jhash2((const u32 *)&fq->key.v6, in ip6frag_obj_hashfn()
58 const struct inet_frag_queue *fq = ptr; in ip6frag_obj_cmpfn() local
60 return !!memcmp(&fq->key, key, sizeof(*key)); in ip6frag_obj_cmpfn()
64 ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq) in ip6frag_expire_frag_queue() argument
70 spin_lock(&fq->q.lock); in ip6frag_expire_frag_queue()
72 if (fq->q.flags & INET_FRAG_COMPLETE) in ip6frag_expire_frag_queue()
75 inet_frag_kill(&fq->q); in ip6frag_expire_frag_queue()
[all …]
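
The ip6frag_expire_frag_queue() excerpt above is the canonical expiry shape for an inet fragment queue. A minimal sketch of the same pattern in kernel C (hypothetical function name; statistics and ICMP error handling elided; operates on the embedded inet_frag_queue directly):

    #include <net/inet_frag.h>	/* inet_frag_kill(), inet_frag_put() */

    /* Expiry-timer pattern from the excerpts: lock the queue, skip it
     * if reassembly already completed, otherwise unhash it, and drop
     * the reference the timer held. */
    static void example_frag_expire(struct inet_frag_queue *q)
    {
    	spin_lock(&q->lock);

    	if (q->flags & INET_FRAG_COMPLETE)
    		goto out;

    	inet_frag_kill(q);
    out:
    	spin_unlock(&q->lock);
    	inet_frag_put(q);
    }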
fq.h
68 struct fq { struct
84 typedef struct sk_buff *fq_tin_dequeue_t(struct fq *, argument
88 typedef void fq_skb_free_t(struct fq *,
94 typedef bool fq_skb_filter_t(struct fq *,
100 typedef struct fq_flow *fq_flow_get_default_t(struct fq *,
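
The typedef lines above are truncated by the excerpt; the trailing parameters are assumed here to match the callbacks mac80211 registers (see the tx.c entry below). A minimal fq_skb_free_t hook as a sketch under that assumption:

    #include <linux/netdevice.h>	/* dev_kfree_skb_any() */
    #include <net/fq.h>

    /* Hypothetical free callback: invoked by the fq core for every skb
     * it drops (overlimit, flush); the parameter list is assumed from
     * mac80211's fq_skb_free_func(). */
    static void example_skb_free(struct fq *fq, struct fq_tin *tin,
    			     struct fq_flow *flow, struct sk_buff *skb)
    {
    	dev_kfree_skb_any(skb);
    }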
xdp_sock.h
30 struct xsk_queue *fq; member
/Linux-v4.19/block/
blk-flush.c
97 struct blk_flush_queue *fq, unsigned int flags);
166 struct blk_flush_queue *fq, in blk_flush_complete_seq() argument
170 struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx]; in blk_flush_complete_seq()
188 fq->flush_pending_since = jiffies; in blk_flush_complete_seq()
193 list_move_tail(&rq->flush.list, &fq->flush_data_in_flight); in blk_flush_complete_seq()
217 kicked = blk_kick_flush(q, fq, cmd_flags); in blk_flush_complete_seq()
228 struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx); in flush_end_io() local
234 spin_lock_irqsave(&fq->mq_flush_lock, flags); in flush_end_io()
237 blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq); in flush_end_io()
245 running = &fq->flush_queue[fq->flush_running_idx]; in flush_end_io()
[all …]
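
blk_flush_complete_seq() stages requests on flush_queue[fq->flush_pending_idx] while completions drain flush_queue[fq->flush_running_idx]. A toy sketch of that double-buffering idiom in plain C (illustrative only, not the block layer's actual flush state machine; locking elided):

    #include <stddef.h>

    /* Two staging arrays: new work lands on the pending side while the
     * running side is drained; flipping pending_idx swaps the roles. */
    struct toy_flush_queue {
    	int queue[2][16];
    	size_t count[2];
    	unsigned int pending_idx;	/* running side is pending_idx ^ 1 */
    };

    static void toy_submit(struct toy_flush_queue *fq, int req)
    {
    	fq->queue[fq->pending_idx][fq->count[fq->pending_idx]++] = req;
    }

    static void toy_kick(struct toy_flush_queue *fq)
    {
    	fq->pending_idx ^= 1;		/* old pending side is now running */
    	fq->count[fq->pending_idx] = 0;	/* fresh pending side */
    }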
/Linux-v4.19/net/ieee802154/6lowpan/
reassembly.c
37 static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
51 struct frag_queue *fq; in lowpan_frag_expire() local
53 fq = container_of(frag, struct frag_queue, q); in lowpan_frag_expire()
55 spin_lock(&fq->q.lock); in lowpan_frag_expire()
57 if (fq->q.flags & INET_FRAG_COMPLETE) in lowpan_frag_expire()
60 inet_frag_kill(&fq->q); in lowpan_frag_expire()
62 spin_unlock(&fq->q.lock); in lowpan_frag_expire()
63 inet_frag_put(&fq->q); in lowpan_frag_expire()
88 static int lowpan_frag_queue(struct lowpan_frag_queue *fq, in lowpan_frag_queue() argument
95 if (fq->q.flags & INET_FRAG_COMPLETE) in lowpan_frag_queue()
[all …]
/Linux-v4.19/net/ipv6/
reassembly.c
72 static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
78 struct frag_queue *fq; in ip6_frag_expire() local
81 fq = container_of(frag, struct frag_queue, q); in ip6_frag_expire()
82 net = container_of(fq->q.net, struct net, ipv6.frags); in ip6_frag_expire()
84 ip6frag_expire_frag_queue(net, fq); in ip6_frag_expire()
110 static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, in ip6_frag_queue() argument
120 if (fq->q.flags & INET_FRAG_COMPLETE) in ip6_frag_queue()
146 if (end < fq->q.len || in ip6_frag_queue()
147 ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) in ip6_frag_queue()
149 fq->q.flags |= INET_FRAG_LAST_IN; in ip6_frag_queue()
[all …]
/Linux-v4.19/net/ipv4/
inet_fragment.c
73 struct inet_frag_queue *fq = ptr; in inet_frags_free_cb() local
79 if (!del_timer(&fq->timer)) in inet_frags_free_cb()
82 spin_lock_bh(&fq->lock); in inet_frags_free_cb()
83 if (!(fq->flags & INET_FRAG_COMPLETE)) { in inet_frags_free_cb()
84 fq->flags |= INET_FRAG_COMPLETE; in inet_frags_free_cb()
85 refcount_dec(&fq->refcnt); in inet_frags_free_cb()
87 spin_unlock_bh(&fq->lock); in inet_frags_free_cb()
89 inet_frag_put(fq); in inet_frags_free_cb()
100 void inet_frag_kill(struct inet_frag_queue *fq) in inet_frag_kill() argument
102 if (del_timer(&fq->timer)) in inet_frag_kill()
[all …]
/Linux-v4.19/net/ipv6/netfilter/
nf_conntrack_reasm.c
147 struct frag_queue *fq; in nf_ct_frag6_expire() local
150 fq = container_of(frag, struct frag_queue, q); in nf_ct_frag6_expire()
151 net = container_of(fq->q.net, struct net, nf_frag.frags); in nf_ct_frag6_expire()
153 ip6frag_expire_frag_queue(net, fq); in nf_ct_frag6_expire()
177 static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb, in nf_ct_frag6_queue() argument
185 if (fq->q.flags & INET_FRAG_COMPLETE) { in nf_ct_frag6_queue()
215 if (end < fq->q.len || in nf_ct_frag6_queue()
216 ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) { in nf_ct_frag6_queue()
220 fq->q.flags |= INET_FRAG_LAST_IN; in nf_ct_frag6_queue()
221 fq->q.len = end; in nf_ct_frag6_queue()
[all …]
/Linux-v4.19/drivers/soc/fsl/qbman/
qman.c
266 struct qm_mcc_fq fq; member
942 static inline void fq_set(struct qman_fq *fq, u32 mask) in fq_set() argument
944 fq->flags |= mask; in fq_set()
947 static inline void fq_clear(struct qman_fq *fq, u32 mask) in fq_clear() argument
949 fq->flags &= ~mask; in fq_clear()
952 static inline int fq_isset(struct qman_fq *fq, u32 mask) in fq_isset() argument
954 return fq->flags & mask; in fq_isset()
957 static inline int fq_isclear(struct qman_fq *fq, u32 mask) in fq_isclear() argument
959 return !(fq->flags & mask); in fq_isclear()
1036 struct qman_fq *fq; in idx_to_fq() local
[all …]
qman_test_api.c
105 static int do_enqueues(struct qman_fq *fq) in do_enqueues() argument
111 if (qman_enqueue(fq, &fd)) { in do_enqueues()
125 struct qman_fq *fq = &fq_base; in qman_test_api() local
132 err = qman_create_fq(0, FQ_FLAGS, fq); in qman_test_api()
137 err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL); in qman_test_api()
143 err = do_enqueues(fq); in qman_test_api()
148 err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); in qman_test_api()
153 err = do_enqueues(fq); in qman_test_api()
158 err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); in qman_test_api()
166 err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); in qman_test_api()
[all …]
qman_test_stash.c
277 struct qman_fq *fq, in normal_dqrr() argument
280 struct hp_handler *handler = (struct hp_handler *)fq; in normal_dqrr()
295 struct qman_fq *fq, in special_dqrr() argument
298 struct hp_handler *handler = (struct hp_handler *)fq; in special_dqrr()
/Linux-v4.19/drivers/iommu/
iova.c
60 iovad->fq = NULL; in init_iova_domain()
70 if (!iovad->fq) in free_iova_flush_queue()
78 free_percpu(iovad->fq); in free_iova_flush_queue()
80 iovad->fq = NULL; in free_iova_flush_queue()
93 iovad->fq = alloc_percpu(struct iova_fq); in init_iova_flush_queue()
94 if (!iovad->fq) in init_iova_flush_queue()
101 struct iova_fq *fq; in init_iova_flush_queue() local
103 fq = per_cpu_ptr(iovad->fq, cpu); in init_iova_flush_queue()
104 fq->head = 0; in init_iova_flush_queue()
105 fq->tail = 0; in init_iova_flush_queue()
[all …]
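
init_iova_flush_queue() above follows the standard per-CPU allocation idiom: alloc_percpu() for one instance per CPU, then per_cpu_ptr() in a loop to initialize each. A minimal sketch of the same idiom with an illustrative type:

    #include <linux/cpumask.h>	/* for_each_possible_cpu() */
    #include <linux/percpu.h>

    struct example_fq {
    	unsigned int head;
    	unsigned int tail;
    };

    /* Allocate and initialize one ring per CPU, as
     * init_iova_flush_queue() does for struct iova_fq above. */
    static struct example_fq __percpu *example_fq_alloc(void)
    {
    	struct example_fq __percpu *queues;
    	int cpu;

    	queues = alloc_percpu(struct example_fq);
    	if (!queues)
    		return NULL;

    	for_each_possible_cpu(cpu) {
    		struct example_fq *fq = per_cpu_ptr(queues, cpu);

    		fq->head = 0;
    		fq->tail = 0;
    	}
    	return queues;	/* release with free_percpu() */
    }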
/Linux-v4.19/drivers/net/ethernet/freescale/dpaa/
dpaa_eth_trace.h
58 struct qman_fq *fq,
62 TP_ARGS(netdev, fq, fd),
80 __entry->fqid = fq->fqid;
104 struct qman_fq *fq,
107 TP_ARGS(netdev, fq, fd)
114 struct qman_fq *fq,
117 TP_ARGS(netdev, fq, fd)
124 struct qman_fq *fq,
127 TP_ARGS(netdev, fq, fd)
dpaa_eth_sysfs.c
59 struct dpaa_fq *fq; in dpaa_eth_show_fqids() local
66 list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) { in dpaa_eth_show_fqids()
67 switch (fq->fq_type) { in dpaa_eth_show_fqids()
93 if (prev && (abs(fq->fqid - prev->fqid) != 1 || in dpaa_eth_show_fqids()
104 if (prev && abs(fq->fqid - prev->fqid) == 1 && in dpaa_eth_show_fqids()
106 last_fqid = fq->fqid; in dpaa_eth_show_fqids()
108 first_fqid = fq->fqid; in dpaa_eth_show_fqids()
109 last_fqid = fq->fqid; in dpaa_eth_show_fqids()
112 prev = fq; in dpaa_eth_show_fqids()
dpaa_eth.c
618 static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx) in dpaa_assign_wq() argument
620 switch (fq->fq_type) { in dpaa_assign_wq()
623 fq->wq = 1; in dpaa_assign_wq()
627 fq->wq = 5; in dpaa_assign_wq()
631 fq->wq = 6; in dpaa_assign_wq()
637 fq->wq = 6; in dpaa_assign_wq()
641 fq->wq = 2; in dpaa_assign_wq()
645 fq->wq = 1; in dpaa_assign_wq()
649 fq->wq = 0; in dpaa_assign_wq()
658 fq->fq_type, fq->fqid); in dpaa_assign_wq()
[all …]
/Linux-v4.19/drivers/crypto/caam/
qi.c
139 static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq, in caam_fq_ern_cb() argument
216 static int empty_retired_fq(struct device *qidev, struct qman_fq *fq) in empty_retired_fq() argument
220 ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT | in empty_retired_fq()
225 dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid); in empty_retired_fq()
234 } while (fq->flags & QMAN_FQ_STATE_NE); in empty_retired_fq()
239 static int kill_fq(struct device *qidev, struct qman_fq *fq) in kill_fq() argument
244 ret = qman_retire_fq(fq, &flags); in kill_fq()
258 } while (fq->state != qman_fq_state_retired); in kill_fq()
260 WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS); in kill_fq()
261 WARN_ON(fq->flags & QMAN_FQ_STATE_ORL); in kill_fq()
[all …]
/Linux-v4.19/net/mac80211/
debugfs.c
80 struct fq *fq = &local->fq; in aqm_read() local
84 spin_lock_bh(&local->fq.lock); in aqm_read()
98 fq->flows_cnt, in aqm_read()
99 fq->backlog, in aqm_read()
100 fq->overmemory, in aqm_read()
101 fq->overlimit, in aqm_read()
102 fq->collisions, in aqm_read()
103 fq->memory_usage, in aqm_read()
104 fq->memory_limit, in aqm_read()
105 fq->limit, in aqm_read()
[all …]
tx.c
1299 struct fq *fq; in codel_dequeue_func() local
1304 fq = &local->fq; in codel_dequeue_func()
1309 flow = &fq->flows[cvars - local->cvars]; in codel_dequeue_func()
1311 return fq_flow_dequeue(fq, flow); in codel_dequeue_func()
1328 static struct sk_buff *fq_tin_dequeue_func(struct fq *fq, in fq_tin_dequeue_func() argument
1338 local = container_of(fq, struct ieee80211_local, fq); in fq_tin_dequeue_func()
1353 cvars = &local->cvars[flow - fq->flows]; in fq_tin_dequeue_func()
1366 static void fq_skb_free_func(struct fq *fq, in fq_skb_free_func() argument
1373 local = container_of(fq, struct ieee80211_local, fq); in fq_skb_free_func()
1377 static struct fq_flow *fq_flow_get_default_func(struct fq *fq, in fq_flow_get_default_func() argument
[all …]
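
The tx.c callbacks recover the enclosing ieee80211_local from its embedded struct fq with container_of(fq, struct ieee80211_local, fq). An illustration of that idiom with hypothetical types:

    #include <linux/kernel.h>	/* container_of() */

    struct inner {
    	int x;
    };

    struct outer {
    	int id;
    	struct inner member;	/* embedded, like local->fq above */
    };

    /* Valid because 'member' sits at a fixed offset inside struct
     * outer; mac80211 relies on the same property in its fq callbacks. */
    static struct outer *outer_from_inner(struct inner *p)
    {
    	return container_of(p, struct outer, member);
    }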
agg-tx.c
197 struct fq *fq; in ieee80211_agg_stop_txq() local
205 fq = &sdata->local->fq; in ieee80211_agg_stop_txq()
208 spin_lock_bh(&fq->lock); in ieee80211_agg_stop_txq()
210 spin_unlock_bh(&fq->lock); in ieee80211_agg_stop_txq()
/Linux-v4.19/samples/bpf/
xdpsock_user.c
93 struct xdp_umem_uqueue fq; member
213 static inline int umem_fill_to_kernel_ex(struct xdp_umem_uqueue *fq, in umem_fill_to_kernel_ex() argument
219 if (umem_nb_free(fq, nb) < nb) in umem_fill_to_kernel_ex()
223 u32 idx = fq->cached_prod++ & fq->mask; in umem_fill_to_kernel_ex()
225 fq->ring[idx] = d[i].addr; in umem_fill_to_kernel_ex()
230 *fq->producer = fq->cached_prod; in umem_fill_to_kernel_ex()
235 static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u64 *d, in umem_fill_to_kernel() argument
240 if (umem_nb_free(fq, nb) < nb) in umem_fill_to_kernel()
244 u32 idx = fq->cached_prod++ & fq->mask; in umem_fill_to_kernel()
246 fq->ring[idx] = d[i]; in umem_fill_to_kernel()
[all …]
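
umem_fill_to_kernel() above is a single-producer ring write: stage entries at cached_prod & mask, then publish by storing the new producer index. A user-space sketch of that pattern (field names mirror the sample's struct xdp_umem_uqueue; the free-space check and the exact barrier hidden by the truncated excerpt are assumptions):

    #include <stddef.h>
    #include <stdint.h>

    struct toy_uqueue {
    	uint32_t cached_prod;
    	uint32_t mask;		/* ring entries - 1, power of two */
    	uint32_t *producer;	/* index shared with the consumer */
    	uint64_t *ring;
    };

    static void toy_fill(struct toy_uqueue *fq, const uint64_t *d, size_t nb)
    {
    	size_t i;

    	/* Real code first checks free space (umem_nb_free() above). */
    	for (i = 0; i < nb; i++) {
    		uint32_t idx = fq->cached_prod++ & fq->mask;

    		fq->ring[idx] = d[i];
    	}

    	/* Publish: the entry writes must be visible before the new
    	 * producer index (assumed barrier; the excerpt truncates it). */
    	__sync_synchronize();
    	*fq->producer = fq->cached_prod;
    }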
/Linux-v4.19/drivers/staging/fsl-dpaa2/ethernet/
dpaa2-eth.c
295 struct dpaa2_eth_fq *fq; in consume_frames() local
313 fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); in consume_frames()
314 fq->stats.frames++; in consume_frames()
316 fq->consume(priv, ch, fd, &ch->napi, fq->flowid); in consume_frames()
562 struct dpaa2_eth_fq *fq; in dpaa2_eth_tx() local
622 fq = &priv->fq[queue_mapping]; in dpaa2_eth_tx()
624 err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, in dpaa2_eth_tx()
626 fq->tx_qdbin, &fd); in dpaa2_eth_tx()
1672 struct dpaa2_eth_fq *fq; in set_fq_affinity() local
1683 fq = &priv->fq[i]; in set_fq_affinity()
[all …]
/Linux-v4.19/include/soc/fsl/
qman.h
299 } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */ member
690 struct qman_fq *fq,
697 typedef void (*qman_cb_mr)(struct qman_portal *qm, struct qman_fq *fq,
957 int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq);
967 void qman_destroy_fq(struct qman_fq *fq);
973 u32 qman_fq_fqid(struct qman_fq *fq);
1009 int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts);
1018 int qman_schedule_fq(struct qman_fq *fq);
1038 int qman_retire_fq(struct qman_fq *fq, u32 *flags);
1047 int qman_oos_fq(struct qman_fq *fq);
[all …]
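
Together with the qman_test_api.c entry above, these declarations outline a frame queue's lifecycle: create, init, enqueue, retire, take out of service, destroy. A hedged sketch using only the calls shown (flags, callbacks, and frame-descriptor setup are driver-specific and elided):

    #include <soc/fsl/qman.h>

    static int example_fq_lifecycle(u32 fqid, const struct qm_fd *fd)
    {
    	struct qman_fq fq = {};	/* real users set fq.cb handlers first */
    	u32 flags;
    	int err;

    	err = qman_create_fq(fqid, 0, &fq);
    	if (err)
    		return err;

    	err = qman_init_fq(&fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
    	if (err)
    		goto out_destroy;

    	err = qman_enqueue(&fq, fd);

    	/* Retirement can complete asynchronously; real code waits for
    	 * qman_fq_state_retired, as kill_fq() in caam/qi.c above does. */
    	qman_retire_fq(&fq, &flags);
    	qman_oos_fq(&fq);
    out_destroy:
    	qman_destroy_fq(&fq);
    	return err;
    }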
/Linux-v4.19/net/xdp/
xsk.c
41 READ_ONCE(xs->umem->fq); in xsk_is_setup_for_bpf_map()
46 return xskq_peek_addr(umem->fq, addr); in xsk_umem_peek_addr()
52 xskq_discard_addr(umem->fq); in xsk_umem_discard_addr()
62 if (!xskq_peek_addr(xs->umem->fq, &addr) || in __xsk_rcv()
74 xskq_discard_addr(xs->umem->fq); in __xsk_rcv()
122 if (!xskq_peek_addr(xs->umem->fq, &addr) || in xsk_generic_rcv()
134 xskq_discard_addr(xs->umem->fq); in xsk_generic_rcv()
461 xskq_set_umem(xs->umem->fq, &xs->umem->props); in xsk_bind()
553 q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq : in xsk_setsockopt()
659 q = READ_ONCE(umem->fq); in xsk_mmap()
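
__xsk_rcv() consumes fill-ring entries with a peek-then-discard pair: the address is only discarded (consumed) after the copy into it succeeds, so a failed receive leaves the ring untouched. A self-contained toy version of that two-step pattern (the real xskq_* helpers live in net/xdp/xsk_queue.h):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Toy single-consumer ring standing in for the xsk fill queue. */
    struct toy_fq {
    	uint64_t ring[64];
    	uint32_t cons;	/* consumer index */
    	uint32_t prod;	/* producer index */
    };

    static bool toy_peek_addr(struct toy_fq *q, uint64_t *addr)
    {
    	if (q->cons == q->prod)
    		return false;
    	*addr = q->ring[q->cons & 63];
    	return true;
    }

    static void toy_discard_addr(struct toy_fq *q)
    {
    	q->cons++;
    }

    /* Peek, try the receive step, and only then commit the consumption. */
    static int toy_rcv(struct toy_fq *q, int (*copy)(uint64_t addr))
    {
    	uint64_t addr;

    	if (!toy_peek_addr(q, &addr))
    		return -ENOSPC;
    	if (copy(addr))
    		return -EINVAL;
    	toy_discard_addr(q);
    	return 0;
    }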
xdp_umem.c
160 if (umem->fq) { in xdp_umem_release()
161 xskq_destroy(umem->fq); in xdp_umem_release()
162 umem->fq = NULL; in xdp_umem_release()
375 return umem->fq && umem->cq; in xdp_umem_validate_queues()
