
Searched full:flow (Results 1 – 25 of 1830) sorted by relevance


/Linux-v5.15/net/sched/
sch_atm.c
24 * Each class maps the flow(s) it is handling to a given VC. Multiple classes
39 * - should lock the flow while there is data in the queue (?)
58 struct atm_flow_data *excess; /* flow for excess traffic;
71 /* ------------------------- Class/flow operations ------------------------- */
76 struct atm_flow_data *flow; in lookup_flow() local
78 list_for_each_entry(flow, &p->flows, list) { in lookup_flow()
79 if (flow->common.classid == classid) in lookup_flow()
80 return flow; in lookup_flow()
90 struct atm_flow_data *flow = (struct atm_flow_data *)arg; in atm_tc_graft() local
92 pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n", in atm_tc_graft()
[all …]
sch_fq_codel.c
32 * Each flow has a CoDel managed queue.
36 * For a given flow, packets are not reordered (CoDel uses a FIFO)
39 * Low memory footprint (64 bytes per flow)
116 static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow) in dequeue_head() argument
118 struct sk_buff *skb = flow->head; in dequeue_head()
120 flow->head = skb->next; in dequeue_head()
125 /* add skb to flow queue (tail add) */
126 static inline void flow_queue_add(struct fq_codel_flow *flow, in flow_queue_add() argument
129 if (flow->head == NULL) in flow_queue_add()
130 flow->head = skb; in flow_queue_add()
[all …]
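The helpers shown above (dequeue_head() and flow_queue_add()) implement the per-flow FIFO as a singly linked list of packets with head and tail pointers. A minimal userspace sketch of the same structure, using hypothetical pkt/flow types rather than the kernel's sk_buff and fq_codel_flow:

#include <stdio.h>

/* Simplified stand-ins for sk_buff and fq_codel_flow (hypothetical). */
struct pkt { int id; struct pkt *next; };
struct flow { struct pkt *head; struct pkt *tail; };

/* Tail add, mirroring flow_queue_add(): FIFO order per flow. */
static void flow_enqueue(struct flow *f, struct pkt *p)
{
    p->next = NULL;
    if (f->head == NULL)
        f->head = p;
    else
        f->tail->next = p;
    f->tail = p;
}

/* Head remove, mirroring dequeue_head(). */
static struct pkt *flow_dequeue(struct flow *f)
{
    struct pkt *p = f->head;

    if (p)
        f->head = p->next;
    return p;
}

int main(void)
{
    struct flow f = { 0 };
    struct pkt a = { .id = 1 }, b = { .id = 2 };
    struct pkt *p;

    flow_enqueue(&f, &a);
    flow_enqueue(&f, &b);
    while ((p = flow_dequeue(&f)))
        printf("dequeued packet %d\n", p->id);
    return 0;
}

Tail insertion plus head removal is what gives the "packets are not reordered within a flow" property noted in the header comment.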
sch_fq_pie.c
2 /* Flow Queue PIE discipline
18 /* Flow Queue PIE
24 * - Each flow has a PIE managed queue.
27 * - For a given flow, packets are not reordered.
35 * struct fq_pie_flow - contains data for each flow
36 * @vars: pie vars associated with the flow
38 * @backlog: size of data in the flow
39 * @qlen: number of packets in the flow
40 * @flowchain: flowchain for the flow
41 * @head: first packet in the flow
[all …]
/Linux-v5.15/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
flowring.c
43 brcmf_flowring_is_tdls_mac(struct brcmf_flowring *flow, u8 mac[ETH_ALEN]) in brcmf_flowring_is_tdls_mac() argument
47 search = flow->tdls_entry; in brcmf_flowring_is_tdls_mac()
59 u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN], in brcmf_flowring_lookup() argument
71 sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT); in brcmf_flowring_lookup()
77 if ((sta) && (flow->tdls_active) && in brcmf_flowring_lookup()
78 (brcmf_flowring_is_tdls_mac(flow, da))) { in brcmf_flowring_lookup()
85 hash = flow->hash; in brcmf_flowring_lookup()
103 u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN], in brcmf_flowring_create() argument
116 sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT); in brcmf_flowring_create()
122 if ((sta) && (flow->tdls_active) && in brcmf_flowring_create()
[all …]
flowring.h
50 u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
52 u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
54 void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid);
55 void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid);
56 u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid);
57 u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
59 struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid);
60 void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
62 u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid);
63 u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid);
[all …]
/Linux-v5.15/include/net/
fq_impl.h
16 __fq_adjust_removal(struct fq *fq, struct fq_flow *flow, unsigned int packets, in __fq_adjust_removal() argument
19 struct fq_tin *tin = flow->tin; in __fq_adjust_removal()
24 flow->backlog -= bytes; in __fq_adjust_removal()
28 if (flow->backlog) in __fq_adjust_removal()
31 if (flow == &tin->default_flow) { in __fq_adjust_removal()
36 idx = flow - fq->flows; in __fq_adjust_removal()
41 struct fq_flow *flow, in fq_adjust_removal() argument
44 __fq_adjust_removal(fq, flow, 1, skb->len, skb->truesize); in fq_adjust_removal()
48 struct fq_flow *flow) in fq_flow_dequeue() argument
54 skb = __skb_dequeue(&flow->queue); in fq_flow_dequeue()
[all …]
/Linux-v5.15/drivers/net/phy/mscc/
mscc_macsec.c
371 struct macsec_flow *flow) in vsc8584_macsec_flow() argument
374 enum macsec_bank bank = flow->bank; in vsc8584_macsec_flow()
375 u32 val, match = 0, mask = 0, action = 0, idx = flow->index; in vsc8584_macsec_flow()
377 if (flow->match.tagged) in vsc8584_macsec_flow()
379 if (flow->match.untagged) in vsc8584_macsec_flow()
382 if (bank == MACSEC_INGR && flow->assoc_num >= 0) { in vsc8584_macsec_flow()
383 match |= MSCC_MS_SAM_MISC_MATCH_AN(flow->assoc_num); in vsc8584_macsec_flow()
387 if (bank == MACSEC_INGR && flow->match.sci && flow->rx_sa->sc->sci) { in vsc8584_macsec_flow()
388 u64 sci = (__force u64)flow->rx_sa->sc->sci; in vsc8584_macsec_flow()
400 if (flow->match.etype) { in vsc8584_macsec_flow()
[all …]
/Linux-v5.15/drivers/gpu/ipu-v3/
ipu-dp.c
46 u32 flow; member
64 struct ipu_flow flow[IPUV3_NUM_FLOWS]; member
82 struct ipu_flow *flow = to_flow(dp); in ipu_dp_set_global_alpha() local
83 struct ipu_dp_priv *priv = flow->priv; in ipu_dp_set_global_alpha()
88 reg = readl(flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha()
93 writel(reg, flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha()
96 reg = readl(flow->base + DP_GRAPH_WIND_CTRL) & 0x00FFFFFFL; in ipu_dp_set_global_alpha()
98 flow->base + DP_GRAPH_WIND_CTRL); in ipu_dp_set_global_alpha()
100 reg = readl(flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha()
101 writel(reg | DP_COM_CONF_GWAM, flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha()
[all …]
/Linux-v5.15/net/netfilter/
nf_flow_table_core.c
21 flow_offload_fill_dir(struct flow_offload *flow, in flow_offload_fill_dir() argument
24 struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple; in flow_offload_fill_dir()
25 struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple; in flow_offload_fill_dir()
48 struct flow_offload *flow; in flow_offload_alloc() local
54 flow = kzalloc(sizeof(*flow), GFP_ATOMIC); in flow_offload_alloc()
55 if (!flow) in flow_offload_alloc()
58 flow->ct = ct; in flow_offload_alloc()
60 flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL); in flow_offload_alloc()
61 flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY); in flow_offload_alloc()
64 __set_bit(NF_FLOW_SNAT, &flow->flags); in flow_offload_alloc()
[all …]
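flow_offload_alloc() above fills one tuple per direction from the conntrack entry, so packets of either the original or the reply direction match the same offloaded flow. A stripped-down illustration of that two-direction fill, using a toy tuple type rather than the nf_conntrack structures:

#include <stdio.h>
#include <stdint.h>

enum flow_dir { DIR_ORIGINAL, DIR_REPLY, DIR_MAX };

/* Toy address/port tuple; the kernel tuple carries far more state. */
struct tuple { uint32_t src_ip, dst_ip; uint16_t src_port, dst_port; };

struct flow_offload { struct tuple tuplehash[DIR_MAX]; unsigned long flags; };

/* Copy one direction of the "conntrack" tuple pair into the offload entry. */
static void flow_fill_dir(struct flow_offload *flow, const struct tuple *ct,
                          enum flow_dir dir)
{
    flow->tuplehash[dir] = ct[dir];
}

int main(void)
{
    /* Original and reply tuples of one connection (reply is mirrored). */
    struct tuple ct[DIR_MAX] = {
        { 0x0a000001, 0x0a000002, 40000, 80 },
        { 0x0a000002, 0x0a000001, 80, 40000 },
    };
    struct flow_offload flow = { 0 };

    flow_fill_dir(&flow, ct, DIR_ORIGINAL);
    flow_fill_dir(&flow, ct, DIR_REPLY);
    printf("original dst port %u, reply dst port %u\n",
           flow.tuplehash[DIR_ORIGINAL].dst_port,
           flow.tuplehash[DIR_REPLY].dst_port);
    return 0;
}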
nf_flow_table_offload.c
25 struct flow_offload *flow; member
213 const struct flow_offload *flow, in flow_offload_eth_src() argument
225 this_tuple = &flow->tuplehash[dir].tuple; in flow_offload_eth_src()
232 other_tuple = &flow->tuplehash[!dir].tuple; in flow_offload_eth_src()
260 const struct flow_offload *flow, in flow_offload_eth_dst() argument
275 this_tuple = &flow->tuplehash[dir].tuple; in flow_offload_eth_dst()
282 other_tuple = &flow->tuplehash[!dir].tuple; in flow_offload_eth_dst()
317 const struct flow_offload *flow, in flow_offload_ipv4_snat() argument
328 addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr; in flow_offload_ipv4_snat()
332 addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr; in flow_offload_ipv4_snat()
[all …]
nf_flow_table_ip.c
23 static int nf_flow_state_check(struct flow_offload *flow, int proto, in nf_flow_state_check() argument
33 flow_offload_teardown(flow); in nf_flow_state_check()
77 static void nf_flow_snat_ip(const struct flow_offload *flow, in nf_flow_snat_ip() argument
86 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr; in nf_flow_snat_ip()
91 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr; in nf_flow_snat_ip()
100 static void nf_flow_dnat_ip(const struct flow_offload *flow, in nf_flow_dnat_ip() argument
109 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr; in nf_flow_dnat_ip()
114 new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr; in nf_flow_dnat_ip()
123 static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb, in nf_flow_nat_ip() argument
127 if (test_bit(NF_FLOW_SNAT, &flow->flags)) { in nf_flow_nat_ip()
[all …]
nf_tables_offload.c
12 struct nft_flow_rule *flow; in nft_flow_rule_alloc() local
14 flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL); in nft_flow_rule_alloc()
15 if (!flow) in nft_flow_rule_alloc()
18 flow->rule = flow_rule_alloc(num_actions); in nft_flow_rule_alloc()
19 if (!flow->rule) { in nft_flow_rule_alloc()
20 kfree(flow); in nft_flow_rule_alloc()
24 flow->rule->match.dissector = &flow->match.dissector; in nft_flow_rule_alloc()
25 flow->rule->match.mask = &flow->match.mask; in nft_flow_rule_alloc()
26 flow->rule->match.key = &flow->match.key; in nft_flow_rule_alloc()
28 return flow; in nft_flow_rule_alloc()
[all …]
/Linux-v5.15/Documentation/networking/
openvswitch.rst
8 flow-level packet processing on selected network devices. It can be
10 VLAN processing, network access control, flow-based network control,
15 within a bridge). Each datapath also has associated with it a "flow
22 extracting its flow key and looking it up in the flow table. If there
23 is a matching flow, it executes the associated actions. If there is
25 its processing, userspace will likely set up a flow to handle further
29 Flow key compatibility
35 versions to parse additional protocols as part of the flow key. It
39 applications to work with any version of the flow key, past or future.
43 flow key that it parsed from the packet. Userspace then extracts its
[all …]
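The lookup-then-upcall path this document describes reduces to: extract a flow key, look it up in the datapath flow table, run the attached actions on a hit, and send the packet to userspace on a miss so it can install a flow. A rough sketch of that decision, assuming a toy 5-tuple key and an exact-match hash table (none of these names are the OVS kernel API):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Toy 5-tuple flow key; the real OVS key covers many more fields. */
struct flow_key { uint32_t src, dst; uint16_t sport, dport; uint8_t proto; };
struct flow_entry { int used; struct flow_key key; int action; };

#define TABLE_SIZE 256
static struct flow_entry table[TABLE_SIZE];

static unsigned int key_hash(const struct flow_key *k)
{
    return (k->src ^ k->dst ^ k->sport ^ k->dport ^ k->proto) % TABLE_SIZE;
}

/* Exact-match lookup; a miss means the packet must go to userspace. */
static struct flow_entry *flow_lookup(const struct flow_key *k)
{
    struct flow_entry *e = &table[key_hash(k)];

    if (e->used && memcmp(&e->key, k, sizeof(*k)) == 0)
        return e;
    return NULL;
}

static void handle_packet(const struct flow_key *k)
{
    struct flow_entry *e = flow_lookup(k);

    if (e)
        printf("hit: execute action %d\n", e->action);
    else
        printf("miss: upcall to userspace, which may install a flow\n");
}

int main(void)
{
    struct flow_key k = { .src = 1, .dst = 2, .sport = 80, .dport = 1024, .proto = 6 };

    handle_packet(&k);                       /* miss -> upcall */
    table[key_hash(&k)] = (struct flow_entry){ 1, k, 42 };
    handle_packet(&k);                       /* hit -> action  */
    return 0;
}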
/Linux-v5.15/drivers/net/ethernet/broadcom/bnxt/
bnxt_tc.c
44 /* Return the dst fid of the func for flow forwarding
370 struct bnxt_tc_flow *flow) in bnxt_tc_parse_flow() argument
387 flow->l2_key.ether_type = match.key->n_proto; in bnxt_tc_parse_flow()
388 flow->l2_mask.ether_type = match.mask->n_proto; in bnxt_tc_parse_flow()
392 flow->l4_key.ip_proto = match.key->ip_proto; in bnxt_tc_parse_flow()
393 flow->l4_mask.ip_proto = match.mask->ip_proto; in bnxt_tc_parse_flow()
401 flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS; in bnxt_tc_parse_flow()
402 ether_addr_copy(flow->l2_key.dmac, match.key->dst); in bnxt_tc_parse_flow()
403 ether_addr_copy(flow->l2_mask.dmac, match.mask->dst); in bnxt_tc_parse_flow()
404 ether_addr_copy(flow->l2_key.smac, match.key->src); in bnxt_tc_parse_flow()
[all …]
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/en/
tc_priv.h
86 /* Flow can be associated with multiple encap IDs.
95 struct list_head peer; /* flows with peer flow */
99 struct net_device *orig_dev; /* netdev adding flow first */
101 struct list_head tmp_list; /* temporary flow list used by neigh update */
105 int tunnel_id; /* the mapped tunnel id of this flow */
113 struct mlx5e_tc_flow *flow,
117 bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
119 static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag) in __flow_flag_set() argument
123 set_bit(flag, &flow->flags); in __flow_flag_set()
126 #define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag) argument
[all …]
tc_tun_encap.c
48 int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow, in mlx5e_tc_set_attr_rx_tun() argument
51 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr; in mlx5e_tc_set_attr_rx_tun()
92 flow_flag_set(flow, TUN_RX); in mlx5e_tc_set_attr_rx_tun()
93 flow->attr->tun_ip_version = ip_version; in mlx5e_tc_set_attr_rx_tun()
102 /* Flow can be associated with multiple encap entries. in mlx5e_tc_flow_all_encaps_valid()
103 * Before offloading the flow verify that all of them have in mlx5e_tc_flow_all_encaps_valid()
128 struct mlx5e_tc_flow *flow; in mlx5e_tc_encap_flows_add() local
149 list_for_each_entry(flow, flow_list, tmp_list) { in mlx5e_tc_encap_flows_add()
150 if (!mlx5e_is_offloaded_flow(flow) || !flow_flag_test(flow, SLOW)) in mlx5e_tc_encap_flows_add()
152 attr = flow->attr; in mlx5e_tc_encap_flows_add()
[all …]
/Linux-v5.15/net/openvswitch/
flow_table.c
6 #include "flow.h"
74 struct sw_flow *flow; in ovs_flow_alloc() local
77 flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL); in ovs_flow_alloc()
78 if (!flow) in ovs_flow_alloc()
81 flow->stats_last_writer = -1; in ovs_flow_alloc()
92 RCU_INIT_POINTER(flow->stats[0], stats); in ovs_flow_alloc()
94 cpumask_set_cpu(0, &flow->cpu_used_mask); in ovs_flow_alloc()
96 return flow; in ovs_flow_alloc()
98 kmem_cache_free(flow_cache, flow); in ovs_flow_alloc()
107 static void flow_free(struct sw_flow *flow) in flow_free() argument
[all …]
/Linux-v5.15/drivers/infiniband/hw/hfi1/
tid_rdma.c
37 /* Maximum number of packets within a flow generation. */
134 struct tid_rdma_flow *flow,
439 /* Flow and tid waiter functions */
529 * This should be done after the hardware flow and
698 /* Flow functions */
700 * kern_reserve_flow - allocate a hardware flow
702 * @last: the index of the preferred flow. Use RXE_NUM_TID_FLOWS to
706 * flow for use in receiving KDETH data packets. If a preferred flow is
707 * specified the function will attempt to reserve that flow again, if
721 /* Attempt to reserve the preferred flow index */ in kern_reserve_flow()
[all …]
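The kern_reserve_flow() comment describes a small allocator: try the caller's preferred flow index first, otherwise take any free hardware flow. A hedged sketch of that reservation policy; the bitmap and the NUM_TID_FLOWS constant are illustrative stand-ins, not the hfi1 data structures:

#include <stdio.h>
#include <stdbool.h>

#define NUM_TID_FLOWS 32            /* illustrative; stands in for RXE_NUM_TID_FLOWS */

static bool flow_in_use[NUM_TID_FLOWS];

/* Reserve a flow index, preferring 'last' if it is still free.
 * Passing NUM_TID_FLOWS means "no preference". Returns -1 if none is free.
 */
static int reserve_flow(int last)
{
    int i;

    if (last >= 0 && last < NUM_TID_FLOWS && !flow_in_use[last]) {
        flow_in_use[last] = true;
        return last;
    }
    for (i = 0; i < NUM_TID_FLOWS; i++) {
        if (!flow_in_use[i]) {
            flow_in_use[i] = true;
            return i;
        }
    }
    return -1;
}

int main(void)
{
    int a = reserve_flow(NUM_TID_FLOWS);   /* no preference */
    int b = reserve_flow(a);               /* preferred index already taken */

    printf("reserved flows %d and %d\n", a, b);
    return 0;
}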
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/
en_tc.c
117 * last flow from a group and then deleting a group, we get into del_sw_flow_group()
123 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
369 struct mlx5e_tc_flow *flow);
371 struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow) in mlx5e_flow_get() argument
373 if (!flow || !refcount_inc_not_zero(&flow->refcnt)) in mlx5e_flow_get()
375 return flow; in mlx5e_flow_get()
378 void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) in mlx5e_flow_put() argument
380 if (refcount_dec_and_test(&flow->refcnt)) { in mlx5e_flow_put()
381 mlx5e_tc_del_flow(priv, flow); in mlx5e_flow_put()
382 kfree_rcu(flow, rcu_head); in mlx5e_flow_put()
[all …]
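mlx5e_flow_get()/mlx5e_flow_put() above follow the usual refcount-protected lookup pattern: a get succeeds only while the count is non-zero, and the final put releases the object (the kernel additionally defers the free with kfree_rcu()). A simplified userspace model of that pattern with C11 atomics; the toy struct flow here is not the mlx5e flow:

#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

struct flow { atomic_int refcnt; int id; };

/* Take a reference only if the object is still live (refcnt > 0),
 * analogous to refcount_inc_not_zero().
 */
static struct flow *flow_get(struct flow *f)
{
    int old = atomic_load(&f->refcnt);

    while (old != 0) {
        if (atomic_compare_exchange_weak(&f->refcnt, &old, old + 1))
            return f;
    }
    return NULL;
}

/* Drop a reference; the last put releases the object. */
static void flow_put(struct flow *f)
{
    if (atomic_fetch_sub(&f->refcnt, 1) == 1) {
        printf("freeing flow %d\n", f->id);
        free(f);
    }
}

int main(void)
{
    struct flow *f = malloc(sizeof(*f));

    atomic_init(&f->refcnt, 1);
    f->id = 7;
    if (flow_get(f))
        flow_put(f);    /* drop the extra reference */
    flow_put(f);        /* last reference: flow is freed */
    return 0;
}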
/Linux-v5.15/drivers/net/ethernet/netronome/nfp/flower/
conntrack.h
38 * struct nfp_fl_ct_zone_entry - Zone entry containing conntrack flow information
107 * struct nfp_fl_ct_flow_entry - Flow entry containing conntrack flow information
108 * @cookie: Flow cookie, same as original TC flow, used as key
110 * @chain_index: Chain index of the original flow
114 * @children: List of tc_merge flows this flow forms part of
115 * @rule: Reference to the original TC flow rule
134 * @cookie: Flow cookie, combination of pre and post ct cookies
155 * struct nfp_fl_nft_tc_merge - Merge of tc_merge flows with nft flow
157 * @cookie: Flow cookie, combination of tc_merge and nft cookies
164 * @tc_flower_cookie: The cookie of the flow offloaded to the nfp
[all …]
/Linux-v5.15/Documentation/bpf/
prog_flow_dissector.rst
10 Flow dissector is a routine that parses metadata out of the packets. It's
11 used in the various places in the networking subsystem (RFS, flow hash, etc).
13 BPF flow dissector is an attempt to reimplement C-based flow dissector logic
20 BPF flow dissector programs operate on an ``__sk_buff``. However, only the
22 ``flow_keys`` is ``struct bpf_flow_keys`` and contains flow dissector input
31 Flow dissector BPF program should fill out the rest of the ``struct
41 In the VLAN-less case, this is what the initial state of the BPF flow
49 +-- flow dissector starts here
58 In case of VLAN, flow dissector can be called with the two different states.
67 +-- flow dissector starts here
[all …]
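As described, a flow dissector program starts at nhoff (just past the Ethernet header, or past the VLAN tag when the kernel has already parsed it) and walks the headers to fill in the flow keys, ending with the transport header offset. A plain-C illustration of that walk over a raw IPv4 frame; the toy_flow_keys layout is hypothetical, not struct bpf_flow_keys:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Hypothetical output structure; the real one is struct bpf_flow_keys. */
struct toy_flow_keys { uint16_t nhoff, thoff; uint8_t ip_proto; uint32_t src, dst; };

/* Walk an Ethernet + IPv4 frame starting at nhoff and fill the flow keys.
 * Returns 0 on success, -1 if the packet is not plain IPv4.
 */
static int dissect(const uint8_t *pkt, size_t len, struct toy_flow_keys *keys)
{
    uint16_t proto;
    uint8_t ihl;

    keys->nhoff = 14;                       /* ETH_HLEN; no VLAN handling here */
    if (len < (size_t)keys->nhoff + 20)
        return -1;
    memcpy(&proto, pkt + 12, 2);
    if (ntohs(proto) != 0x0800)             /* ETH_P_IP only in this sketch */
        return -1;

    ihl = pkt[keys->nhoff] & 0x0f;
    keys->ip_proto = pkt[keys->nhoff + 9];
    memcpy(&keys->src, pkt + keys->nhoff + 12, 4);
    memcpy(&keys->dst, pkt + keys->nhoff + 16, 4);
    keys->thoff = keys->nhoff + ihl * 4;    /* transport header offset */
    return 0;
}

int main(void)
{
    /* 14-byte Ethernet header (IPv4) + minimal 20-byte IPv4 header (TCP). */
    uint8_t pkt[34] = { [12] = 0x08, [13] = 0x00, [14] = 0x45, [23] = 6 };
    struct toy_flow_keys keys;

    if (dissect(pkt, sizeof(pkt), &keys) == 0)
        printf("ip_proto=%u thoff=%u\n", keys.ip_proto, keys.thoff);
    return 0;
}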
/Linux-v5.15/drivers/net/ethernet/intel/ice/
ice_ethtool_fdir.c
4 /* flow director ethtool support for ice */
35 * flow type values
36 * @flow: filter type to be converted
38 * Returns the corresponding ethtool flow type.
40 static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow) in ice_fltr_to_ethtool_flow() argument
42 switch (flow) { in ice_fltr_to_ethtool_flow()
60 /* 0 is undefined ethtool flow */ in ice_fltr_to_ethtool_flow()
66 * ice_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
67 * @eth: Ethtool flow type to be converted
69 * Returns flow enum
[all …]
/Linux-v5.15/Documentation/core-api/
genericirq.rst
52 optimize the flow of the interrupt handling for each specific interrupt
58 the flow control in the super-handler. This leads to a mix of flow logic
62 have different flow handling.
64 A more natural abstraction is the clean separation of the 'irq flow' and
68 reveals that most of them can use a generic set of 'irq flow' methods
71 IRQ flow itself but not in the chip details - and thus provides a more
74 Each interrupt descriptor is assigned its own high-level flow handler,
76 flow handler implementation also makes it simple to provide
82 IRQ-flow implementation for 'level type' interrupts and add a
104 2. High-level IRQ flow handlers
[all …]
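The separation described here amounts to: each interrupt descriptor carries a high-level flow handler (level, edge, ...) plus a set of chip callbacks, so the flow logic is shared and only the chip methods differ per controller. A compact model of that split; the types and the mask/ack/unmask ordering are illustrative, not the kernel's struct irq_desc and struct irq_chip:

#include <stdio.h>

/* Low-level chip operations: the only part a controller driver provides. */
struct irq_chip {
    const char *name;
    void (*mask)(int irq);
    void (*unmask)(int irq);
    void (*ack)(int irq);
};

/* Per-interrupt descriptor: chip details plus the high-level flow handler. */
struct irq_desc {
    int irq;
    struct irq_chip *chip;
    void (*handle_irq)(struct irq_desc *desc);   /* the "flow" */
    void (*action)(int irq);                     /* device handler */
};

/* Generic level-type flow: mask and ack, run the handler, unmask. */
static void handle_level_irq(struct irq_desc *desc)
{
    desc->chip->mask(desc->irq);
    desc->chip->ack(desc->irq);
    desc->action(desc->irq);
    desc->chip->unmask(desc->irq);
}

static void demo_mask(int irq)   { printf("chip: mask %d\n", irq); }
static void demo_unmask(int irq) { printf("chip: unmask %d\n", irq); }
static void demo_ack(int irq)    { printf("chip: ack %d\n", irq); }
static void demo_action(int irq) { printf("driver: handling irq %d\n", irq); }

int main(void)
{
    struct irq_chip chip = { "demo", demo_mask, demo_unmask, demo_ack };
    struct irq_desc desc = { 5, &chip, handle_level_irq, demo_action };

    desc.handle_irq(&desc);   /* the flow handler drives the chip callbacks */
    return 0;
}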
/Linux-v5.15/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
tls.c
39 static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk) in mlx5e_tls_set_ipv4_flow() argument
43 MLX5_SET(tls_flow, flow, ipv6, 0); in mlx5e_tls_set_ipv4_flow()
44 memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv4_layout.ipv4), in mlx5e_tls_set_ipv4_flow()
46 memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv4_layout.ipv4), in mlx5e_tls_set_ipv4_flow()
51 static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk) in mlx5e_tls_set_ipv6_flow() argument
55 MLX5_SET(tls_flow, flow, ipv6, 1); in mlx5e_tls_set_ipv6_flow()
56 memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv6_layout.ipv6), in mlx5e_tls_set_ipv6_flow()
58 memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv6_layout.ipv6), in mlx5e_tls_set_ipv6_flow()
63 static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk) in mlx5e_tls_set_flow_tcp_ports() argument
67 memcpy(MLX5_ADDR_OF(tls_flow, flow, src_port), &inet->inet_sport, in mlx5e_tls_set_flow_tcp_ports()
[all …]
/Linux-v5.15/samples/bpf/
sockex2_kern.c
63 struct flow_key_record *flow) in parse_ip() argument
73 flow->src = load_word(skb, nhoff + offsetof(struct iphdr, saddr)); in parse_ip()
74 flow->dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr)); in parse_ip()
87 struct flow_key_record *flow) in parse_ipv6() argument
91 flow->src = ipv6_addr_hash(skb, in parse_ipv6()
93 flow->dst = ipv6_addr_hash(skb, in parse_ipv6()
101 struct flow_key_record *flow) in flow_dissector() argument
121 nhoff = parse_ip(skb, nhoff, &ip_proto, flow); in flow_dissector()
123 nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow); in flow_dissector()
159 nhoff = parse_ip(skb, nhoff, &ip_proto, flow); in flow_dissector()
[all …]
