/Linux-v6.1/tools/testing/selftests/tc-testing/tc-tests/filters/ |
D | flow.json |
    4   "name": "Add flow filter with map key and ops",
    7   "flow"
    15  …"cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 protocol ip flow map key ds…
    17  "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 protocol ip prio 1 flow",
    18  …"matchPattern": "filter parent ffff: protocol ip pref 1 flow chain [0-9]+ handle 0x1 map keys dst …
    26  "name": "Add flow filter with map key or ops",
    29  "flow"
    37  …"cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 protocol ip flow map key ds…
    39  "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 protocol ip prio 1 flow",
    40  …"matchPattern": "filter parent ffff: protocol ip pref 1 flow chain [0-9]+ handle 0x1 map keys dst.…
    [all …]
|
/Linux-v6.1/net/sched/ |
D | sch_atm.c |
    24  * Each class maps the flow(s) it is handling to a given VC. Multiple classes
    39  * - should lock the flow while there is data in the queue (?)
    58  struct atm_flow_data *excess; /* flow for excess traffic;
    71  /* ------------------------- Class/flow operations ------------------------- */
    76  struct atm_flow_data *flow; in lookup_flow() local
    78  list_for_each_entry(flow, &p->flows, list) { in lookup_flow()
    79  if (flow->common.classid == classid) in lookup_flow()
    80  return flow; in lookup_flow()
    90  struct atm_flow_data *flow = (struct atm_flow_data *)arg; in atm_tc_graft() local
    92  pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n", in atm_tc_graft()
    [all …]
|
D | sch_fq_codel.c |
    32   * Each flow has a CoDel managed queue.
    36   * For a given flow, packets are not reordered (CoDel uses a FIFO)
    39   * Low memory footprint (64 bytes per flow)
    116  static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow) in dequeue_head() argument
    118  struct sk_buff *skb = flow->head; in dequeue_head()
    120  flow->head = skb->next; in dequeue_head()
    125  /* add skb to flow queue (tail add) */
    126  static inline void flow_queue_add(struct fq_codel_flow *flow, in flow_queue_add() argument
    129  if (flow->head == NULL) in flow_queue_add()
    130  flow->head = skb; in flow_queue_add()
    [all …]
|
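The dequeue_head()/flow_queue_add() pair listed above is the usual head/tail singly linked FIFO kept per fq_codel flow. As a rough standalone illustration of that pattern only (hypothetical pkt/flow_fifo types standing in for struct sk_buff and struct fq_codel_flow, not the kernel code):

#include <stddef.h>

/* Hypothetical stand-ins for struct sk_buff and struct fq_codel_flow. */
struct pkt {
	struct pkt *next;
	unsigned int len;
};

struct flow_fifo {
	struct pkt *head;	/* oldest packet, dequeued first */
	struct pkt *tail;	/* newest packet, enqueued last  */
};

/* Tail add: O(1), preserves per-flow packet order. */
static void flow_enqueue(struct flow_fifo *f, struct pkt *p)
{
	p->next = NULL;
	if (f->head == NULL)
		f->head = p;	/* queue was empty */
	else
		f->tail->next = p;
	f->tail = p;
}

/* Head removal: O(1); returns NULL when the flow is empty. */
static struct pkt *flow_dequeue(struct flow_fifo *f)
{
	struct pkt *p = f->head;

	if (p) {
		f->head = p->next;
		p->next = NULL;
	}
	return p;
}

Keeping both head and tail pointers makes enqueue and dequeue O(1) while preserving ordering within a flow, which is why the comment above can promise that packets of a given flow are never reordered.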
D | sch_fq_pie.c |
    2   /* Flow Queue PIE discipline
    18  /* Flow Queue PIE
    24  * - Each flow has a PIE managed queue.
    27  * - For a given flow, packets are not reordered.
    35  * struct fq_pie_flow - contains data for each flow
    36  * @vars: pie vars associated with the flow
    38  * @backlog: size of data in the flow
    39  * @qlen: number of packets in the flow
    40  * @flowchain: flowchain for the flow
    41  * @head: first packet in the flow
    [all …]
|
D | sch_hhf.c |
    20  * buckets. Initially, a new flow starts as non-heavy-hitter. Once classified
    37  * - For a heavy-hitter flow: *all* of its k array counters must be large.
    38  * - For a non-heavy-hitter flow: some of its k array counters can be large
    59  * Once a flow is classified as heavy-hitter, we also save its per-flow state
    60  * in an exact-matching flow table so that its subsequent packets can be
    66  * - If the flow-id of p (e.g., TCP 5-tuple) is already in the exact-matching
    67  * heavy-hitter flow table, denoted table T, then send p to the heavy-hitter
    70  * + If F decides that p belongs to a non-heavy-hitter flow, then send p
    72  * + Otherwise, if F decides that p belongs to a new heavy-hitter flow,
    73  * then set up a new flow entry for the flow-id of p in the table T and
    [all …]
|
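The comments listed above describe a multi-stage filter: each packet's flow id hashes into one counter per array, and the flow counts as a heavy hitter only when all of its counters are large. A self-contained sketch of that classification rule, with made-up array sizes, threshold and hash mixing (the real sch_hhf differs in its hashing and in how counters are aged):

#include <stdbool.h>
#include <stdint.h>

#define HH_STAGES     4		/* k counter arrays (assumed value)        */
#define HH_BUCKETS    1024	/* buckets per array (assumed value)       */
#define HH_THRESHOLD  4096	/* bytes before a bucket counts as "large" */

static uint32_t counters[HH_STAGES][HH_BUCKETS];

/* Cheap per-stage hash of the flow id; a real filter would use a
 * stronger keyed hash per stage. */
static uint32_t stage_hash(uint32_t flow_id, int stage)
{
	uint32_t h = flow_id ^ (0x9e3779b9u * (uint32_t)(stage + 1));

	h ^= h >> 16;
	h *= 0x7feb352du;
	h ^= h >> 15;
	return h % HH_BUCKETS;
}

/* Account pkt_len against every stage; the flow is declared heavy only
 * if all of its counters exceed the threshold.  A flow that has itself
 * sent more than the threshold is never missed; unrelated flows that
 * happen to share every bucket can be misclassified (false positives). */
static bool hh_account_and_classify(uint32_t flow_id, uint32_t pkt_len)
{
	bool heavy = true;
	int i;

	for (i = 0; i < HH_STAGES; i++) {
		uint32_t *c = &counters[i][stage_hash(flow_id, i)];

		*c += pkt_len;
		if (*c < HH_THRESHOLD)
			heavy = false;
	}
	return heavy;
}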
/Linux-v6.1/drivers/net/phy/mscc/ |
D | mscc_macsec.c |
    371  struct macsec_flow *flow) in vsc8584_macsec_flow() argument
    374  enum macsec_bank bank = flow->bank; in vsc8584_macsec_flow()
    375  u32 val, match = 0, mask = 0, action = 0, idx = flow->index; in vsc8584_macsec_flow()
    377  if (flow->match.tagged) in vsc8584_macsec_flow()
    379  if (flow->match.untagged) in vsc8584_macsec_flow()
    382  if (bank == MACSEC_INGR && flow->assoc_num >= 0) { in vsc8584_macsec_flow()
    383  match |= MSCC_MS_SAM_MISC_MATCH_AN(flow->assoc_num); in vsc8584_macsec_flow()
    387  if (bank == MACSEC_INGR && flow->match.sci && flow->rx_sa->sc->sci) { in vsc8584_macsec_flow()
    388  u64 sci = (__force u64)flow->rx_sa->sc->sci; in vsc8584_macsec_flow()
    400  if (flow->match.etype) { in vsc8584_macsec_flow()
    [all …]
|
/Linux-v6.1/drivers/net/wireless/broadcom/brcm80211/brcmfmac/ |
D | flowring.c |
    43   brcmf_flowring_is_tdls_mac(struct brcmf_flowring *flow, u8 mac[ETH_ALEN]) in brcmf_flowring_is_tdls_mac() argument
    47   search = flow->tdls_entry; in brcmf_flowring_is_tdls_mac()
    59   u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN], in brcmf_flowring_lookup() argument
    71   sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT); in brcmf_flowring_lookup()
    77   if ((sta) && (flow->tdls_active) && in brcmf_flowring_lookup()
    78   (brcmf_flowring_is_tdls_mac(flow, da))) { in brcmf_flowring_lookup()
    85   hash = flow->hash; in brcmf_flowring_lookup()
    103  u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN], in brcmf_flowring_create() argument
    116  sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT); in brcmf_flowring_create()
    122  if ((sta) && (flow->tdls_active) && in brcmf_flowring_create()
    [all …]
|
D | flowring.h |
    50  u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
    52  u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
    54  void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid);
    55  void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid);
    56  u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid);
    57  u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
    59  struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid);
    60  void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
    62  u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid);
    63  u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid);
    [all …]
|
/Linux-v6.1/include/net/ |
D | fq_impl.h |
    16  __fq_adjust_removal(struct fq *fq, struct fq_flow *flow, unsigned int packets, in __fq_adjust_removal() argument
    19  struct fq_tin *tin = flow->tin; in __fq_adjust_removal()
    24  flow->backlog -= bytes; in __fq_adjust_removal()
    28  if (flow->backlog) in __fq_adjust_removal()
    31  if (flow == &tin->default_flow) { in __fq_adjust_removal()
    36  idx = flow - fq->flows; in __fq_adjust_removal()
    41  struct fq_flow *flow, in fq_adjust_removal() argument
    44  __fq_adjust_removal(fq, flow, 1, skb->len, skb->truesize); in fq_adjust_removal()
    48  struct fq_flow *flow) in fq_flow_dequeue() argument
    54  skb = __skb_dequeue(&flow->queue); in fq_flow_dequeue()
    [all …]
|
/Linux-v6.1/drivers/gpu/ipu-v3/ |
D | ipu-dp.c |
    46   u32 flow; member
    64   struct ipu_flow flow[IPUV3_NUM_FLOWS]; member
    82   struct ipu_flow *flow = to_flow(dp); in ipu_dp_set_global_alpha() local
    83   struct ipu_dp_priv *priv = flow->priv; in ipu_dp_set_global_alpha()
    88   reg = readl(flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha()
    93   writel(reg, flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha()
    96   reg = readl(flow->base + DP_GRAPH_WIND_CTRL) & 0x00FFFFFFL; in ipu_dp_set_global_alpha()
    98   flow->base + DP_GRAPH_WIND_CTRL); in ipu_dp_set_global_alpha()
    100  reg = readl(flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha()
    101  writel(reg | DP_COM_CONF_GWAM, flow->base + DP_COM_CONF); in ipu_dp_set_global_alpha()
    [all …]
|
/Linux-v6.1/net/netfilter/ |
D | nf_flow_table_core.c |
    21  flow_offload_fill_dir(struct flow_offload *flow, in flow_offload_fill_dir() argument
    24  struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple; in flow_offload_fill_dir()
    25  struct nf_conntrack_tuple *ctt = &flow->ct->tuplehash[dir].tuple; in flow_offload_fill_dir()
    54  struct flow_offload *flow; in flow_offload_alloc() local
    59  flow = kzalloc(sizeof(*flow), GFP_ATOMIC); in flow_offload_alloc()
    60  if (!flow) in flow_offload_alloc()
    64  flow->ct = ct; in flow_offload_alloc()
    66  flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_ORIGINAL); in flow_offload_alloc()
    67  flow_offload_fill_dir(flow, FLOW_OFFLOAD_DIR_REPLY); in flow_offload_alloc()
    70  __set_bit(NF_FLOW_SNAT, &flow->flags); in flow_offload_alloc()
    [all …]
|
D | nf_flow_table_offload.c |
    24   struct flow_offload *flow; member
    224  const struct flow_offload *flow, in flow_offload_eth_src() argument
    236  this_tuple = &flow->tuplehash[dir].tuple; in flow_offload_eth_src()
    243  other_tuple = &flow->tuplehash[!dir].tuple; in flow_offload_eth_src()
    271  const struct flow_offload *flow, in flow_offload_eth_dst() argument
    286  this_tuple = &flow->tuplehash[dir].tuple; in flow_offload_eth_dst()
    293  other_tuple = &flow->tuplehash[!dir].tuple; in flow_offload_eth_dst()
    328  const struct flow_offload *flow, in flow_offload_ipv4_snat() argument
    339  addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr; in flow_offload_ipv4_snat()
    343  addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr; in flow_offload_ipv4_snat()
    [all …]
|
D | nf_flow_table_ip.c |
    21   static int nf_flow_state_check(struct flow_offload *flow, int proto, in nf_flow_state_check() argument
    31   flow_offload_teardown(flow); in nf_flow_state_check()
    75   static void nf_flow_snat_ip(const struct flow_offload *flow, in nf_flow_snat_ip() argument
    84   new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr; in nf_flow_snat_ip()
    89   new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr; in nf_flow_snat_ip()
    98   static void nf_flow_dnat_ip(const struct flow_offload *flow, in nf_flow_dnat_ip() argument
    107  new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr; in nf_flow_dnat_ip()
    112  new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr; in nf_flow_dnat_ip()
    121  static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb, in nf_flow_nat_ip() argument
    125  if (test_bit(NF_FLOW_SNAT, &flow->flags)) { in nf_flow_nat_ip()
    [all …]
|
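nf_flow_snat_ip() and nf_flow_dnat_ip() above both pick the replacement address out of the tuple recorded for the opposite direction. A simplified sketch of just that selection logic (checksum fix-up and header rewriting omitted; the tuple layout is an approximation, not the kernel's struct flow_offload):

#include <stdint.h>

enum flow_dir { DIR_ORIGINAL, DIR_REPLY };

/* Simplified bidirectional flow entry: one tuple per direction. */
struct flow_tuple { uint32_t src_v4, dst_v4; };
struct flow_entry { struct flow_tuple tuple[2]; };

/* SNAT as applied to a packet traversing in @dir:
 *  - original direction: rewrite the source address to the address the
 *    reply tuple carries as its destination;
 *  - reply direction: undo the mapping by rewriting the destination
 *    address back to the original tuple's source. */
static void snat_rewrite(const struct flow_entry *f, enum flow_dir dir,
			 uint32_t *saddr, uint32_t *daddr)
{
	if (dir == DIR_ORIGINAL)
		*saddr = f->tuple[DIR_REPLY].dst_v4;
	else
		*daddr = f->tuple[DIR_ORIGINAL].src_v4;
}

/* DNAT is the mirror image: destination in the original direction,
 * source in the reply direction. */
static void dnat_rewrite(const struct flow_entry *f, enum flow_dir dir,
			 uint32_t *saddr, uint32_t *daddr)
{
	if (dir == DIR_ORIGINAL)
		*daddr = f->tuple[DIR_REPLY].src_v4;
	else
		*saddr = f->tuple[DIR_ORIGINAL].dst_v4;
}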
/Linux-v6.1/Documentation/networking/ |
D | openvswitch.rst |
    8   flow-level packet processing on selected network devices. It can be
    10  VLAN processing, network access control, flow-based network control,
    15  within a bridge). Each datapath also has associated with it a "flow
    22  extracting its flow key and looking it up in the flow table. If there
    23  is a matching flow, it executes the associated actions. If there is
    25  its processing, userspace will likely set up a flow to handle further
    29  Flow key compatibility
    35  versions to parse additional protocols as part of the flow key. It
    39  applications to work with any version of the flow key, past or future.
    43  flow key that it parsed from the packet. Userspace then extracts its
    [all …]
|
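The receive path this document describes (extract a flow key, look it up in the datapath flow table, run the matching actions, otherwise upcall to userspace) can be sketched roughly as below. All names and types here are hypothetical stand-ins, not the Open vSwitch datapath API.

#include <stddef.h>
#include <string.h>

/* Hypothetical, much-simplified datapath types. */
struct flow_key { unsigned char bytes[16]; };	/* parsed header fields */

struct flow_entry {
	struct flow_key key;
	void (*execute_actions)(void *pkt);
	int in_use;
};

#define FLOW_TABLE_SIZE 256
static struct flow_entry flow_table[FLOW_TABLE_SIZE];

/* Linear scan standing in for the real hash-based flow table lookup. */
static struct flow_entry *flow_table_lookup(const struct flow_key *key)
{
	for (int i = 0; i < FLOW_TABLE_SIZE; i++)
		if (flow_table[i].in_use &&
		    !memcmp(&flow_table[i].key, key, sizeof(*key)))
			return &flow_table[i];
	return NULL;
}

/* Stub: the real code parses Ethernet/IP/transport headers from the packet. */
static struct flow_key extract_flow_key(const void *pkt)
{
	struct flow_key key;

	memcpy(key.bytes, pkt, sizeof(key.bytes));
	return key;
}

/* Stub: the real code queues the packet plus the parsed key to userspace. */
static void upcall_to_userspace(void *pkt, const struct flow_key *key)
{
	(void)pkt;
	(void)key;
}

/* Fast path: a hit executes the flow's actions in the kernel; a miss goes
 * to userspace, which typically installs a flow so that later packets of
 * the same flow never leave the kernel. */
void datapath_receive(void *pkt)
{
	struct flow_key key = extract_flow_key(pkt);
	struct flow_entry *flow = flow_table_lookup(&key);

	if (flow)
		flow->execute_actions(pkt);
	else
		upcall_to_userspace(pkt, &key);
}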
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/en/ |
D | tc_priv.h |
    92   /* Flow can be associated with multiple encap IDs.
    102  struct list_head peer; /* flows with peer flow */
    106  struct net_device *orig_dev; /* netdev adding flow first */
    108  struct list_head tmp_list; /* temporary flow list used by neigh update */
    132  struct mlx5e_tc_flow *flow,
    137  mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow);
    139  void mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow);
    140  int mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow);
    142  bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
    143  bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
    [all …]
|
D | tc_tun_encap.c |
    72   int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow, in mlx5e_tc_set_attr_rx_tun() argument
    75   struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr; in mlx5e_tc_set_attr_rx_tun()
    116  flow_flag_set(flow, TUN_RX); in mlx5e_tc_set_attr_rx_tun()
    117  flow->attr->tun_ip_version = ip_version; in mlx5e_tc_set_attr_rx_tun()
    126  /* Flow can be associated with multiple encap entries. in mlx5e_tc_flow_all_encaps_valid()
    127  * Before offloading the flow verify that all of them have in mlx5e_tc_flow_all_encaps_valid()
    152  struct mlx5e_tc_flow *flow; in mlx5e_tc_encap_flows_add() local
    173  list_for_each_entry(flow, flow_list, tmp_list) { in mlx5e_tc_encap_flows_add()
    174  if (!mlx5e_is_offloaded_flow(flow) || !flow_flag_test(flow, SLOW)) in mlx5e_tc_encap_flows_add()
    177  spec = &flow->attr->parse_attr->spec; in mlx5e_tc_encap_flows_add()
    [all …]
|
/Linux-v6.1/drivers/net/ethernet/broadcom/bnxt/ |
D | bnxt_tc.c |
    44   /* Return the dst fid of the func for flow forwarding
    370  struct bnxt_tc_flow *flow) in bnxt_tc_parse_flow() argument
    387  flow->l2_key.ether_type = match.key->n_proto; in bnxt_tc_parse_flow()
    388  flow->l2_mask.ether_type = match.mask->n_proto; in bnxt_tc_parse_flow()
    392  flow->l4_key.ip_proto = match.key->ip_proto; in bnxt_tc_parse_flow()
    393  flow->l4_mask.ip_proto = match.mask->ip_proto; in bnxt_tc_parse_flow()
    401  flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS; in bnxt_tc_parse_flow()
    402  ether_addr_copy(flow->l2_key.dmac, match.key->dst); in bnxt_tc_parse_flow()
    403  ether_addr_copy(flow->l2_mask.dmac, match.mask->dst); in bnxt_tc_parse_flow()
    404  ether_addr_copy(flow->l2_key.smac, match.key->src); in bnxt_tc_parse_flow()
    [all …]
|
/Linux-v6.1/net/openvswitch/ |
D | flow_table.c |
    6    #include "flow.h"
    74   struct sw_flow *flow; in ovs_flow_alloc() local
    77   flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL); in ovs_flow_alloc()
    78   if (!flow) in ovs_flow_alloc()
    81   flow->stats_last_writer = -1; in ovs_flow_alloc()
    92   RCU_INIT_POINTER(flow->stats[0], stats); in ovs_flow_alloc()
    94   cpumask_set_cpu(0, &flow->cpu_used_mask); in ovs_flow_alloc()
    96   return flow; in ovs_flow_alloc()
    98   kmem_cache_free(flow_cache, flow); in ovs_flow_alloc()
    107  static void flow_free(struct sw_flow *flow) in flow_free() argument
    [all …]
|
/Linux-v6.1/drivers/net/ethernet/mellanox/mlx5/core/ |
D | en_tc.c |
    155  * last flow from a group and then deleting a group, we get into del_sw_flow_group()
    161  static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
    162  static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
    410  mlx5_core_err(priv->mdev, "Failed to get flow meter\n"); in mlx5e_tc_add_flow_meter()
    571  struct mlx5e_tc_flow *flow);
    573  struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow) in mlx5e_flow_get() argument
    575  if (!flow || !refcount_inc_not_zero(&flow->refcnt)) in mlx5e_flow_get()
    577  return flow; in mlx5e_flow_get()
    580  void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) in mlx5e_flow_put() argument
    582  if (refcount_dec_and_test(&flow->refcnt)) { in mlx5e_flow_put()
    [all …]
|
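mlx5e_flow_get()/mlx5e_flow_put() listed above are an instance of the common refcount_inc_not_zero()/refcount_dec_and_test() pattern for objects whose lookup can race with their final put. A generic sketch of that pattern against the kernel refcount API (hypothetical object type, not the mlx5 flow structure):

#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical refcounted object; in en_tc.c this role is played by
 * struct mlx5e_tc_flow. */
struct obj {
	refcount_t refcnt;
	/* ... payload ... */
};

/* Take a reference only if the object is still live; returns NULL when
 * the refcount has already hit zero, i.e. a concurrent put is freeing it. */
static struct obj *obj_get(struct obj *o)
{
	if (!o || !refcount_inc_not_zero(&o->refcnt))
		return NULL;
	return o;
}

/* Drop a reference; the last put frees the object. */
static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->refcnt))
		kfree(o);
}

Because refcount_inc_not_zero() fails once the count has reached zero, a racing lookup can never resurrect an object that the last put is already tearing down.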
/Linux-v6.1/drivers/infiniband/hw/hfi1/ |
D | tid_rdma.c |
    37   /* Maximum number of packets within a flow generation. */
    134  struct tid_rdma_flow *flow,
    439  /* Flow and tid waiter functions */
    529  * This should be done after the hardware flow and
    698  /* Flow functions */
    700  * kern_reserve_flow - allocate a hardware flow
    702  * @last: the index of the preferred flow. Use RXE_NUM_TID_FLOWS to
    706  * flow for use in receiving KDETH data packets. If a preferred flow is
    707  * specified the function will attempt to reserve that flow again, if
    721  /* Attempt to reserve the preferred flow index */ in kern_reserve_flow()
    [all …]
|
/Linux-v6.1/drivers/net/ethernet/netronome/nfp/flower/ |
D | conntrack.h |
    38   * struct nfp_fl_ct_zone_entry - Zone entry containing conntrack flow information
    106  /* NFP flow entry flags. */
    111  * struct nfp_fl_ct_flow_entry - Flow entry containing conntrack flow information
    112  * @cookie: Flow cookie, same as original TC flow, used as key
    114  * @chain_index: Chain index of the original flow
    118  * @children: List of tc_merge flows this flow forms part of
    119  * @rule: Reference to the original TC flow rule
    122  * @flags: Used to indicate flow flag like NAT which used by merge.
    140  * @cookie: Flow cookie, combination of pre and post ct cookies
    161  * struct nfp_fl_nft_tc_merge - Merge of tc_merge flows with nft flow
    [all …]
|
/Linux-v6.1/Documentation/bpf/ |
D | prog_flow_dissector.rst |
    10  Flow dissector is a routine that parses metadata out of the packets. It's
    11  used in the various places in the networking subsystem (RFS, flow hash, etc).
    13  BPF flow dissector is an attempt to reimplement C-based flow dissector logic
    20  BPF flow dissector programs operate on an ``__sk_buff``. However, only the
    22  ``flow_keys`` is ``struct bpf_flow_keys`` and contains flow dissector input
    31  Flow dissector BPF program should fill out the rest of the ``struct
    41  In the VLAN-less case, this is what the initial state of the BPF flow
    49  +-- flow dissector starts here
    58  In case of VLAN, flow dissector can be called with the two different states.
    67  +-- flow dissector starts here
    [all …]
|
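To make the program structure described there concrete, below is a minimal flow dissector sketch: it reads the input fields the kernel pre-fills in skb->flow_keys (nhoff, n_proto), parses only a plain IPv4 header, fills the address/protocol outputs and returns BPF_OK. This is an illustrative sketch assuming libbpf-style headers; a realistic dissector (see tools/testing/selftests/bpf/progs/bpf_flow.c) handles many more protocols, including the VLAN states shown in that document.

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("flow_dissector")
int simple_dissector(struct __sk_buff *skb)
{
	struct bpf_flow_keys *keys = skb->flow_keys;
	struct iphdr iph;

	/* nhoff/n_proto are input: where the network header starts and
	 * which protocol it carries (already past any VLAN tags). */
	if (keys->n_proto != bpf_htons(ETH_P_IP))
		return BPF_DROP;	/* this sketch only knows IPv4 */

	if (bpf_skb_load_bytes(skb, keys->nhoff, &iph, sizeof(iph)) < 0)
		return BPF_DROP;
	if (iph.ihl < 5)
		return BPF_DROP;	/* malformed header length */

	/* Fill the output half of struct bpf_flow_keys. */
	keys->addr_proto = ETH_P_IP;
	keys->ipv4_src = iph.saddr;
	keys->ipv4_dst = iph.daddr;
	keys->ip_proto = iph.protocol;
	keys->thoff = keys->nhoff + (iph.ihl << 2);	/* transport header */

	return BPF_OK;
}

char _license[] SEC("license") = "GPL";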
/Linux-v6.1/drivers/net/ethernet/intel/ice/ |
D | ice_ethtool_fdir.c |
    4   /* flow director ethtool support for ice */
    36  * flow type values
    37  * @flow: filter type to be converted
    39  * Returns the corresponding ethtool flow type.
    41  static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow) in ice_fltr_to_ethtool_flow() argument
    43  switch (flow) { in ice_fltr_to_ethtool_flow()
    61  /* 0 is undefined ethtool flow */ in ice_fltr_to_ethtool_flow()
    67  * ice_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
    68  * @eth: Ethtool flow type to be converted
    70  * Returns flow enum
    [all …]
|
/Linux-v6.1/Documentation/core-api/ |
D | genericirq.rst |
    52   optimize the flow of the interrupt handling for each specific interrupt
    58   the flow control in the super-handler. This leads to a mix of flow logic
    62   have different flow handling.
    64   A more natural abstraction is the clean separation of the 'irq flow' and
    68   reveals that most of them can use a generic set of 'irq flow' methods
    71   IRQ flow itself but not in the chip details - and thus provides a more
    74   Each interrupt descriptor is assigned its own high-level flow handler,
    76   flow handler implementation also makes it simple to provide
    82   IRQ-flow implementation for 'level type' interrupts and add a
    104  2. High-level IRQ flow handlers
    [all …]
|
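The separation that document describes shows up in driver code as a single call binding an interrupt line to its chip and to a generic flow handler. A hedged sketch of a hypothetical irqchip driver picking handle_level_irq() vs handle_edge_irq() per line (the irq_set_chip_and_handler(), handle_level_irq() and handle_edge_irq() names are the real genericirq API; the chip, the domain map callback and the level/edge split are made up for illustration):

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Hypothetical chip callbacks; a real driver pokes its controller's
 * mask/unmask/ack registers here. */
static void demo_irq_mask(struct irq_data *d)   { /* write mask register   */ }
static void demo_irq_unmask(struct irq_data *d) { /* write unmask register */ }
static void demo_irq_ack(struct irq_data *d)    { /* write ack register    */ }

static struct irq_chip demo_irq_chip = {
	.name		= "demo",
	.irq_mask	= demo_irq_mask,
	.irq_unmask	= demo_irq_unmask,
	.irq_ack	= demo_irq_ack,
};

/* The chip details live in demo_irq_chip; the flow is expressed only by
 * the choice of generic handler: level-type lines get handle_level_irq(),
 * edge-type lines get handle_edge_irq(). */
static int demo_irq_map(struct irq_domain *d, unsigned int virq,
			irq_hw_number_t hwirq)
{
	bool level = hwirq < 16;	/* made-up split for the example */

	irq_set_chip_and_handler(virq, &demo_irq_chip,
				 level ? handle_level_irq : handle_edge_irq);
	return 0;
}

All of the flow logic (mask/ack ordering, replay of pending edges) lives in the generic handlers; the chip structure only supplies hardware accessors, which is exactly the separation the text argues for.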
/Linux-v6.1/samples/bpf/ |
D | sockex2_kern.c |
    63   struct flow_key_record *flow) in parse_ip() argument
    73   flow->src = load_word(skb, nhoff + offsetof(struct iphdr, saddr)); in parse_ip()
    74   flow->dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr)); in parse_ip()
    87   struct flow_key_record *flow) in parse_ipv6() argument
    91   flow->src = ipv6_addr_hash(skb, in parse_ipv6()
    93   flow->dst = ipv6_addr_hash(skb, in parse_ipv6()
    101  struct flow_key_record *flow) in flow_dissector() argument
    121  nhoff = parse_ip(skb, nhoff, &ip_proto, flow); in flow_dissector()
    123  nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow); in flow_dissector()
    159  nhoff = parse_ip(skb, nhoff, &ip_proto, flow); in flow_dissector()
    [all …]
|