/Linux-v5.4/include/net/
D  fq_impl.h
    15  struct fq_flow *flow,  in fq_adjust_removal() argument
    18  struct fq_tin *tin = flow->tin;  in fq_adjust_removal()
    22  flow->backlog -= skb->len;  in fq_adjust_removal()
    27  static void fq_rejigger_backlog(struct fq *fq, struct fq_flow *flow)  in fq_rejigger_backlog() argument
    31  if (flow->backlog == 0) {  in fq_rejigger_backlog()
    32  list_del_init(&flow->backlogchain);  in fq_rejigger_backlog()
    34  i = flow;  in fq_rejigger_backlog()
    37  if (i->backlog < flow->backlog)  in fq_rejigger_backlog()
    40  list_move_tail(&flow->backlogchain,  in fq_rejigger_backlog()
    46  struct fq_flow *flow)  in fq_flow_dequeue() argument
    [all …]
/Linux-v5.4/net/sched/
D  sch_atm.c
    24  * Each class maps the flow(s) it is handling to a given VC. Multiple classes
    39  * - should lock the flow while there is data in the queue (?)
    58  struct atm_flow_data *excess; /* flow for excess traffic;
    71  /* ------------------------- Class/flow operations ------------------------- */
    76  struct atm_flow_data *flow;  in lookup_flow() local
    78  list_for_each_entry(flow, &p->flows, list) {  in lookup_flow()
    79  if (flow->common.classid == classid)  in lookup_flow()
    80  return flow;  in lookup_flow()
    90  struct atm_flow_data *flow = (struct atm_flow_data *)arg;  in atm_tc_graft() local
    92  pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",  in atm_tc_graft()
    [all …]
D  sch_fq_codel.c
    33  * Each flow has a CoDel managed queue.
    37  * For a given flow, packets are not reordered (CoDel uses a FIFO)
    40  * Low memory footprint (64 bytes per flow)
    117  static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)  in dequeue_head() argument
    119  struct sk_buff *skb = flow->head;  in dequeue_head()
    121  flow->head = skb->next;  in dequeue_head()
    126  /* add skb to flow queue (tail add) */
    127  static inline void flow_queue_add(struct fq_codel_flow *flow,  in flow_queue_add() argument
    130  if (flow->head == NULL)  in flow_queue_add()
    131  flow->head = skb;  in flow_queue_add()
    [all …]
D  sch_hhf.c
    20  * buckets. Initially, a new flow starts as non-heavy-hitter. Once classified
    37  * - For a heavy-hitter flow: *all* of its k array counters must be large.
    38  * - For a non-heavy-hitter flow: some of its k array counters can be large
    59  * Once a flow is classified as heavy-hitter, we also save its per-flow state
    60  * in an exact-matching flow table so that its subsequent packets can be
    66  * - If the flow-id of p (e.g., TCP 5-tuple) is already in the exact-matching
    67  * heavy-hitter flow table, denoted table T, then send p to the heavy-hitter
    70  * + If F decides that p belongs to a non-heavy-hitter flow, then send p
    72  * + Otherwise, if F decides that p belongs to a new heavy-hitter flow,
    73  * then set up a new flow entry for the flow-id of p in the table T and
    [all …]
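
The sch_hhf.c comment above describes the multistage-filter test: a flow is treated as a heavy-hitter only when *all* of its k per-stage counters are large. Below is a minimal, standalone sketch of that test; the stage count, hash mixing and byte threshold are illustrative assumptions, not the qdisc's actual parameters (see net/sched/sch_hhf.c for the real implementation).

    /* Hedged sketch of the multistage-filter test described in sch_hhf.c:
     * a flow counts as a heavy-hitter only when ALL k per-stage counters
     * that its flow-id hashes to exceed the admission threshold.
     * The hash, HH_STAGES and HH_THRESHOLD are illustrative values. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HH_STAGES    4             /* k hash stages */
    #define HH_BUCKETS   1024          /* counters per stage */
    #define HH_THRESHOLD (128 * 1024)  /* bytes before a counter is "large" */

    static uint32_t counters[HH_STAGES][HH_BUCKETS];

    /* Illustrative per-stage hash: mix the flow id with a stage seed. */
    static uint32_t stage_hash(uint32_t flow_id, int stage)
    {
        uint32_t h = flow_id ^ (0x9e3779b9u * (uint32_t)(stage + 1));

        h ^= h >> 16;
        h *= 0x85ebca6bu;
        h ^= h >> 13;
        return h % HH_BUCKETS;
    }

    /* Charge one packet to the filter and report whether the flow now
     * looks like a heavy-hitter (every stage counter above threshold). */
    static bool hhf_account(uint32_t flow_id, uint32_t pkt_len)
    {
        bool heavy = true;

        for (int s = 0; s < HH_STAGES; s++) {
            uint32_t *c = &counters[s][stage_hash(flow_id, s)];

            *c += pkt_len;
            if (*c < HH_THRESHOLD)
                heavy = false;   /* one small counter clears the flow */
        }
        return heavy;
    }

    int main(void)
    {
        /* A steady 1500-byte sender eventually trips all stages;
         * a sparse flow does not. */
        for (int i = 0; i < 100; i++)
            hhf_account(42, 1500);
        printf("flow 42 heavy: %d\n", hhf_account(42, 1500));
        printf("flow 7 heavy:  %d\n", hhf_account(7, 60));
        return 0;
    }
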
/Linux-v5.4/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
D  flowring.c
    43  brcmf_flowring_is_tdls_mac(struct brcmf_flowring *flow, u8 mac[ETH_ALEN])  in brcmf_flowring_is_tdls_mac() argument
    47  search = flow->tdls_entry;  in brcmf_flowring_is_tdls_mac()
    59  u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],  in brcmf_flowring_lookup() argument
    71  sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);  in brcmf_flowring_lookup()
    77  if ((sta) && (flow->tdls_active) &&  in brcmf_flowring_lookup()
    78  (brcmf_flowring_is_tdls_mac(flow, da))) {  in brcmf_flowring_lookup()
    85  hash = flow->hash;  in brcmf_flowring_lookup()
    103  u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],  in brcmf_flowring_create() argument
    116  sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);  in brcmf_flowring_create()
    122  if ((sta) && (flow->tdls_active) &&  in brcmf_flowring_create()
    [all …]
D  flowring.h
    50  u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
    52  u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
    54  void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid);
    55  void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid);
    56  u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid);
    57  u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
    59  struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid);
    60  void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
    62  u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid);
    63  u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid);
    [all …]
/Linux-v5.4/drivers/gpu/ipu-v3/
D  ipu-dp.c
    45  u32 flow;  member
    63  struct ipu_flow flow[IPUV3_NUM_FLOWS];  member
    81  struct ipu_flow *flow = to_flow(dp);  in ipu_dp_set_global_alpha() local
    82  struct ipu_dp_priv *priv = flow->priv;  in ipu_dp_set_global_alpha()
    87  reg = readl(flow->base + DP_COM_CONF);  in ipu_dp_set_global_alpha()
    92  writel(reg, flow->base + DP_COM_CONF);  in ipu_dp_set_global_alpha()
    95  reg = readl(flow->base + DP_GRAPH_WIND_CTRL) & 0x00FFFFFFL;  in ipu_dp_set_global_alpha()
    97  flow->base + DP_GRAPH_WIND_CTRL);  in ipu_dp_set_global_alpha()
    99  reg = readl(flow->base + DP_COM_CONF);  in ipu_dp_set_global_alpha()
    100  writel(reg | DP_COM_CONF_GWAM, flow->base + DP_COM_CONF);  in ipu_dp_set_global_alpha()
    [all …]
/Linux-v5.4/net/openvswitch/
D  flow_table.c
    6  #include "flow.h"
    68  struct sw_flow *flow;  in ovs_flow_alloc() local
    71  flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);  in ovs_flow_alloc()
    72  if (!flow)  in ovs_flow_alloc()
    75  flow->stats_last_writer = -1;  in ovs_flow_alloc()
    86  RCU_INIT_POINTER(flow->stats[0], stats);  in ovs_flow_alloc()
    88  cpumask_set_cpu(0, &flow->cpu_used_mask);  in ovs_flow_alloc()
    90  return flow;  in ovs_flow_alloc()
    92  kmem_cache_free(flow_cache, flow);  in ovs_flow_alloc()
    101  static void flow_free(struct sw_flow *flow)  in flow_free() argument
    [all …]
/Linux-v5.4/Documentation/networking/
D  openvswitch.txt
    5  flow-level packet processing on selected network devices. It can be
    7  VLAN processing, network access control, flow-based network control,
    12  within a bridge). Each datapath also has associated with it a "flow
    19  extracting its flow key and looking it up in the flow table. If there
    20  is a matching flow, it executes the associated actions. If there is
    22  its processing, userspace will likely set up a flow to handle further
    26  Flow key compatibility
    32  versions to parse additional protocols as part of the flow key. It
    36  applications to work with any version of the flow key, past or future.
    40  flow key that it parsed from the packet. Userspace then extracts its
    [all …]
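
The openvswitch.txt excerpt above describes the datapath fast path: extract a flow key, look it up in the flow table, run the matching flow's actions, and on a miss hand the packet to userspace, which typically installs a flow so later packets are handled in-kernel. The sketch below is a hedged, heavily simplified model of that loop; the key fields, linear table and string "actions" are stand-ins, not Open vSwitch's real sw_flow structures.

    /* Hedged sketch of the Open vSwitch datapath behaviour described in
     * openvswitch.txt: key extraction -> flow-table lookup -> actions,
     * with a miss punted to "userspace" which installs a new flow. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct flow_key {                 /* tiny 5-tuple-style key (stand-in) */
        uint32_t src_ip, dst_ip;
        uint16_t src_port, dst_port;
        uint8_t  proto;
    };

    struct flow {
        struct flow_key key;
        const char *actions;          /* e.g. "output:2" */
        int in_use;
    };

    #define TABLE_SIZE 16
    static struct flow flow_table[TABLE_SIZE];

    static struct flow *flow_lookup(const struct flow_key *key)
    {
        for (int i = 0; i < TABLE_SIZE; i++)
            if (flow_table[i].in_use &&
                !memcmp(&flow_table[i].key, key, sizeof(*key)))
                return &flow_table[i];
        return NULL;                  /* miss */
    }

    /* "Userspace" handler: decide on actions and install a flow so that
     * later packets of this flow stay in the fast path. */
    static void upcall_and_install(const struct flow_key *key)
    {
        for (int i = 0; i < TABLE_SIZE; i++) {
            if (!flow_table[i].in_use) {
                memcpy(&flow_table[i].key, key, sizeof(*key));
                flow_table[i].actions = "output:2";
                flow_table[i].in_use = 1;
                printf("miss: installed flow, actions=%s\n",
                       flow_table[i].actions);
                return;
            }
        }
    }

    static void receive_packet(const struct flow_key *key)
    {
        struct flow *f = flow_lookup(key);

        if (f)
            printf("hit: executing actions %s\n", f->actions);
        else
            upcall_and_install(key);
    }

    int main(void)
    {
        struct flow_key k;

        memset(&k, 0, sizeof(k));
        k.src_ip = 0x0a000001; k.dst_ip = 0x0a000002;
        k.src_port = 1234; k.dst_port = 80; k.proto = 6;

        receive_packet(&k);   /* first packet misses and installs a flow */
        receive_packet(&k);   /* second packet hits the installed flow */
        return 0;
    }
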
D  scaling.rst
    19  - RFS: Receive Flow Steering
    20  - Accelerated Receive Flow Steering
    31  of logical flows. Packets for each flow are steered to a separate receive
    131  flow hash over the packet’s addresses or ports (2-tuple or 4-tuple hash
    133  associated flow of the packet. The hash is either provided by hardware
    138  packet’s flow.
    142  an index into the list is computed from the flow hash modulo the size
    183  RPS Flow Limit
    187  reordering. The trade-off to sending all packets from the same flow
    189  In the extreme case a single flow dominates traffic. Especially on
    [all …]
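
Line 142 of scaling.rst states the core of RPS CPU selection: the flow hash, taken modulo the size of the configured CPU list, picks the CPU that processes the packet. A minimal sketch of just that step follows; the hash value and CPU map are illustrative, whereas the kernel uses the skb's computed or hardware-provided flow hash and the per-rx-queue rps_cpus map configured through sysfs.

    /* Hedged sketch of the RPS selection step described in scaling.rst:
     * flow_hash % map_len indexes the list of CPUs enabled for the queue. */
    #include <stdint.h>
    #include <stdio.h>

    static int rps_select_cpu(uint32_t flow_hash, const int *cpu_map, int map_len)
    {
        return cpu_map[flow_hash % map_len];
    }

    int main(void)
    {
        /* CPUs enabled for this receive queue (e.g. via the rps_cpus bitmap). */
        int cpu_map[] = { 0, 1, 2, 3 };
        uint32_t flow_hash = 0x6b8b4567;  /* stand-in for the 2/4-tuple hash */

        printf("steer flow to CPU %d\n", rps_select_cpu(flow_hash, cpu_map, 4));
        return 0;
    }
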
/Linux-v5.4/drivers/net/ethernet/broadcom/bnxt/
D  bnxt_tc.c
    39  /* Return the dst fid of the func for flow forwarding
    174  struct bnxt_tc_flow *flow)  in bnxt_tc_parse_flow() argument
    191  flow->l2_key.ether_type = match.key->n_proto;  in bnxt_tc_parse_flow()
    192  flow->l2_mask.ether_type = match.mask->n_proto;  in bnxt_tc_parse_flow()
    196  flow->l4_key.ip_proto = match.key->ip_proto;  in bnxt_tc_parse_flow()
    197  flow->l4_mask.ip_proto = match.mask->ip_proto;  in bnxt_tc_parse_flow()
    205  flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;  in bnxt_tc_parse_flow()
    206  ether_addr_copy(flow->l2_key.dmac, match.key->dst);  in bnxt_tc_parse_flow()
    207  ether_addr_copy(flow->l2_mask.dmac, match.mask->dst);  in bnxt_tc_parse_flow()
    208  ether_addr_copy(flow->l2_key.smac, match.key->src);  in bnxt_tc_parse_flow()
    [all …]
/Linux-v5.4/net/netfilter/
D  nf_flow_table_core.c
    18  struct flow_offload flow;  member
    27  flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,  in flow_offload_fill_dir() argument
    31  struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;  in flow_offload_fill_dir()
    64  struct flow_offload *flow;  in flow_offload_alloc() local
    74  flow = &entry->flow;  in flow_offload_alloc()
    84  flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_ORIGINAL);  in flow_offload_alloc()
    85  flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_REPLY);  in flow_offload_alloc()
    88  flow->flags |= FLOW_OFFLOAD_SNAT;  in flow_offload_alloc()
    90  flow->flags |= FLOW_OFFLOAD_DNAT;  in flow_offload_alloc()
    92  return flow;  in flow_offload_alloc()
    [all …]
D  nf_flow_table_ip.c
    19  static int nf_flow_state_check(struct flow_offload *flow, int proto,  in nf_flow_state_check() argument
    32  flow_offload_teardown(flow);  in nf_flow_state_check()
    92  static int nf_flow_snat_ip(const struct flow_offload *flow, struct sk_buff *skb,  in nf_flow_snat_ip() argument
    101  new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;  in nf_flow_snat_ip()
    106  new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;  in nf_flow_snat_ip()
    117  static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,  in nf_flow_dnat_ip() argument
    126  new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;  in nf_flow_dnat_ip()
    131  new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;  in nf_flow_dnat_ip()
    142  static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,  in nf_flow_nat_ip() argument
    147  if (flow->flags & FLOW_OFFLOAD_SNAT &&  in nf_flow_nat_ip()
    [all …]
D  nf_tables_offload.c
    12  struct nft_flow_rule *flow;  in nft_flow_rule_alloc() local
    14  flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);  in nft_flow_rule_alloc()
    15  if (!flow)  in nft_flow_rule_alloc()
    18  flow->rule = flow_rule_alloc(num_actions);  in nft_flow_rule_alloc()
    19  if (!flow->rule) {  in nft_flow_rule_alloc()
    20  kfree(flow);  in nft_flow_rule_alloc()
    24  flow->rule->match.dissector = &flow->match.dissector;  in nft_flow_rule_alloc()
    25  flow->rule->match.mask = &flow->match.mask;  in nft_flow_rule_alloc()
    26  flow->rule->match.key = &flow->match.key;  in nft_flow_rule_alloc()
    28  return flow;  in nft_flow_rule_alloc()
    [all …]
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/
D  en_tc.c
    118  /* Flow can be associated with multiple encap IDs.
    128  struct list_head peer; /* flows with peer flow */
    131  struct list_head tmp_list; /* temporary flow list used by neigh update */
    214  struct mlx5e_tc_flow *flow);
    216  static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)  in mlx5e_flow_get() argument
    218  if (!flow || !refcount_inc_not_zero(&flow->refcnt))  in mlx5e_flow_get()
    220  return flow;  in mlx5e_flow_get()
    224  struct mlx5e_tc_flow *flow)  in mlx5e_flow_put() argument
    226  if (refcount_dec_and_test(&flow->refcnt)) {  in mlx5e_flow_put()
    227  mlx5e_tc_del_flow(priv, flow);  in mlx5e_flow_put()
    [all …]
/Linux-v5.4/drivers/infiniband/hw/hfi1/
D  tid_rdma.c
    37  /* Maximum number of packets within a flow generation. */
    134  struct tid_rdma_flow *flow,
    438  /* Flow and tid waiter functions */
    525  * This should be done after the hardware flow and
    693  /* Flow functions */
    695  * kern_reserve_flow - allocate a hardware flow
    697  * @last - the index of the preferred flow. Use RXE_NUM_TID_FLOWS to
    701  * flow for use in receiving KDETH data packets. If a preferred flow is
    702  * specified the function will attempt to reserve that flow again, if
    716  /* Attempt to reserve the preferred flow index */  in kern_reserve_flow()
    [all …]
/Linux-v5.4/Documentation/bpf/
D  prog_flow_dissector.rst
    10  Flow dissector is a routine that parses metadata out of the packets. It's
    11  used in the various places in the networking subsystem (RFS, flow hash, etc).
    13  BPF flow dissector is an attempt to reimplement C-based flow dissector logic
    20  BPF flow dissector programs operate on an ``__sk_buff``. However, only the
    22  ``flow_keys`` is ``struct bpf_flow_keys`` and contains flow dissector input
    31  Flow dissector BPF program should fill out the rest of the ``struct
    41  In the VLAN-less case, this is what the initial state of the BPF flow
    49  +-- flow dissector starts here
    58  In case of VLAN, flow dissector can be called with the two different states.
    67  +-- flow dissector starts here
    [all …]
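
The prog_flow_dissector.rst excerpt above says the program runs on an ``__sk_buff``, consumes the input fields of ``skb->flow_keys`` (initial ``nhoff``/``n_proto``), and fills out the rest of ``struct bpf_flow_keys``. Below is a hedged sketch of such a program that understands only plain IPv4 with TCP/UDP; it is meant to show the shape of the flow_keys handling, not to be a complete dissector (the in-tree reference is tools/testing/selftests/bpf/progs/bpf_flow.c). It assumes libbpf's bpf_helpers.h/bpf_endian.h for SEC() and bpf_htons().

    /* Hedged sketch of a BPF flow dissector program (IPv4 + TCP/UDP only). */
    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include <linux/in.h>
    #include <linux/ip.h>
    #include <linux/udp.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_endian.h>

    SEC("flow_dissector")
    int flow_dissector_prog(struct __sk_buff *skb)
    {
        struct bpf_flow_keys *keys = skb->flow_keys;
        void *data = (void *)(long)skb->data;
        void *data_end = (void *)(long)skb->data_end;
        struct iphdr *iph;
        struct udphdr *ports;   /* first two fields match TCP's sport/dport */

        /* On entry nhoff points at the L3 header and n_proto holds the
         * EtherType; bail out for anything but plain IPv4. */
        if (keys->n_proto != bpf_htons(ETH_P_IP))
            return BPF_DROP;

        iph = data + keys->nhoff;
        if ((void *)(iph + 1) > data_end)
            return BPF_DROP;
        if (iph->ihl != 5)      /* keep the sketch simple: no IP options */
            return BPF_DROP;

        keys->addr_proto = ETH_P_IP;
        keys->ip_proto = iph->protocol;
        keys->ipv4_src = iph->saddr;
        keys->ipv4_dst = iph->daddr;
        keys->thoff = keys->nhoff + sizeof(*iph);

        if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
            return BPF_OK;      /* L3 information alone is still a result */

        ports = data + keys->thoff;
        if ((void *)(ports + 1) > data_end)
            return BPF_DROP;
        keys->sport = ports->source;
        keys->dport = ports->dest;

        return BPF_OK;
    }

    char _license[] SEC("license") = "GPL";
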
/Linux-v5.4/Documentation/core-api/
D  genericirq.rst
    52  optimize the flow of the interrupt handling for each specific interrupt
    58  the flow control in the super-handler. This leads to a mix of flow logic
    62  have different flow handling.
    64  A more natural abstraction is the clean separation of the 'irq flow' and
    68  reveals that most of them can use a generic set of 'irq flow' methods
    71  IRQ flow itself but not in the chip details - and thus provides a more
    74  Each interrupt descriptor is assigned its own high-level flow handler,
    76  flow handler implementation also makes it simple to provide
    82  IRQ-flow implementation for 'level type' interrupts and add a
    104  2. High-level IRQ flow handlers
    [all …]
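
The genericirq.rst excerpt describes separating the per-trigger-type 'irq flow' (assigned per interrupt descriptor) from the chip details hidden behind a small ops structure. The following is a hedged, standalone toy model of that split; the names mirror the kernel's concepts (irq_chip, irq_desc, a level-type flow handler that masks/acks, runs the action, then unmasks), but this is an illustration, not the in-tree implementation.

    /* Hedged toy model of the 'irq flow' vs. 'chip details' separation. */
    #include <stdio.h>

    struct irq_chip {                   /* chip details only */
        const char *name;
        void (*mask)(int irq);
        void (*unmask)(int irq);
        void (*ack)(int irq);
    };

    struct irq_desc {                   /* one per interrupt */
        int irq;
        struct irq_chip *chip;
        void (*handle_irq)(struct irq_desc *desc);  /* high-level flow handler */
        void (*action)(int irq);                    /* driver's handler */
    };

    /* Generic 'level type' flow: mask+ack, run the action, then unmask. */
    static void handle_level_irq(struct irq_desc *desc)
    {
        desc->chip->mask(desc->irq);
        desc->chip->ack(desc->irq);
        desc->action(desc->irq);
        desc->chip->unmask(desc->irq);
    }

    /* A trivial chip implementation standing in for real register writes. */
    static void demo_mask(int irq)   { printf("chip: mask %d\n", irq); }
    static void demo_unmask(int irq) { printf("chip: unmask %d\n", irq); }
    static void demo_ack(int irq)    { printf("chip: ack %d\n", irq); }
    static struct irq_chip demo_chip = { "demo", demo_mask, demo_unmask, demo_ack };

    static void uart_action(int irq) { printf("driver: handling irq %d\n", irq); }

    int main(void)
    {
        struct irq_desc uart = { 5, &demo_chip, handle_level_irq, uart_action };

        uart.handle_irq(&uart);   /* what the low-level entry code would call */
        return 0;
    }
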
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
D  tls.c
    39  static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk)  in mlx5e_tls_set_ipv4_flow() argument
    43  MLX5_SET(tls_flow, flow, ipv6, 0);  in mlx5e_tls_set_ipv4_flow()
    44  memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),  in mlx5e_tls_set_ipv4_flow()
    46  memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv4_layout.ipv4),  in mlx5e_tls_set_ipv4_flow()
    51  static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk)  in mlx5e_tls_set_ipv6_flow() argument
    55  MLX5_SET(tls_flow, flow, ipv6, 1);  in mlx5e_tls_set_ipv6_flow()
    56  memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),  in mlx5e_tls_set_ipv6_flow()
    58  memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv6_layout.ipv6),  in mlx5e_tls_set_ipv6_flow()
    63  static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk)  in mlx5e_tls_set_flow_tcp_ports() argument
    67  memcpy(MLX5_ADDR_OF(tls_flow, flow, src_port), &inet->inet_sport,  in mlx5e_tls_set_flow_tcp_ports()
    [all …]
/Linux-v5.4/drivers/net/ethernet/netronome/nfp/flower/
D  match.c
    13  struct flow_cls_offload *flow, u8 key_type)  in nfp_flower_compile_meta_tci() argument
    15  struct flow_rule *rule = flow_cls_offload_flow_rule(flow);  in nfp_flower_compile_meta_tci()
    81  struct flow_cls_offload *flow)  in nfp_flower_compile_mac() argument
    83  struct flow_rule *rule = flow_cls_offload_flow_rule(flow);  in nfp_flower_compile_mac()
    133  struct flow_cls_offload *flow)  in nfp_flower_compile_tport() argument
    135  struct flow_rule *rule = flow_cls_offload_flow_rule(flow);  in nfp_flower_compile_tport()
    154  struct flow_cls_offload *flow)  in nfp_flower_compile_ip_ext() argument
    156  struct flow_rule *rule = flow_cls_offload_flow_rule(flow);  in nfp_flower_compile_ip_ext()
    228  struct flow_cls_offload *flow)  in nfp_flower_compile_ipv4() argument
    230  struct flow_rule *rule = flow_cls_offload_flow_rule(flow);  in nfp_flower_compile_ipv4()
    [all …]
/Linux-v5.4/drivers/net/ethernet/intel/igc/
D  igc_mac.c
    75  * igc_set_fc_watermarks - Set flow control high/low watermarks
    78  * Sets the flow control high/low threshold (watermark) registers. If
    79  * flow control XON frame transmission is enabled, then set XON frame
    86  /* Set the flow control receive threshold registers. Normally,  in igc_set_fc_watermarks()
    110  * igc_setup_link - Setup flow control and link settings
    113  * Determines which flow control settings to use, then configures flow
    129  /* If requested flow control is set to default, set flow control  in igc_setup_link()
    135  /* We want to save off the original Flow Control configuration just  in igc_setup_link()
    137  * hub or switch with different Flow Control capabilities.  in igc_setup_link()
    148  /* Initialize the flow control address, type, and PAUSE timer  in igc_setup_link()
    [all …]
/Linux-v5.4/samples/bpf/
D  sockex2_kern.c
    62  struct flow_key_record *flow)  in parse_ip() argument
    72  flow->src = load_word(skb, nhoff + offsetof(struct iphdr, saddr));  in parse_ip()
    73  flow->dst = load_word(skb, nhoff + offsetof(struct iphdr, daddr));  in parse_ip()
    86  struct flow_key_record *flow)  in parse_ipv6() argument
    90  flow->src = ipv6_addr_hash(skb,  in parse_ipv6()
    92  flow->dst = ipv6_addr_hash(skb,  in parse_ipv6()
    100  struct flow_key_record *flow)  in flow_dissector() argument
    120  nhoff = parse_ip(skb, nhoff, &ip_proto, flow);  in flow_dissector()
    122  nhoff = parse_ipv6(skb, nhoff, &ip_proto, flow);  in flow_dissector()
    158  nhoff = parse_ip(skb, nhoff, &ip_proto, flow);  in flow_dissector()
    [all …]
/Linux-v5.4/drivers/firmware/
D  ti_sci.h
    788  * struct ti_sci_msg_udmap_rx_flow_cfg - UDMAP receive flow configuration
    791  * @nav_id: SoC Navigator Subsystem device ID from which the receive flow is
    793  * @flow_index: UDMAP receive flow index for non-optional configuration.
    795  * @rx_einfo_present: UDMAP receive flow extended packet info present.
    796  * @rx_psinfo_present: UDMAP receive flow PS words present.
    797  * @rx_error_handling: UDMAP receive flow error handling configuration. Valid
    799  * @rx_desc_type: UDMAP receive flow descriptor type. It can be one of
    801  * @rx_sop_offset: UDMAP receive flow start of packet offset.
    802  * @rx_dest_qnum: UDMAP receive flow destination queue number.
    803  * @rx_ps_location: UDMAP receive flow PS words location.
    [all …]
/Linux-v5.4/drivers/infiniband/hw/usnic/
D  usnic_fwd.c
    203  struct usnic_fwd_flow *flow;  in usnic_fwd_alloc_flow() local
    213  flow = kzalloc(sizeof(*flow), GFP_ATOMIC);  in usnic_fwd_alloc_flow()
    214  if (!flow)  in usnic_fwd_alloc_flow()
    255  flow->flow_id = (uint32_t) a0;  in usnic_fwd_alloc_flow()
    256  flow->vnic_idx = uaction->vnic_idx;  in usnic_fwd_alloc_flow()
    257  flow->ufdev = ufdev;  in usnic_fwd_alloc_flow()
    263  return flow;  in usnic_fwd_alloc_flow()
    265  kfree(flow);  in usnic_fwd_alloc_flow()
    269  int usnic_fwd_dealloc_flow(struct usnic_fwd_flow *flow)  in usnic_fwd_dealloc_flow() argument
    274  a0 = flow->flow_id;  in usnic_fwd_dealloc_flow()
    [all …]
/Linux-v5.4/tools/testing/selftests/bpf/progs/
D  test_xdp_noinline.c
    122  struct flow_key flow;  member
    244  pckt->flow.port16[0] = udp->source;  in parse_udp()
    245  pckt->flow.port16[1] = udp->dest;  in parse_udp()
    247  pckt->flow.port16[0] = udp->dest;  in parse_udp()
    248  pckt->flow.port16[1] = udp->source;  in parse_udp()
    268  pckt->flow.port16[0] = tcp->source;  in parse_tcp()
    269  pckt->flow.port16[1] = tcp->dest;  in parse_tcp()
    271  pckt->flow.port16[0] = tcp->dest;  in parse_tcp()
    272  pckt->flow.port16[1] = tcp->source;  in parse_tcp()
    307  ip_suffix = pckt->flow.srcv6[3] ^ pckt->flow.port16[0];  in encap_v6()
    [all …]