
Searched refs:mtu (Results 1 – 25 of 879) sorted by relevance


/Linux-v5.15/drivers/usb/mtu3/
mtu3_gadget.c
15 __releases(mep->mtu->lock) in mtu3_req_complete()
16 __acquires(mep->mtu->lock) in mtu3_req_complete()
19 struct mtu3 *mtu = mreq->mtu; in mtu3_req_complete() local
26 spin_unlock(&mtu->lock); in mtu3_req_complete()
30 usb_gadget_unmap_request(&mtu->g, req, mep->is_in); in mtu3_req_complete()
32 dev_dbg(mtu->dev, "%s complete req: %p, sts %d, %d/%d\n", in mtu3_req_complete()
36 spin_lock(&mtu->lock); in mtu3_req_complete()
46 dev_dbg(mep->mtu->dev, "abort %s's req: sts %d\n", mep->name, status); in nuke()
63 struct mtu3 *mtu = mep->mtu; in mtu3_ep_enable() local
74 switch (mtu->g.speed) { in mtu3_ep_enable()
[all …]
mtu3_core.c
45 dev_dbg(mep->mtu->dev, "%s fifo:%#x/%#x, start_bit: %d\n", in ep_fifo_alloc()
66 dev_dbg(mep->mtu->dev, "%s size:%#x/%#x, start_bit: %d\n", in ep_fifo_free()
71 static inline void mtu3_ss_func_set(struct mtu3 *mtu, bool enable) in mtu3_ss_func_set() argument
75 mtu3_setbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN); in mtu3_ss_func_set()
77 mtu3_clrbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN); in mtu3_ss_func_set()
79 dev_dbg(mtu->dev, "USB3_EN = %d\n", !!enable); in mtu3_ss_func_set()
83 static inline void mtu3_hs_softconn_set(struct mtu3 *mtu, bool enable) in mtu3_hs_softconn_set() argument
86 mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT, in mtu3_hs_softconn_set()
89 mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT, in mtu3_hs_softconn_set()
92 dev_dbg(mtu->dev, "SOFTCONN = %d\n", !!enable); in mtu3_hs_softconn_set()
[all …]
mtu3_gadget_ep0.c
18 #define next_ep0_request(mtu) next_request((mtu)->ep0) argument
39 static char *decode_ep0_state(struct mtu3 *mtu) in decode_ep0_state() argument
41 switch (mtu->ep0_state) { in decode_ep0_state()
57 static void ep0_req_giveback(struct mtu3 *mtu, struct usb_request *req) in ep0_req_giveback() argument
59 mtu3_req_complete(mtu->ep0, req, 0); in ep0_req_giveback()
63 forward_to_driver(struct mtu3 *mtu, const struct usb_ctrlrequest *setup) in forward_to_driver() argument
64 __releases(mtu->lock) in forward_to_driver()
65 __acquires(mtu->lock) in forward_to_driver()
69 if (!mtu->gadget_driver) in forward_to_driver()
72 spin_unlock(&mtu->lock); in forward_to_driver()
[all …]
mtu3_qmu.c
38 #define GPD_RX_BUF_LEN(mtu, x) \ argument
41 ((mtu)->gen2cp) ? GPD_RX_BUF_LEN_EL(x_) : GPD_RX_BUF_LEN_OG(x_); \
46 #define GPD_DATA_LEN(mtu, x) \ argument
49 ((mtu)->gen2cp) ? GPD_DATA_LEN_EL(x_) : GPD_DATA_LEN_OG(x_); \
57 #define GPD_EXT_NGP(mtu, x) \ argument
60 ((mtu)->gen2cp) ? GPD_EXT_NGP_EL(x_) : GPD_EXT_NGP_OG(x_); \
63 #define GPD_EXT_BUF(mtu, x) \ argument
66 ((mtu)->gen2cp) ? GPD_EXT_BUF_EL(x_) : GPD_EXT_BUF_OG(x_); \
171 gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma); in mtu3_gpd_ring_alloc()
184 dma_pool_free(mep->mtu->qmu_gpd_pool, in mtu3_gpd_ring_free()
[all …]
mtu3_debugfs.c
81 struct mtu3 *mtu = sf->private; in mtu3_link_state_show() local
82 void __iomem *mbase = mtu->mac_base; in mtu3_link_state_show()
93 struct mtu3 *mtu = sf->private; in mtu3_ep_used_show() local
99 spin_lock_irqsave(&mtu->lock, flags); in mtu3_ep_used_show()
101 for (i = 0; i < mtu->num_eps; i++) { in mtu3_ep_used_show()
102 mep = mtu->in_eps + i; in mtu3_ep_used_show()
108 mep = mtu->out_eps + i; in mtu3_ep_used_show()
116 spin_unlock_irqrestore(&mtu->lock, flags); in mtu3_ep_used_show()
124 static void mtu3_debugfs_regset(struct mtu3 *mtu, void __iomem *base, in mtu3_debugfs_regset() argument
131 mregs = devm_kzalloc(mtu->dev, sizeof(*mregs), GFP_KERNEL); in mtu3_debugfs_regset()
[all …]
mtu3.h
276 struct mtu3 *mtu; member
299 struct mtu3 *mtu; member
419 int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
421 void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep);
423 void mtu3_start(struct mtu3 *mtu);
424 void mtu3_stop(struct mtu3 *mtu);
425 void mtu3_dev_on_off(struct mtu3 *mtu, int is_on);
427 int mtu3_gadget_setup(struct mtu3 *mtu);
428 void mtu3_gadget_cleanup(struct mtu3 *mtu);
429 void mtu3_gadget_reset(struct mtu3 *mtu);
[all …]
/Linux-v5.15/drivers/clocksource/
sh_mtu2.c
33 struct sh_mtu2_device *mtu; member
161 return ioread8(ch->mtu->mapbase + 0x280); in sh_mtu2_read()
177 return iowrite8(value, ch->mtu->mapbase + 0x280); in sh_mtu2_write()
192 raw_spin_lock_irqsave(&ch->mtu->lock, flags); in sh_mtu2_start_stop_ch()
201 raw_spin_unlock_irqrestore(&ch->mtu->lock, flags); in sh_mtu2_start_stop_ch()
210 pm_runtime_get_sync(&ch->mtu->pdev->dev); in sh_mtu2_enable()
211 dev_pm_syscore_device(&ch->mtu->pdev->dev, true); in sh_mtu2_enable()
214 ret = clk_enable(ch->mtu->clk); in sh_mtu2_enable()
216 dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n", in sh_mtu2_enable()
224 rate = clk_get_rate(ch->mtu->clk) / 64; in sh_mtu2_enable()
[all …]
/Linux-v5.15/tools/testing/selftests/net/
pmtu.sh
907 mtu() { function
910 mtu="${3}"
912 ${ns_cmd} ip link set dev ${dev} mtu ${mtu}
988 mtu "${ns_a}" veth_A-R1 2000
989 mtu "${ns_r1}" veth_R1-A 2000
990 mtu "${ns_r1}" veth_R1-B 1400
991 mtu "${ns_b}" veth_B-R1 1400
993 mtu "${ns_a}" veth_A-R2 2000
994 mtu "${ns_r2}" veth_R2-A 2000
995 mtu "${ns_r2}" veth_R2-B 1500
[all …]
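
The pmtu.sh helper above wraps "ip link set dev <dev> mtu <mtu>" inside a network namespace. As a rough standalone illustration (not part of the selftest), the same operation can be done from C with the SIOCSIFMTU ioctl; the interface name below mirrors the selftest's veth naming and is only an example.

/* Minimal sketch: set an interface MTU via SIOCSIFMTU, the ioctl
 * equivalent of "ip link set dev <ifname> mtu <mtu>". Illustrative only;
 * requires CAP_NET_ADMIN. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

static int set_mtu(const char *ifname, int mtu)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_mtu = mtu;
	if (ioctl(fd, SIOCSIFMTU, &ifr) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	/* "veth_A-R1" mirrors the device names used by pmtu.sh; adjust as needed. */
	if (set_mtu("veth_A-R1", 1400))
		perror("SIOCSIFMTU");
	return 0;
}
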
fib_nexthop_multiprefix.sh
156 local mtu=$2
158 run_cmd ip -netns h${hostid} li set eth0 mtu ${mtu}
159 run_cmd ip -netns r1 li set eth${hostid} mtu ${mtu}
168 local mtu=$2
183 echo " cache .* mtu ${mtu}"
188 grep -q "cache .* mtu ${mtu}"
191 log_test $rc 0 "IPv4: host 0 to host ${i}, mtu ${mtu}"
197 local mtu=$2
212 echo " ${dst} from :: via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
217 grep -q "${dst} from :: via ${r1} dev eth0 src ${h0} .* mtu ${mtu}"
[all …]
/Linux-v5.15/tools/testing/selftests/net/forwarding/
ipip_lib.sh
320 local mtu=$1
322 ip link set mtu $mtu dev $h1
323 ip link set mtu $mtu dev $ol1
324 ip link set mtu $mtu dev g1a
325 ip link set mtu $mtu dev $ul1
326 ip link set mtu $mtu dev $ul1.111
327 ip link set mtu $mtu dev $h2
328 ip link set mtu $mtu dev $ol2
329 ip link set mtu $mtu dev g2a
330 ip link set mtu $mtu dev $ul2
[all …]
/Linux-v5.15/drivers/infiniband/sw/rxe/
rxe_param.h
12 static inline enum ib_mtu rxe_mtu_int_to_enum(int mtu) in rxe_mtu_int_to_enum() argument
14 if (mtu < 256) in rxe_mtu_int_to_enum()
16 else if (mtu < 512) in rxe_mtu_int_to_enum()
18 else if (mtu < 1024) in rxe_mtu_int_to_enum()
20 else if (mtu < 2048) in rxe_mtu_int_to_enum()
22 else if (mtu < 4096) in rxe_mtu_int_to_enum()
29 static inline enum ib_mtu eth_mtu_int_to_enum(int mtu) in eth_mtu_int_to_enum() argument
31 mtu -= RXE_MAX_HDR_LENGTH; in eth_mtu_int_to_enum()
33 return rxe_mtu_int_to_enum(mtu); in eth_mtu_int_to_enum()
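
The rxe_param.h hits above bucket a byte-count MTU into the next-lower IB MTU class (256/512/1024/2048/4096), and eth_mtu_int_to_enum() first subtracts the RoCE header overhead. A self-contained userspace sketch of the same ladder follows; the enum is redefined locally so the example builds on its own and is not the kernel's enum ib_mtu.

/* Standalone sketch of the MTU-to-enum bucketing shown in rxe_param.h.
 * The enum below is redefined here only so the example compiles alone. */
#include <stdio.h>

enum sketch_ib_mtu {          /* illustrative stand-in for enum ib_mtu */
	SK_MTU_INVALID = 0,
	SK_MTU_256,
	SK_MTU_512,
	SK_MTU_1024,
	SK_MTU_2048,
	SK_MTU_4096,
};

static enum sketch_ib_mtu mtu_int_to_enum(int mtu)
{
	if (mtu < 256)
		return SK_MTU_INVALID;   /* too small for any IB MTU class */
	else if (mtu < 512)
		return SK_MTU_256;
	else if (mtu < 1024)
		return SK_MTU_512;
	else if (mtu < 2048)
		return SK_MTU_1024;
	else if (mtu < 4096)
		return SK_MTU_2048;
	else
		return SK_MTU_4096;
}

int main(void)
{
	printf("1500 -> class %d\n", mtu_int_to_enum(1500));   /* SK_MTU_1024 */
	return 0;
}
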
/Linux-v5.15/tools/testing/selftests/bpf/prog_tests/
check_mtu.c
111 static void test_check_mtu_xdp(__u32 mtu, __u32 ifindex) in test_check_mtu_xdp() argument
121 skel->rodata->GLOBAL_USER_MTU = mtu; in test_check_mtu_xdp()
128 test_check_mtu_run_xdp(skel, skel->progs.xdp_use_helper, mtu); in test_check_mtu_xdp()
129 test_check_mtu_run_xdp(skel, skel->progs.xdp_exceed_mtu, mtu); in test_check_mtu_xdp()
130 test_check_mtu_run_xdp(skel, skel->progs.xdp_minus_delta, mtu); in test_check_mtu_xdp()
131 test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len, mtu); in test_check_mtu_xdp()
132 test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len_exceed, mtu); in test_check_mtu_xdp()
171 static void test_check_mtu_tc(__u32 mtu, __u32 ifindex) in test_check_mtu_tc() argument
181 skel->rodata->GLOBAL_USER_MTU = mtu; in test_check_mtu_tc()
188 test_check_mtu_run_tc(skel, skel->progs.tc_use_helper, mtu); in test_check_mtu_tc()
[all …]
/Linux-v5.15/net/ipv6/
xfrm6_output.c
19 void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu) in xfrm6_local_rxpmtu() argument
27 ipv6_local_rxpmtu(sk, &fl6, mtu); in xfrm6_local_rxpmtu()
30 void xfrm6_local_error(struct sk_buff *skb, u32 mtu) in xfrm6_local_error() argument
40 ipv6_local_error(sk, EMSGSIZE, &fl6, mtu); in xfrm6_local_error()
52 unsigned int mtu; in __xfrm6_output() local
66 mtu = ip6_skb_dst_mtu(skb); in __xfrm6_output()
68 mtu = dst_mtu(skb_dst(skb)); in __xfrm6_output()
70 toobig = skb->len > mtu && !skb_is_gso(skb); in __xfrm6_output()
73 xfrm6_local_rxpmtu(skb, mtu); in __xfrm6_output()
77 xfrm_local_error(skb, mtu); in __xfrm6_output()
ip6_output.c
139 struct sk_buff *skb, unsigned int mtu) in ip6_finish_output_gso_slowpath_drop() argument
172 unsigned int mtu; in __ip6_finish_output() local
182 mtu = ip6_skb_dst_mtu(skb); in __ip6_finish_output()
183 if (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)) in __ip6_finish_output()
184 return ip6_finish_output_gso_slowpath_drop(net, sk, skb, mtu); in __ip6_finish_output()
186 if ((skb->len > mtu && !skb_is_gso(skb)) || in __ip6_finish_output()
259 u32 mtu; in ip6_xmit() local
310 mtu = dst_mtu(dst); in ip6_xmit()
311 if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) { in ip6_xmit()
333 ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu); in ip6_xmit()
[all …]
netfilter.c
127 unsigned int mtu, hlen; in br_ip6_fragment() local
137 mtu = skb->dev->mtu; in br_ip6_fragment()
138 if (frag_max_size > mtu || in br_ip6_fragment()
142 mtu = frag_max_size; in br_ip6_fragment()
143 if (mtu < hlen + sizeof(struct frag_hdr) + 8) in br_ip6_fragment()
145 mtu -= hlen + sizeof(struct frag_hdr); in br_ip6_fragment()
160 if (first_len - hlen > mtu || in br_ip6_fragment()
168 if (frag2->len > mtu || in br_ip6_fragment()
209 ip6_frag_init(skb, hlen, mtu, skb->dev->needed_tailroom, in br_ip6_fragment()
/Linux-v5.15/net/rxrpc/
peer_event.c
109 u32 mtu = serr->ee.ee_info; in rxrpc_adjust_mtu() local
111 _net("Rx ICMP Fragmentation Needed (%d)", mtu); in rxrpc_adjust_mtu()
114 if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) { in rxrpc_adjust_mtu()
115 peer->if_mtu = mtu; in rxrpc_adjust_mtu()
116 _net("I/F MTU %u", mtu); in rxrpc_adjust_mtu()
119 if (mtu == 0) { in rxrpc_adjust_mtu()
121 mtu = peer->if_mtu; in rxrpc_adjust_mtu()
122 if (mtu > 1500) { in rxrpc_adjust_mtu()
123 mtu >>= 1; in rxrpc_adjust_mtu()
124 if (mtu < 1500) in rxrpc_adjust_mtu()
[all …]
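
The peer_event.c hits show rxrpc reacting to an ICMP "Fragmentation Needed" report: a non-zero reported MTU that lowers the current estimate is accepted, otherwise the code probes downward from the current value. The sketch below is a loose, standalone restatement of that policy, with the extra guards visible in the kernel (e.g. the if_mtu == 65535 check) simplified away.

/* Sketch of a PMTU adjustment policy in the spirit of rxrpc_adjust_mtu():
 * accept a smaller reported MTU, otherwise probe downward by halving.
 * Structure and constants are illustrative, not the kernel's. */
#include <stdio.h>

struct peer_sketch {
	unsigned int if_mtu;   /* current path MTU estimate */
};

static void adjust_mtu(struct peer_sketch *peer, unsigned int reported_mtu)
{
	unsigned int mtu = reported_mtu;

	/* A concrete, smaller report wins outright. */
	if (mtu > 0 && mtu < peer->if_mtu) {
		peer->if_mtu = mtu;
		return;
	}

	/* No usable report: halve the current estimate, with a floor
	 * around the conventional Ethernet MTU. */
	if (mtu == 0) {
		mtu = peer->if_mtu;
		if (mtu > 1500) {
			mtu >>= 1;
			if (mtu < 1500)
				mtu = 1500;
		} else {
			mtu -= 100;    /* illustrative small step below 1500 */
		}
		if (mtu < peer->if_mtu)
			peer->if_mtu = mtu;
	}
}

int main(void)
{
	struct peer_sketch p = { .if_mtu = 65535 };

	adjust_mtu(&p, 1400);                     /* ICMP reported 1400 */
	printf("path mtu: %u\n", p.if_mtu);       /* 1400 */
	return 0;
}
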
/Linux-v5.15/include/net/
ip6_route.h
185 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
187 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
203 void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
268 unsigned int mtu; in ip6_skb_dst_mtu() local
274 mtu = READ_ONCE(skb_dst(skb)->dev->mtu); in ip6_skb_dst_mtu()
275 mtu -= lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu); in ip6_skb_dst_mtu()
277 mtu = dst_mtu(skb_dst(skb)); in ip6_skb_dst_mtu()
279 return mtu; in ip6_skb_dst_mtu()
323 unsigned int mtu; in ip6_dst_mtu_maybe_forward() local
326 mtu = dst_metric_raw(dst, RTAX_MTU); in ip6_dst_mtu_maybe_forward()
[all …]
ip.h
193 unsigned int mtu; member
201 unsigned int mtu, bool DF, struct ip_frag_state *state);
441 unsigned int mtu; in ip_dst_mtu_maybe_forward() local
446 mtu = rt->rt_pmtu; in ip_dst_mtu_maybe_forward()
447 if (mtu && time_before(jiffies, rt->dst.expires)) in ip_dst_mtu_maybe_forward()
452 mtu = dst_metric_raw(dst, RTAX_MTU); in ip_dst_mtu_maybe_forward()
453 if (mtu) in ip_dst_mtu_maybe_forward()
456 mtu = READ_ONCE(dst->dev->mtu); in ip_dst_mtu_maybe_forward()
459 if (rt->rt_uses_gateway && mtu > 576) in ip_dst_mtu_maybe_forward()
460 mtu = 576; in ip_dst_mtu_maybe_forward()
[all …]
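
The ip.h hits show the order in which ip_dst_mtu_maybe_forward() picks an MTU: a cached, unexpired rt_pmtu, then the RTAX_MTU route metric, then the device MTU, with a legacy 576-byte clamp in a gateway case whose exact guard conditions are elided above. A simplified standalone sketch of that selection order (the struct is a placeholder, not the kernel's rtable/dst_entry):

/* Sketch of the MTU selection order visible in ip_dst_mtu_maybe_forward():
 * cached PMTU -> route metric -> device MTU, with a legacy 576 clamp. */
#include <stdbool.h>
#include <stdio.h>

struct route_sketch {
	unsigned int rt_pmtu;      /* learned path MTU, 0 if none */
	bool         pmtu_valid;   /* stand-in for time_before(jiffies, expires) */
	unsigned int metric_mtu;   /* stand-in for dst_metric_raw(dst, RTAX_MTU) */
	unsigned int dev_mtu;      /* underlying device MTU */
	bool         uses_gateway;
};

static unsigned int dst_mtu_sketch(const struct route_sketch *rt)
{
	unsigned int mtu = 0;

	/* 1) learned path MTU, if still valid (the kernel gates this on
	 *    sysctls and a forwarding flag elided from the hits above) */
	if (rt->rt_pmtu && rt->pmtu_valid)
		mtu = rt->rt_pmtu;

	/* 2) explicit route metric */
	if (!mtu)
		mtu = rt->metric_mtu;

	/* 3) device MTU, with the legacy 576-byte clamp the kernel applies
	 *    only in a locked-MTU gateway case (guards elided above) */
	if (!mtu) {
		mtu = rt->dev_mtu;
		if (rt->uses_gateway && mtu > 576)
			mtu = 576;
	}
	return mtu;
}

int main(void)
{
	struct route_sketch rt = { .dev_mtu = 1500, .uses_gateway = true };

	printf("forwarding mtu = %u\n", dst_mtu_sketch(&rt));   /* 576 */
	return 0;
}
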
/Linux-v5.15/net/ipv4/
ip_forward.c
43 static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu) in ip_exceeds_mtu() argument
45 if (skb->len <= mtu) in ip_exceeds_mtu()
52 if (unlikely(IPCB(skb)->frag_max_size > mtu)) in ip_exceeds_mtu()
58 if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)) in ip_exceeds_mtu()
88 u32 mtu; in ip_forward() local
130 mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); in ip_forward()
131 if (ip_exceeds_mtu(skb, mtu)) { in ip_forward()
134 htonl(mtu)); in ip_forward()
ip_tunnel.c
285 int mtu = ETH_DATA_LEN; in ip_tunnel_bind_dev() local
316 mtu = min(tdev->mtu, IP_MAX_MTU); in ip_tunnel_bind_dev()
320 mtu -= t_hlen + (dev->type == ARPHRD_ETHER ? dev->hard_header_len : 0); in ip_tunnel_bind_dev()
322 if (mtu < IPV4_MIN_MTU) in ip_tunnel_bind_dev()
323 mtu = IPV4_MIN_MTU; in ip_tunnel_bind_dev()
325 return mtu; in ip_tunnel_bind_dev()
335 int mtu; in ip_tunnel_create() local
342 mtu = ip_tunnel_bind_dev(dev); in ip_tunnel_create()
343 err = dev_set_mtu(dev, mtu); in ip_tunnel_create()
491 int mtu; in tnl_update_pmtu() local
[all …]
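
The ip_tunnel.c hits derive a tunnel device's MTU from the underlay: cap the lower device's MTU at IP_MAX_MTU, subtract the tunnel header length, and never go below the IPv4 minimum. A small standalone sketch of that arithmetic, with the kernel constants spelled out as local defines for illustration:

/* Sketch of the tunnel-MTU derivation visible in ip_tunnel_bind_dev():
 * underlay MTU minus tunnel header, clamped to the IPv4 minimum. */
#include <stdio.h>

#define SK_ETH_DATA_LEN  1500
#define SK_IP_MAX_MTU    0xFFFF
#define SK_IPV4_MIN_MTU  68        /* RFC 791 minimum */

static int tunnel_mtu(int underlay_mtu, int tunnel_hlen)
{
	int mtu = SK_ETH_DATA_LEN;

	if (underlay_mtu > 0)
		mtu = underlay_mtu < SK_IP_MAX_MTU ? underlay_mtu : SK_IP_MAX_MTU;

	mtu -= tunnel_hlen;            /* room for the encapsulation header */
	if (mtu < SK_IPV4_MIN_MTU)
		mtu = SK_IPV4_MIN_MTU;
	return mtu;
}

int main(void)
{
	/* e.g. IPIP over a 1500-byte link: 20-byte outer IPv4 header */
	printf("tunnel mtu = %d\n", tunnel_mtu(1500, 20));   /* 1480 */
	return 0;
}
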
ip_output.c
88 unsigned int mtu,
234 struct sk_buff *skb, unsigned int mtu) in ip_finish_output_gso() argument
242 if (skb_gso_validate_network_len(skb, mtu)) in ip_finish_output_gso()
272 err = ip_fragment(net, sk, segs, mtu, ip_finish_output2); in ip_finish_output_gso()
283 unsigned int mtu; in __ip_finish_output() local
292 mtu = ip_skb_dst_mtu(sk, skb); in __ip_finish_output()
294 return ip_finish_output_gso(net, sk, skb, mtu); in __ip_finish_output()
296 if (skb->len > mtu || IPCB(skb)->frag_max_size) in __ip_finish_output()
297 return ip_fragment(net, sk, skb, mtu, ip_finish_output2); in __ip_finish_output()
568 unsigned int mtu, in ip_fragment() argument
[all …]
/Linux-v5.15/net/sched/
sch_teql.c
194 dev->mtu < m->dev->mtu) in teql_qdisc_init()
203 if (dev->mtu < m->dev->mtu) in teql_qdisc_init()
204 m->dev->mtu = dev->mtu; in teql_qdisc_init()
211 m->dev->mtu = dev->mtu; in teql_qdisc_init()
360 int mtu = 0xFFFE; in teql_master_open() local
375 if (slave->mtu < mtu) in teql_master_open()
376 mtu = slave->mtu; in teql_master_open()
392 m->dev->mtu = mtu; in teql_master_open()
423 if (new_mtu > qdisc_dev(q)->mtu) in teql_master_mtu()
428 dev->mtu = new_mtu; in teql_master_mtu()
[all …]
sch_tbf.c
102 s64 mtu; member
272 if (ptoks > q->mtu) in tbf_dequeue()
273 ptoks = q->mtu; in tbf_dequeue()
323 q->ptokens = q->mtu; in tbf_reset()
348 s64 buffer, mtu; in tbf_change() local
372 mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U); in tbf_change()
399 mtu = psched_l2t_ns(&peak, pburst); in tbf_change()
401 max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu)); in tbf_change()
441 q->mtu = mtu; in tbf_change()
443 q->mtu = PSCHED_TICKS2NS(qopt->mtu); in tbf_change()
[all …]
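
The sch_tbf.c hits are from the Token Bucket Filter, where q->mtu caps the peak-rate token count (ptoks is clamped to q->mtu). For orientation only, below is a generic self-contained token-bucket admission sketch in bytes; the real TBF qdisc accounts tokens in scaled transmission-time units (psched_l2t_ns above), not bytes.

/* Generic token-bucket sketch: refill by elapsed time, cap at the bucket
 * depth (the role q->mtu/q->buffer play above), spend tokens per packet. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct bucket_sketch {
	int64_t tokens;    /* current credit, in bytes */
	int64_t depth;     /* maximum credit (burst) */
	int64_t rate;      /* refill rate, bytes per second */
	int64_t last_ns;   /* timestamp of the last refill */
};

static bool bucket_admit(struct bucket_sketch *b, int64_t now_ns, int64_t pkt_len)
{
	b->tokens += (now_ns - b->last_ns) * b->rate / 1000000000LL;
	if (b->tokens > b->depth)
		b->tokens = b->depth;      /* clamp, like ptoks = min(ptoks, q->mtu) */
	b->last_ns = now_ns;

	if (b->tokens < pkt_len)
		return false;              /* not enough credit: delay or drop */
	b->tokens -= pkt_len;
	return true;
}

int main(void)
{
	struct bucket_sketch b = { .tokens = 3000, .depth = 3000, .rate = 125000 };

	printf("1500B packet admitted: %d\n", bucket_admit(&b, 0, 1500));   /* 1 */
	printf("another 1500B packet:  %d\n", bucket_admit(&b, 0, 1500));   /* 1 */
	printf("third, no refill yet:  %d\n", bucket_admit(&b, 0, 1500));   /* 0 */
	return 0;
}
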
/Linux-v5.15/drivers/infiniband/hw/irdma/
main.c
55 static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev) in irdma_log_invalid_mtu() argument
57 if (mtu < IRDMA_MIN_MTU_IPV4) in irdma_log_invalid_mtu()
58 …rn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu); in irdma_log_invalid_mtu()
59 else if (mtu < IRDMA_MIN_MTU_IPV6) in irdma_log_invalid_mtu()
60 …(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\n", mtu); in irdma_log_invalid_mtu()
90 ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu); in irdma_iidc_event_handler()
91 if (iwdev->vsi.mtu != iwdev->netdev->mtu) { in irdma_iidc_event_handler()
92 l2params.mtu = iwdev->netdev->mtu; in irdma_iidc_event_handler()
94 irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev); in irdma_iidc_event_handler()
282 l2params.mtu = iwdev->netdev->mtu; in irdma_probe()
/Linux-v5.15/include/rdma/
ib_addr.h
174 static inline enum ib_mtu iboe_get_mtu(int mtu) in iboe_get_mtu() argument
179 mtu = mtu - (IB_GRH_BYTES + IB_UDP_BYTES + IB_BTH_BYTES + in iboe_get_mtu()
183 if (mtu >= ib_mtu_enum_to_int(IB_MTU_4096)) in iboe_get_mtu()
185 else if (mtu >= ib_mtu_enum_to_int(IB_MTU_2048)) in iboe_get_mtu()
187 else if (mtu >= ib_mtu_enum_to_int(IB_MTU_1024)) in iboe_get_mtu()
189 else if (mtu >= ib_mtu_enum_to_int(IB_MTU_512)) in iboe_get_mtu()
191 else if (mtu >= ib_mtu_enum_to_int(IB_MTU_256)) in iboe_get_mtu()
