1 /*
2 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34 #ifndef __MLX5E_EN_ACCEL_H__
35 #define __MLX5E_EN_ACCEL_H__
36
37 #include <linux/skbuff.h>
38 #include <linux/netdevice.h>
39 #include "en_accel/ipsec_rxtx.h"
40 #include "en_accel/tls.h"
41 #include "en_accel/tls_rxtx.h"
42 #include "en.h"
43 #include "en/txrx.h"
44
45 #if IS_ENABLED(CONFIG_GENEVE)
46 #include <net/geneve.h>
47
/* Geneve TX offload relies on software-parser (SWP) hints in the send
 * WQE; only allow it when the device reports SWP support.
 */
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
	return mlx5_tx_swp_supported(mdev);
}
52
53 static inline void
mlx5e_tx_tunnel_accel(struct sk_buff * skb,struct mlx5_wqe_eth_seg * eseg)54 mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
55 {
56 struct mlx5e_swp_spec swp_spec = {};
57 unsigned int offset = 0;
58 __be16 l3_proto;
59 u8 l4_proto;
60
61 l3_proto = vlan_get_protocol(skb);
62 switch (l3_proto) {
63 case htons(ETH_P_IP):
64 l4_proto = ip_hdr(skb)->protocol;
65 break;
66 case htons(ETH_P_IPV6):
67 l4_proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
68 break;
69 default:
70 return;
71 }
72
73 if (l4_proto != IPPROTO_UDP ||
74 udp_hdr(skb)->dest != cpu_to_be16(GENEVE_UDP_PORT))
75 return;
76 swp_spec.l3_proto = l3_proto;
77 swp_spec.l4_proto = l4_proto;
78 swp_spec.is_tun = true;
79 if (inner_ip_hdr(skb)->version == 6) {
80 swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
81 swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
82 } else {
83 swp_spec.tun_l3_proto = htons(ETH_P_IP);
84 swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
85 }
86
87 mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
88 }
89
90 #else
/* Geneve support is not compiled in: never allow Geneve TX offload. */
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
	return false;
}
95
96 #endif /* CONFIG_GENEVE */
97
98 static inline void
mlx5e_udp_gso_handle_tx_skb(struct sk_buff * skb)99 mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
100 {
101 int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
102
103 udp_hdr(skb)->len = htons(payload_len);
104 }
105
/* Per-packet TX offload state, carried from mlx5e_accel_tx_begin()
 * through to mlx5e_accel_tx_finish() across the datapath.
 */
struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_accel_tx_tls_state tls;	/* filled by the TLS TX handler */
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_accel_tx_ipsec_state ipsec;	/* filled by the IPsec TX handler */
#endif
};
114
/* Run TX offload pre-processing before the send WQE is built, recording
 * per-offload state in @state for mlx5e_accel_tx_finish().
 *
 * Returns false when an offload handler consumed or rejected the skb;
 * the caller must then not transmit it. TLS runs before IPsec —
 * NOTE(review): the TLS handler may itself emit SKBs/WQEs (see comment
 * below), so this ordering looks intentional; do not reorder.
 */
static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
					struct mlx5e_txqsq *sq,
					struct sk_buff *skb,
					struct mlx5e_accel_tx_state *state)
{
	/* UDP GSO: stamp the per-segment UDP length before segmentation */
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		mlx5e_udp_gso_handle_tx_skb(skb);

#ifdef CONFIG_MLX5_EN_TLS
	if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
		/* May send SKBs and WQEs. */
		if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
			return false;
	}
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	/* Only packets with an xfrm offload context are IPsec candidates */
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
		if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
			return false;
	}
#endif

	return true;
}
140
mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state * state)141 static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *state)
142 {
143 #ifdef CONFIG_MLX5_EN_IPSEC
144 return mlx5e_ipsec_is_tx_flow(&state->ipsec);
145 #endif
146
147 return false;
148 }
149
/* Extra WQE length contributed by the IPsec offload for this packet;
 * 0 when IPsec offload is compiled out or inactive on this SQ.
 * NOTE(review): exact units/semantics come from mlx5e_ipsec_tx_ids_len()
 * in en_accel/ipsec_rxtx.h — confirm there.
 */
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
						  struct mlx5e_accel_tx_state *state)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
		return mlx5e_ipsec_tx_ids_len(&state->ipsec);
#endif

	return 0;
}
160
/* Part of the eseg touched by TX offloads: the byte range of
 * struct mlx5_wqe_eth_seg preceding the mss field.
 */
#define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)
163
/* Build the offload-owned leading portion of the eseg (at most
 * MLX5E_ACCEL_ESEG_LEN bytes): IPsec fields for xfrm-offloaded skbs and
 * SWP hints for encapsulated (Geneve) skbs.
 *
 * Always returns true in this version; the bool return is kept for the
 * caller's error-propagation convention.
 */
static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
				       struct sk_buff *skb,
				       struct mlx5_wqe_eth_seg *eseg)
{
#ifdef CONFIG_MLX5_EN_IPSEC
	if (xfrm_offload(skb))
		mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
#endif

#if IS_ENABLED(CONFIG_GENEVE)
	if (skb->encapsulation)
		mlx5e_tx_tunnel_accel(skb, eseg);
#endif

	return true;
}
180
/* Finalize TX offloads once the WQE has been built: let TLS patch the
 * control segment and let IPsec append its trailer metadata to @inlseg,
 * using the state captured by mlx5e_accel_tx_begin().
 */
static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
					 struct mlx5e_tx_wqe *wqe,
					 struct mlx5e_accel_tx_state *state,
					 struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
	mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	/* Only act when IPsec selected this skb (xo set) and there is
	 * trailer data to emit (tailen non-zero).
	 */
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
	    state->ipsec.xo && state->ipsec.tailen)
		mlx5e_ipsec_handle_tx_wqe(wqe, &state->ipsec, inlseg);
#endif
}
196
/* Initialize RX-side acceleration; currently only kTLS RX.
 * Returns 0 on success or a negative errno from mlx5e_ktls_init_rx().
 */
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
{
	return mlx5e_ktls_init_rx(priv);
}
201
/* Tear down RX-side acceleration; mirrors mlx5e_accel_init_rx(). */
static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
	mlx5e_ktls_cleanup_rx(priv);
}
206 #endif /* __MLX5E_EN_ACCEL_H__ */
207