1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3
4 #include "fs_core.h"
5 #include "lib/ipsec_fs_roce.h"
6 #include "mlx5_core.h"
7
/* A flow group together with the "miss" rule installed in it; used to
 * catch traffic that did not match the main rule of a table and forward
 * it to a default destination.
 */
struct mlx5_ipsec_miss {
	struct mlx5_flow_group *group;
	struct mlx5_flow_handle *rule;
};
12
/* RX-side RoCE IPsec steering state for one IP family (v4 or v6).
 *
 * @ft/@g/@rule/@roce_miss live in the NIC RX domain: @rule matches
 * RoCE v2 UDP traffic and forwards it into @ft_rdma, while @roce_miss
 * forwards everything else to the caller-supplied default destination.
 * @ft_rdma is created in the RDMA RX namespace @ns_rdma.
 */
struct mlx5_ipsec_rx_roce {
	struct mlx5_flow_group *g;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_handle *rule;
	struct mlx5_ipsec_miss roce_miss;

	struct mlx5_flow_table *ft_rdma;
	struct mlx5_flow_namespace *ns_rdma;
};
22
/* TX-side RoCE IPsec steering state: a single-entry table @ft (with
 * group @g) created in the RDMA TX namespace @ns, whose one rule
 * @rule forwards into the IPsec policy table.
 */
struct mlx5_ipsec_tx_roce {
	struct mlx5_flow_group *g;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_namespace *ns;
};
29
/* Top-level RoCE IPsec steering context: per-family RX state plus the
 * shared TX state. Allocated by mlx5_ipsec_fs_roce_init() and freed by
 * mlx5_ipsec_fs_roce_cleanup().
 */
struct mlx5_ipsec_fs {
	struct mlx5_ipsec_rx_roce ipv4_rx;
	struct mlx5_ipsec_rx_roce ipv6_rx;
	struct mlx5_ipsec_tx_roce tx;
};
35
/* Restrict @spec to UDP packets whose destination port is @dport. */
static void ipsec_fs_roce_setup_udp_dport(struct mlx5_flow_spec *spec,
					  u16 dport)
{
	void *mc = spec->match_criteria;
	void *mv = spec->match_value;

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	/* Match on the IP protocol being UDP... */
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
	/* ...and on the UDP destination port. */
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
	MLX5_SET(fte_match_param, mv, outer_headers.udp_dport, dport);
}
45
46 static int
ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev * mdev,struct mlx5_flow_destination * default_dst,struct mlx5_ipsec_rx_roce * roce)47 ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
48 struct mlx5_flow_destination *default_dst,
49 struct mlx5_ipsec_rx_roce *roce)
50 {
51 struct mlx5_flow_destination dst = {};
52 MLX5_DECLARE_FLOW_ACT(flow_act);
53 struct mlx5_flow_handle *rule;
54 struct mlx5_flow_spec *spec;
55 int err = 0;
56
57 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
58 if (!spec)
59 return -ENOMEM;
60
61 ipsec_fs_roce_setup_udp_dport(spec, ROCE_V2_UDP_DPORT);
62
63 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
64 dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
65 dst.ft = roce->ft_rdma;
66 rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
67 if (IS_ERR(rule)) {
68 err = PTR_ERR(rule);
69 mlx5_core_err(mdev, "Fail to add RX RoCE IPsec rule err=%d\n",
70 err);
71 goto fail_add_rule;
72 }
73
74 roce->rule = rule;
75
76 memset(spec, 0, sizeof(*spec));
77 rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, default_dst, 1);
78 if (IS_ERR(rule)) {
79 err = PTR_ERR(rule);
80 mlx5_core_err(mdev, "Fail to add RX RoCE IPsec miss rule err=%d\n",
81 err);
82 goto fail_add_default_rule;
83 }
84
85 roce->roce_miss.rule = rule;
86
87 kvfree(spec);
88 return 0;
89
90 fail_add_default_rule:
91 mlx5_del_flow_rules(roce->rule);
92 fail_add_rule:
93 kvfree(spec);
94 return err;
95 }
96
ipsec_fs_roce_tx_rule_setup(struct mlx5_core_dev * mdev,struct mlx5_ipsec_tx_roce * roce,struct mlx5_flow_table * pol_ft)97 static int ipsec_fs_roce_tx_rule_setup(struct mlx5_core_dev *mdev,
98 struct mlx5_ipsec_tx_roce *roce,
99 struct mlx5_flow_table *pol_ft)
100 {
101 struct mlx5_flow_destination dst = {};
102 MLX5_DECLARE_FLOW_ACT(flow_act);
103 struct mlx5_flow_handle *rule;
104 int err = 0;
105
106 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
107 dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
108 dst.ft = pol_ft;
109 rule = mlx5_add_flow_rules(roce->ft, NULL, &flow_act, &dst,
110 1);
111 if (IS_ERR(rule)) {
112 err = PTR_ERR(rule);
113 mlx5_core_err(mdev, "Fail to add TX RoCE IPsec rule err=%d\n",
114 err);
115 goto out;
116 }
117 roce->rule = rule;
118
119 out:
120 return err;
121 }
122
mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs * ipsec_roce)123 void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce)
124 {
125 struct mlx5_ipsec_tx_roce *tx_roce;
126
127 if (!ipsec_roce)
128 return;
129
130 tx_roce = &ipsec_roce->tx;
131
132 mlx5_del_flow_rules(tx_roce->rule);
133 mlx5_destroy_flow_group(tx_roce->g);
134 mlx5_destroy_flow_table(tx_roce->ft);
135 }
136
137 #define MLX5_TX_ROCE_GROUP_SIZE BIT(0)
138
mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev * mdev,struct mlx5_ipsec_fs * ipsec_roce,struct mlx5_flow_table * pol_ft)139 int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev,
140 struct mlx5_ipsec_fs *ipsec_roce,
141 struct mlx5_flow_table *pol_ft)
142 {
143 struct mlx5_flow_table_attr ft_attr = {};
144 struct mlx5_ipsec_tx_roce *roce;
145 struct mlx5_flow_table *ft;
146 struct mlx5_flow_group *g;
147 int ix = 0;
148 int err;
149 u32 *in;
150
151 if (!ipsec_roce)
152 return 0;
153
154 roce = &ipsec_roce->tx;
155
156 in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
157 if (!in)
158 return -ENOMEM;
159
160 ft_attr.max_fte = 1;
161 ft = mlx5_create_flow_table(roce->ns, &ft_attr);
162 if (IS_ERR(ft)) {
163 err = PTR_ERR(ft);
164 mlx5_core_err(mdev, "Fail to create RoCE IPsec tx ft err=%d\n", err);
165 goto free_in;
166 }
167
168 roce->ft = ft;
169
170 MLX5_SET_CFG(in, start_flow_index, ix);
171 ix += MLX5_TX_ROCE_GROUP_SIZE;
172 MLX5_SET_CFG(in, end_flow_index, ix - 1);
173 g = mlx5_create_flow_group(ft, in);
174 if (IS_ERR(g)) {
175 err = PTR_ERR(g);
176 mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group err=%d\n", err);
177 goto destroy_table;
178 }
179 roce->g = g;
180
181 err = ipsec_fs_roce_tx_rule_setup(mdev, roce, pol_ft);
182 if (err) {
183 mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err);
184 goto destroy_group;
185 }
186
187 kvfree(in);
188 return 0;
189
190 destroy_group:
191 mlx5_destroy_flow_group(roce->g);
192 destroy_table:
193 mlx5_destroy_flow_table(ft);
194 free_in:
195 kvfree(in);
196 return err;
197 }
198
/* Return the NIC RX RoCE flow table for @family (AF_INET selects the
 * IPv4 state, anything else the IPv6 state), or NULL when RoCE IPsec
 * steering is not initialized.
 */
struct mlx5_flow_table *mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_roce, u32 family)
{
	if (!ipsec_roce)
		return NULL;

	if (family == AF_INET)
		return ipsec_roce->ipv4_rx.ft;

	return ipsec_roce->ipv6_rx.ft;
}
211
/* Tear down the RX RoCE IPsec steering objects of @family in reverse
 * creation order: rules, RDMA table, groups, NIC table. No-op when
 * RoCE steering was not set up.
 */
void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family)
{
	struct mlx5_ipsec_rx_roce *roce;

	if (!ipsec_roce)
		return;

	if (family == AF_INET)
		roce = &ipsec_roce->ipv4_rx;
	else
		roce = &ipsec_roce->ipv6_rx;

	mlx5_del_flow_rules(roce->roce_miss.rule);
	mlx5_del_flow_rules(roce->rule);
	mlx5_destroy_flow_table(roce->ft_rdma);
	mlx5_destroy_flow_group(roce->roce_miss.group);
	mlx5_destroy_flow_group(roce->g);
	mlx5_destroy_flow_table(roce->ft);
}
229
230 #define MLX5_RX_ROCE_GROUP_SIZE BIT(0)
231
/* Create the RX-side RoCE IPsec steering for @family:
 *
 *  1. A two-entry flow table in the NIC RX namespace @ns at the given
 *     @level/@prio, with one group matching UDP dport (for RoCE v2
 *     traffic) and one wildcard miss group.
 *  2. A flow table in the RDMA RX namespace that the matching traffic
 *     is forwarded into.
 *  3. The forwarding and miss rules themselves (via
 *     ipsec_fs_roce_rx_rule_setup()); non-RoCE traffic goes to
 *     @default_dst.
 *
 * Returns 0 on success (also when @ipsec_roce is NULL, i.e. RoCE IPsec
 * steering is not in use) or a negative errno; on failure everything
 * created so far is unwound.
 */
int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev,
				 struct mlx5_ipsec_fs *ipsec_roce,
				 struct mlx5_flow_namespace *ns,
				 struct mlx5_flow_destination *default_dst,
				 u32 family, u32 level, u32 prio)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_ipsec_rx_roce *roce;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	void *outer_headers_c;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	if (!ipsec_roce)
		return 0;

	roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx :
				     &ipsec_roce->ipv6_rx;

	/* NIC RX table: one entry for the RoCE match, one for the miss. */
	ft_attr.max_fte = 2;
	ft_attr.level = level;
	ft_attr.prio = prio;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at nic err=%d\n", err);
		return err;
	}

	roce->ft = ft;

	in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto fail_nomem;
	}

	/* Group 0: match on outer IP protocol + UDP destination port. */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);

	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_RX_ROCE_GROUP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	g = mlx5_create_flow_group(ft, in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx group at nic err=%d\n", err);
		goto fail_group;
	}
	roce->g = g;

	/* Group 1: wildcard (no match criteria) for the miss rule. */
	memset(in, 0, MLX5_ST_SZ_BYTES(create_flow_group_in));
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_RX_ROCE_GROUP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	g = mlx5_create_flow_group(ft, in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx miss group at nic err=%d\n", err);
		goto fail_mgroup;
	}
	roce->roce_miss.group = g;

	/* RDMA RX table that matched RoCE traffic is forwarded into.
	 * NOTE(review): only IPv4 gets level 1 here (IPv6 stays at level
	 * 0), presumably so the two families occupy distinct levels in
	 * the same RDMA namespace — confirm against the namespace layout.
	 */
	memset(&ft_attr, 0, sizeof(ft_attr));
	if (family == AF_INET)
		ft_attr.level = 1;
	ft = mlx5_create_flow_table(roce->ns_rdma, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at rdma err=%d\n", err);
		goto fail_rdma_table;
	}

	roce->ft_rdma = ft;

	err = ipsec_fs_roce_rx_rule_setup(mdev, default_dst, roce);
	if (err) {
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx rules err=%d\n", err);
		goto fail_setup_rule;
	}

	kvfree(in);
	return 0;

	/* Unwind in reverse creation order. */
fail_setup_rule:
	mlx5_destroy_flow_table(roce->ft_rdma);
fail_rdma_table:
	mlx5_destroy_flow_group(roce->roce_miss.group);
fail_mgroup:
	mlx5_destroy_flow_group(roce->g);
fail_group:
	kvfree(in);
fail_nomem:
	mlx5_destroy_flow_table(roce->ft);
	return err;
}
334
/* Free the context allocated by mlx5_ipsec_fs_roce_init().
 * Safe to call with NULL.
 */
void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce)
{
	kfree(ipsec_roce);
}
339
mlx5_ipsec_fs_roce_init(struct mlx5_core_dev * mdev)340 struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev)
341 {
342 struct mlx5_ipsec_fs *roce_ipsec;
343 struct mlx5_flow_namespace *ns;
344
345 ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC);
346 if (!ns) {
347 mlx5_core_err(mdev, "Failed to get RoCE rx ns\n");
348 return NULL;
349 }
350
351 roce_ipsec = kzalloc(sizeof(*roce_ipsec), GFP_KERNEL);
352 if (!roce_ipsec)
353 return NULL;
354
355 roce_ipsec->ipv4_rx.ns_rdma = ns;
356 roce_ipsec->ipv6_rx.ns_rdma = ns;
357
358 ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC);
359 if (!ns) {
360 mlx5_core_err(mdev, "Failed to get RoCE tx ns\n");
361 goto err_tx;
362 }
363
364 roce_ipsec->tx.ns = ns;
365
366 return roce_ipsec;
367
368 err_tx:
369 kfree(roce_ipsec);
370 return NULL;
371 }
372