/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/rhashtable.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>

#include "mlx5_core.h"
#include "fs_cmd.h"
#include "fpga/ipsec.h"
#include "fpga/sdk.h"
#include "fpga/core.h"

enum mlx5_fpga_ipsec_cmd_status {
	MLX5_FPGA_IPSEC_CMD_PENDING,
	MLX5_FPGA_IPSEC_CMD_SEND_FAIL,
	MLX5_FPGA_IPSEC_CMD_COMPLETE,
};

struct mlx5_fpga_ipsec_cmd_context {
	struct mlx5_fpga_dma_buf buf;
	enum mlx5_fpga_ipsec_cmd_status status;
	struct mlx5_ifc_fpga_ipsec_cmd_resp resp;
	int status_code;
	struct completion complete;
	struct mlx5_fpga_device *dev;
	struct list_head list; /* Item in pending_cmds */
	u8 command[0];
};

struct mlx5_fpga_esp_xfrm;

struct mlx5_fpga_ipsec_sa_ctx {
	struct rhash_head		hash;
	struct mlx5_ifc_fpga_ipsec_sa	hw_sa;
	struct mlx5_core_dev		*dev;
	struct mlx5_fpga_esp_xfrm	*fpga_xfrm;
};

struct mlx5_fpga_esp_xfrm {
	unsigned int			num_rules;
	struct mlx5_fpga_ipsec_sa_ctx	*sa_ctx;
	struct mutex			lock; /* xfrm lock */
	struct mlx5_accel_esp_xfrm	accel_xfrm;
};

struct mlx5_fpga_ipsec_rule {
	struct rb_node			node;
	struct fs_fte			*fte;
	struct mlx5_fpga_ipsec_sa_ctx	*ctx;
};

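/* The lookup key is the whole hardware SA (hw_sa), so two rules that
 * would program identical SAs resolve to the same sa_ctx instead of
 * duplicating the SA on the FPGA.
 */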
static const struct rhashtable_params rhash_sa = {
	.key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
	.key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
	.head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

struct mlx5_fpga_ipsec {
	struct mlx5_fpga_device *fdev;
	struct list_head pending_cmds;
	spinlock_t pending_cmds_lock; /* Protects pending_cmds */
	u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];
	struct mlx5_fpga_conn *conn;

	struct notifier_block	fs_notifier_ingress_bypass;
	struct notifier_block	fs_notifier_egress;

	/* Map a hardware SA           -->  SA context
	 *       (mlx5_fpga_ipsec_sa)       (mlx5_fpga_ipsec_sa_ctx)
	 * This hash is used to avoid programming duplicate SAs into the
	 * FPGA, which is not allowed.
	 */
	struct rhashtable sa_hash;	/* hw_sa -> mlx5_fpga_ipsec_sa_ctx */
	struct mutex sa_hash_lock;

	/* Tree holding all rules for this fpga device
	 * Key for searching a rule (mlx5_fpga_ipsec_rule) is (ft, id)
	 */
	struct rb_root rules_rb;
	struct mutex rules_rb_lock; /* rules lock */
};

static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
{
	if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
		return false;

	if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
	    MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
		return false;

	if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
	    MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC)
		return false;

	return true;
}

static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
					  struct mlx5_fpga_device *fdev,
					  struct mlx5_fpga_dma_buf *buf,
					  u8 status)
{
	struct mlx5_fpga_ipsec_cmd_context *context;

	if (status) {
		context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context,
				       buf);
		mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
			       status);
		context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL;
		complete(&context->complete);
	}
}

static inline
int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome)
{
	switch (syndrome) {
	case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS:
		return 0;
	case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE:
		return -EEXIST;
	case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST:
		return -EINVAL;
	case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
		return -EIO;
	}
	return -EIO;
}

static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
{
	struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data;
	struct mlx5_fpga_ipsec_cmd_context *context;
	enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome;
	struct mlx5_fpga_device *fdev = cb_arg;
	unsigned long flags;

	if (buf->sg[0].size < sizeof(*resp)) {
		mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n",
			       buf->sg[0].size, sizeof(*resp));
		return;
	}

	mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n",
		      ntohl(resp->syndrome));

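	/* Responses are assumed to arrive in command order, so pair this
	 * response with the oldest pending command; commands are queued
	 * at the tail in mlx5_fpga_ipsec_cmd_exec() under the same lock.
	 */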
	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
	context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
					   struct mlx5_fpga_ipsec_cmd_context,
					   list);
	if (context)
		list_del(&context->list);
	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

	if (!context) {
		mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n");
		return;
	}
	mlx5_fpga_dbg(fdev, "Handling response for %p\n", context);

	syndrome = ntohl(resp->syndrome);
	context->status_code = syndrome_to_errno(syndrome);
	context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE;
	memcpy(&context->resp, resp, sizeof(*resp));

	if (context->status_code)
		mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n",
			       syndrome);

	complete(&context->complete);
}

static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
				      const void *cmd, int cmd_size)
{
	struct mlx5_fpga_ipsec_cmd_context *context;
	struct mlx5_fpga_device *fdev = mdev->fpga;
	unsigned long flags;
	int res;

	if (!fdev || !fdev->ipsec)
		return ERR_PTR(-EOPNOTSUPP);

	if (cmd_size & 3)
		return ERR_PTR(-EINVAL);

	context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC);
	if (!context)
		return ERR_PTR(-ENOMEM);

	context->status = MLX5_FPGA_IPSEC_CMD_PENDING;
	context->dev = fdev;
	context->buf.complete = mlx5_fpga_ipsec_send_complete;
	init_completion(&context->complete);
	memcpy(&context->command, cmd, cmd_size);
	context->buf.sg[0].size = cmd_size;
	context->buf.sg[0].data = &context->command;

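	/* Send and enqueue under one lock so the order of pending_cmds
	 * always matches the order in which commands hit the wire; the
	 * receive path relies on this to match responses.
	 */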
	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
	res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
	if (!res)
		list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

	if (res) {
		mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
		kfree(context);
		return ERR_PTR(res);
	}

	/* Context should be freed by the caller after completion. */
	return context;
}

static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
{
	struct mlx5_fpga_ipsec_cmd_context *context = ctx;
	unsigned long timeout =
		msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC);
	int res;

	res = wait_for_completion_timeout(&context->complete, timeout);
	if (!res) {
		mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n");
		return -ETIMEDOUT;
	}

	if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE)
		res = context->status_code;
	else
		res = -EIO;

	return res;
}

static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec)
{
	return MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command);
}

static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev,
					struct mlx5_ifc_fpga_ipsec_sa *hw_sa,
					int opcode)
{
	struct mlx5_core_dev *dev = fdev->mdev;
	struct mlx5_ifc_fpga_ipsec_sa *sa;
	struct mlx5_fpga_ipsec_cmd_context *cmd_context;
	size_t sa_cmd_size;
	int err;

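	/* v2 SADB commands carry the whole SA, including the fields that
	 * follow ipsec_sa_v1 (e.g. the ESN); v1 devices only understand
	 * the ipsec_sa_v1 prefix, so send just that.
	 */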
	hw_sa->ipsec_sa_v1.cmd = htonl(opcode);
	if (is_v2_sadb_supported(fdev->ipsec))
		sa_cmd_size = sizeof(*hw_sa);
	else
		sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1);

	cmd_context = (struct mlx5_fpga_ipsec_cmd_context *)
			mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size);
	if (IS_ERR(cmd_context))
		return PTR_ERR(cmd_context);

	err = mlx5_fpga_ipsec_cmd_wait(cmd_context);
	if (err)
		goto out;

	sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command;
	if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) {
		mlx5_fpga_err(fdev, "SA handle mismatch: cmd 0x%08x vs resp 0x%08x\n",
			      ntohl(sa->ipsec_sa_v1.sw_sa_handle),
			      ntohl(cmd_context->resp.sw_sa_handle));
		err = -EIO;
	}

out:
	kfree(cmd_context);
	return err;
}

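/**
 * mlx5_fpga_ipsec_device_caps() - query IPSec offload capabilities
 * @mdev: mlx5 core device
 *
 * Returns a bitmask of MLX5_ACCEL_IPSEC_CAP_* flags, or 0 if this is
 * not an FPGA IPSec device.
 */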
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;
	u32 ret = 0;

	if (mlx5_fpga_is_ipsec_device(mdev)) {
		ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE;
		ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA;
	} else {
		return ret;
	}

	if (!fdev->ipsec)
		return ret;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp))
		ret |= MLX5_ACCEL_IPSEC_CAP_ESP;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6))
		ret |= MLX5_ACCEL_IPSEC_CAP_IPV6;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso))
		ret |= MLX5_ACCEL_IPSEC_CAP_LSO;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer))
		ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) {
		ret |= MLX5_ACCEL_IPSEC_CAP_ESN;
		ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
	}

	return ret;
}

unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;

	if (!fdev || !fdev->ipsec)
		return 0;

	return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			number_of_ipsec_counters);
}

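/**
 * mlx5_fpga_ipsec_counters_read() - read the device IPSec counters
 * @mdev: mlx5 core device
 * @counters: output array
 * @counters_count: capacity of @counters, in u64 entries
 *
 * Reads up to @counters_count 64-bit counters out of FPGA memory.
 * Returns 0 on success or a negative errno.
 */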
int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
				  unsigned int counters_count)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;
	unsigned int i;
	__be32 *data;
	u32 count;
	u64 addr;
	int ret;

	if (!fdev || !fdev->ipsec)
		return 0;

	addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			     ipsec_counters_addr_low) +
	       ((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			     ipsec_counters_addr_high) << 32);

	count = mlx5_fpga_ipsec_counters_count(mdev);

	data = kzalloc(array3_size(sizeof(*data), count, 2), GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data,
				 MLX5_FPGA_ACCESS_TYPE_DONTCARE);
	if (ret < 0) {
		mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n",
			      ret);
		goto out;
	}
	ret = 0;

	if (count > counters_count)
		count = counters_count;

	/* Each counter is low word, then high. But each word is big-endian */
	for (i = 0; i < count; i++)
		counters[i] = (u64)ntohl(data[i * 2]) |
			      ((u64)ntohl(data[i * 2 + 1]) << 32);

out:
	kfree(data);
	return ret;
}

static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
{
	struct mlx5_fpga_ipsec_cmd_context *context;
	struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0};
	int err;

	cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
	cmd.flags = htonl(flags);
	context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
	if (IS_ERR(context))
		return PTR_ERR(context);

	err = mlx5_fpga_ipsec_cmd_wait(context);
	if (err)
		goto out;

	if ((context->resp.flags & cmd.flags) != cmd.flags) {
		mlx5_fpga_err(context->dev, "Failed to set capabilities. cmd 0x%08x vs resp 0x%08x\n",
			      cmd.flags,
			      context->resp.flags);
		err = -EIO;
	}

out:
	kfree(context);
	return err;
}

static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev)
{
	u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev);
	u32 flags = 0;

	if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER)
		flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER;

	return mlx5_fpga_ipsec_set_caps(mdev, flags);
}

static void
mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev,
			      const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
			      struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
	const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;

	/* key */
	memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key,
	       aes_gcm->key_len / 8);
	/* Duplicate 128 bit key twice according to HW layout */
	if (aes_gcm->key_len == 128)
		memcpy(&hw_sa->ipsec_sa_v1.key_enc[16],
		       aes_gcm->aes_key, aes_gcm->key_len / 8);

	/* salt and seq_iv */
	memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv,
	       sizeof(aes_gcm->seq_iv));
	memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt,
	       sizeof(aes_gcm->salt));

	/* esn */
	if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN;
		hw_sa->ipsec_sa_v1.flags |=
				(xfrm_attrs->flags &
				 MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
					MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
		hw_sa->esn = htonl(xfrm_attrs->esn);
	} else {
		hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN;
		hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_OVERLAP;
		hw_sa->esn = 0;
	}

	/* rx handle */
	hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle);

	/* enc mode */
	switch (aes_gcm->key_len) {
	case 128:
		hw_sa->ipsec_sa_v1.enc_mode =
			MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128;
		break;
	case 256:
		hw_sa->ipsec_sa_v1.enc_mode =
			MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128;
		break;
	}

	/* flags */
	hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID |
			MLX5_FPGA_IPSEC_SA_SPI_EN |
			MLX5_FPGA_IPSEC_SA_IP_ESP;

	if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT)
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX;
	else
		hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX;
}

static void
mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev,
			    struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
			    const __be32 saddr[4],
			    const __be32 daddr[4],
			    const __be32 spi, bool is_ipv6,
			    struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
	mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa);

	/* IPs */
	memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip));
	memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip));

	/* SPI */
	hw_sa->ipsec_sa_v1.spi = spi;

	/* flags */
	if (is_ipv6)
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6;
}

static bool is_full_mask(const void *p, size_t len)
{
	WARN_ON(len % 4);

	return !memchr_inv(p, 0xff, len);
}

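/* The FPGA SADB is looked up by exact source/destination IP and SPI, so
 * only rules whose masks fully match those fields can be offloaded.
 */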
static bool validate_fpga_full_mask(struct mlx5_core_dev *dev,
				    const u32 *match_c,
				    const u32 *match_v)
{
	const void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
						 match_c,
						 misc_parameters);
	const void *headers_c = MLX5_ADDR_OF(fte_match_param,
					     match_c,
					     outer_headers);
	const void *headers_v = MLX5_ADDR_OF(fte_match_param,
					     match_v,
					     outer_headers);

	if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) {
		const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    src_ipv4_src_ipv6.ipv4_layout.ipv4);
		const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    dst_ipv4_dst_ipv6.ipv4_layout.ipv4);

		if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
							      ipv4)) ||
		    !is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
							      ipv4)))
			return false;
	} else {
		const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    src_ipv4_src_ipv6.ipv6_layout.ipv6);
		const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    dst_ipv4_dst_ipv6.ipv6_layout.ipv6);

		if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
							      ipv6)) ||
		    !is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
							      ipv6)))
			return false;
	}

	if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
				       outer_esp_spi),
			  MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi)))
		return false;

	return true;
}

static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
				    u8 match_criteria_enable,
				    const u32 *match_c,
				    const u32 *match_v)
{
	u32 ipsec_dev_caps = mlx5_accel_ipsec_device_caps(dev);
	bool ipv6_flow;

	ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);

	if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) ||
	    mlx5_fs_is_outer_udp_flow(match_c, match_v) ||
	    mlx5_fs_is_outer_tcp_flow(match_c, match_v) ||
	    mlx5_fs_is_vxlan_flow(match_c) ||
	    !(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) ||
	      ipv6_flow))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) &&
	    mlx5_fs_is_outer_ipsec_flow(match_c))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) &&
	    ipv6_flow)
		return false;

	if (!validate_fpga_full_mask(dev, match_c, match_v))
		return false;

	return true;
}

static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
					   u8 match_criteria_enable,
					   const u32 *match_c,
					   const u32 *match_v,
					   struct mlx5_flow_act *flow_act)
{
	const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c,
					   outer_headers);
	bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) ||
			MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0);
	bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) ||
			MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0);
	int ret;

	ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c,
				      match_v);
	if (!ret)
		return ret;

	if (is_dmac || is_smac ||
	    (match_criteria_enable &
	     ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
	    (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
	     flow_act->has_flow_tag)
		return false;

	return true;
}

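/**
 * mlx5_fpga_ipsec_create_sa_ctx() - program an SA into the FPGA SADB
 * @mdev: mlx5 core device
 * @accel_xfrm: xfrm state the SA belongs to
 * @saddr: source IP (IPv4 in saddr[3])
 * @daddr: destination IP (IPv4 in daddr[3])
 * @spi: SPI, big endian
 * @is_ipv6: address family of @saddr/@daddr
 *
 * Returns the sa_ctx bound to @accel_xfrm (new, or the existing one when
 * another rule already uses this xfrm) or an ERR_PTR. All rules sharing
 * an accel_xfrm must use the same IPs and SPI.
 */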
void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
				    struct mlx5_accel_esp_xfrm *accel_xfrm,
				    const __be32 saddr[4],
				    const __be32 daddr[4],
				    const __be32 spi, bool is_ipv6)
{
	struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			container_of(accel_xfrm, typeof(*fpga_xfrm),
				     accel_xfrm);
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	int opcode, err;
	void *context;

	/* alloc SA */
	sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
	if (!sa_ctx)
		return ERR_PTR(-ENOMEM);

	sa_ctx->dev = mdev;

	/* build candidate SA */
	mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs,
				    saddr, daddr, spi, is_ipv6,
				    &sa_ctx->hw_sa);

	mutex_lock(&fpga_xfrm->lock);

	if (fpga_xfrm->sa_ctx) {        /* multiple rules for same accel_xfrm */
		/* all rules must be with same IPs and SPI */
		if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa,
			   sizeof(sa_ctx->hw_sa))) {
			context = ERR_PTR(-EINVAL);
			goto exists;
		}

		++fpga_xfrm->num_rules;
		context = fpga_xfrm->sa_ctx;
		goto exists;
	}

	/* This fpga_xfrm is unbound; try to add the new SA to the hash */
	mutex_lock(&fipsec->sa_hash_lock);

	err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash,
					    rhash_sa);
	if (err) {
		/* Can't bind a different accel_xfrm to an already existing
		 * sa_ctx, because multiple keymats for the same IPs and SPI
		 * are not supported.
		 */
		context = ERR_PTR(-EEXIST);
		goto unlock_hash;
	}

	/* Bind accel_xfrm to sa_ctx */
	opcode = is_v2_sadb_supported(fdev->ipsec) ?
			MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 :
			MLX5_FPGA_IPSEC_CMD_OP_ADD_SA;
	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
	sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (err) {
		context = ERR_PTR(err);
		goto delete_hash;
	}

	mutex_unlock(&fipsec->sa_hash_lock);

	++fpga_xfrm->num_rules;
	fpga_xfrm->sa_ctx = sa_ctx;
	sa_ctx->fpga_xfrm = fpga_xfrm;

	mutex_unlock(&fpga_xfrm->lock);

	return sa_ctx;

delete_hash:
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
				       rhash_sa));
unlock_hash:
	mutex_unlock(&fipsec->sa_hash_lock);

exists:
	mutex_unlock(&fpga_xfrm->lock);
	kfree(sa_ctx);
	return context;
}

static void *
mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
				 struct fs_fte *fte,
				 bool is_egress)
{
	struct mlx5_accel_esp_xfrm *accel_xfrm;
	/* zero-init so the words unused by IPv4 addresses stay cleared */
	__be32 saddr[4] = {}, daddr[4] = {}, spi;
	struct mlx5_flow_group *fg;
	bool is_ipv6 = false;

	fs_get_obj(fg, fte->node.parent);
	/* validate */
	if (is_egress &&
	    !mlx5_is_fpga_egress_ipsec_rule(mdev,
					    fg->mask.match_criteria_enable,
					    fg->mask.match_criteria,
					    fte->val,
					    &fte->action))
		return ERR_PTR(-EINVAL);
	else if (!mlx5_is_fpga_ipsec_rule(mdev,
					  fg->mask.match_criteria_enable,
					  fg->mask.match_criteria,
					  fte->val))
		return ERR_PTR(-EINVAL);

	/* get xfrm context */
	accel_xfrm =
		(struct mlx5_accel_esp_xfrm *)fte->action.esp_id;

	/* IPs */
	if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria,
				       fte->val)) {
		memcpy(&saddr[3],
		       MLX5_ADDR_OF(fte_match_set_lyr_2_4,
				    fte->val,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
				    sizeof(saddr[3]));
		memcpy(&daddr[3],
		       MLX5_ADDR_OF(fte_match_set_lyr_2_4,
				    fte->val,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
				    sizeof(daddr[3]));
	} else {
		memcpy(saddr,
		       MLX5_ADDR_OF(fte_match_param,
				    fte->val,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
				    sizeof(saddr));
		memcpy(daddr,
		       MLX5_ADDR_OF(fte_match_param,
				    fte->val,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
				    sizeof(daddr));
		is_ipv6 = true;
	}

	/* SPI */
	spi = MLX5_GET_BE(typeof(spi),
			  fte_match_param, fte->val,
			  misc_parameters.outer_esp_spi);

	/* create */
	return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
					     saddr, daddr,
					     spi, is_ipv6);
}

static void
mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
{
	struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	int opcode = is_v2_sadb_supported(fdev->ipsec) ?
			MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 :
			MLX5_FPGA_IPSEC_CMD_OP_DEL_SA;
	int err;

	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
	sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (WARN_ON(err))
		return;

	mutex_lock(&fipsec->sa_hash_lock);
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
				       rhash_sa));
	mutex_unlock(&fipsec->sa_hash_lock);
}

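/**
 * mlx5_fpga_ipsec_delete_sa_ctx() - drop one rule's reference to an SA
 * @context: sa_ctx returned by mlx5_fpga_ipsec_create_sa_ctx()
 *
 * The SA is removed from the device once the last rule using it goes
 * away.
 */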
void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;

	mutex_lock(&fpga_xfrm->lock);
	if (!--fpga_xfrm->num_rules) {
		mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
		fpga_xfrm->sa_ctx = NULL;
	}
	mutex_unlock(&fpga_xfrm->lock);
}

static inline struct mlx5_fpga_ipsec_rule *
_rule_search(struct rb_root *root, struct fs_fte *fte)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct mlx5_fpga_ipsec_rule *rule =
				container_of(node, struct mlx5_fpga_ipsec_rule,
					     node);

		if (rule->fte < fte)
			node = node->rb_left;
		else if (rule->fte > fte)
			node = node->rb_right;
		else
			return rule;
	}
	return NULL;
}

static struct mlx5_fpga_ipsec_rule *
rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte)
{
	struct mlx5_fpga_ipsec_rule *rule;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	rule = _rule_search(&ipsec_dev->rules_rb, fte);
	mutex_unlock(&ipsec_dev->rules_rb_lock);

	return rule;
}

static inline int _rule_insert(struct rb_root *root,
			       struct mlx5_fpga_ipsec_rule *rule)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct mlx5_fpga_ipsec_rule *this =
				container_of(*new, struct mlx5_fpga_ipsec_rule,
					     node);

		parent = *new;
		if (rule->fte < this->fte)
			new = &((*new)->rb_left);
		else if (rule->fte > this->fte)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&rule->node, parent, new);
	rb_insert_color(&rule->node, root);

	return 0;
}

static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev,
		       struct mlx5_fpga_ipsec_rule *rule)
{
	int ret;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	ret = _rule_insert(&ipsec_dev->rules_rb, rule);
	mutex_unlock(&ipsec_dev->rules_rb_lock);

	return ret;
}

static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
				struct mlx5_fpga_ipsec_rule *rule)
{
	struct rb_root *root = &ipsec_dev->rules_rb;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	rb_erase(&rule->node, root);
	mutex_unlock(&ipsec_dev->rules_rb_lock);
}

static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
			struct mlx5_fpga_ipsec_rule *rule)
{
	_rule_delete(ipsec_dev, rule);
	kfree(rule);
}

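/* FTE state that has to be hidden from firmware while the command
 * mailbox is built (the FPGA, not the NIC flow table, implements it),
 * saved here so restore_spec_mailbox() can put it back afterwards.
 */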
struct mailbox_mod {
	uintptr_t			saved_esp_id;
	u32				saved_action;
	u32				saved_outer_esp_spi_value;
};

static void restore_spec_mailbox(struct fs_fte *fte,
				 struct mailbox_mod *mbox_mod)
{
	char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
					   fte->val,
					   misc_parameters);

	MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
		 mbox_mod->saved_outer_esp_spi_value);
	fte->action.action |= mbox_mod->saved_action;
	fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id;
}

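/* Strip the crypto actions and esp_id, and, when the device cannot
 * match on outer_esp_spi, the SPI match value as well, so that firmware
 * never sees fields it does not support.
 */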
static void modify_spec_mailbox(struct mlx5_core_dev *mdev,
				struct fs_fte *fte,
				struct mailbox_mod *mbox_mod)
{
	char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
					   fte->val,
					   misc_parameters);

	mbox_mod->saved_esp_id = fte->action.esp_id;
	mbox_mod->saved_action = fte->action.action &
			(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
			 MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
	mbox_mod->saved_outer_esp_spi_value =
			MLX5_GET(fte_match_set_misc, misc_params_v,
				 outer_esp_spi);

	fte->action.esp_id = 0;
	fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
	if (!MLX5_CAP_FLOWTABLE(mdev,
				flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
		MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0);
}

static enum fs_flow_table_type egress_to_fs_ft(bool egress)
{
	return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
}

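/* Same idea as modify_spec_mailbox(), applied to the flow group mask:
 * if firmware cannot match on outer_esp_spi, create the group without
 * it (dropping misc_parameters entirely when the SPI was its only
 * field) and restore the caller's buffer before returning.
 */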
static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   unsigned int *group_id,
					   bool is_egress)
{
	int (*create_flow_group)(struct mlx5_core_dev *dev,
				 struct mlx5_flow_table *ft, u32 *in,
				 unsigned int *group_id) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
	char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
					   match_criteria.misc_parameters);
	u32 saved_outer_esp_spi_mask;
	u8 match_criteria_enable;
	int ret;

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
		return create_flow_group(dev, ft, in, group_id);

	match_criteria_enable =
		MLX5_GET(create_flow_group_in, in, match_criteria_enable);
	saved_outer_esp_spi_mask =
		MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
	if (!match_criteria_enable || !saved_outer_esp_spi_mask)
		return create_flow_group(dev, ft, in, group_id);

	MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);

	if (!(*misc_params_c) &&
	    !memcmp(misc_params_c, misc_params_c + 1, MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1))
		MLX5_SET(create_flow_group_in, in, match_criteria_enable,
			 match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);

	ret = create_flow_group(dev, ft, in, group_id);

	MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask);
	MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable);

	return ret;
}

static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *fg,
				    struct fs_fte *fte,
				    bool is_egress)
{
	int (*create_fte)(struct mlx5_core_dev *dev,
			  struct mlx5_flow_table *ft,
			  struct mlx5_flow_group *fg,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
	struct mlx5_fpga_device *fdev = dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_ipsec_rule *rule;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return create_fte(dev, ft, fg, fte);

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
	if (IS_ERR(rule->ctx)) {
		int err = PTR_ERR(rule->ctx);

		kfree(rule);
		return err;
	}

	rule->fte = fte;
	WARN_ON(rule_insert(fipsec, rule));

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = create_fte(dev, ft, fg, fte);
	restore_spec_mailbox(fte, &mbox_mod);
	if (ret) {
		_rule_delete(fipsec, rule);
		mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
		kfree(rule);
	}

	return ret;
}

static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    unsigned int group_id,
				    int modify_mask,
				    struct fs_fte *fte,
				    bool is_egress)
{
	int (*update_fte)(struct mlx5_core_dev *dev,
			  struct mlx5_flow_table *ft,
			  unsigned int group_id,
			  int modify_mask,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return update_fte(dev, ft, group_id, modify_mask, fte);

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = update_fte(dev, ft, group_id, modify_mask, fte);
	restore_spec_mailbox(fte, &mbox_mod);

	return ret;
}

static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte,
				    bool is_egress)
{
	int (*delete_fte)(struct mlx5_core_dev *dev,
			  struct mlx5_flow_table *ft,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
	struct mlx5_fpga_device *fdev = dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_ipsec_rule *rule;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return delete_fte(dev, ft, fte);

	rule = rule_search(fipsec, fte);
	if (!rule)
		return -ENOENT;

	mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
	rule_delete(fipsec, rule);

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = delete_fte(dev, ft, fte);
	restore_spec_mailbox(fte, &mbox_mod);

	return ret;
}

static int
mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_core_dev *dev,
					    struct mlx5_flow_table *ft,
					    u32 *in,
					    unsigned int *group_id)
{
	return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, true);
}

static int
mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_core_dev *dev,
				     struct mlx5_flow_table *ft,
				     struct mlx5_flow_group *fg,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, true);
}

static int
mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_core_dev *dev,
				     struct mlx5_flow_table *ft,
				     unsigned int group_id,
				     int modify_mask,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
					true);
}

static int
mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_core_dev *dev,
				     struct mlx5_flow_table *ft,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_delete_fte(dev, ft, fte, true);
}

static int
mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_core_dev *dev,
					     struct mlx5_flow_table *ft,
					     u32 *in,
					     unsigned int *group_id)
{
	return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, false);
}

static int
mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_core_dev *dev,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_group *fg,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, false);
}

static int
mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_core_dev *dev,
				      struct mlx5_flow_table *ft,
				      unsigned int group_id,
				      int modify_mask,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
					false);
}

static int
mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_core_dev *dev,
				      struct mlx5_flow_table *ft,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_delete_fte(dev, ft, fte, false);
}

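/* Flow steering command tables with the FPGA hooks spliced in; filled
 * in by mlx5_fpga_ipsec_build_fs_cmds() below.
 */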
static struct mlx5_flow_cmds fpga_ipsec_ingress;
static struct mlx5_flow_cmds fpga_ipsec_egress;

const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
{
	switch (type) {
	case FS_FT_NIC_RX:
		return &fpga_ipsec_ingress;
	case FS_FT_NIC_TX:
		return &fpga_ipsec_egress;
	default:
		WARN_ON(true);
		return NULL;
	}
}

int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_conn_attr init_attr = {0};
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_conn *conn;
	int err;

	if (!mlx5_fpga_is_ipsec_device(mdev))
		return 0;

	fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL);
	if (!fdev->ipsec)
		return -ENOMEM;

	fdev->ipsec->fdev = fdev;

	err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps),
				     fdev->ipsec->caps);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n",
			      err);
		goto error;
	}

	INIT_LIST_HEAD(&fdev->ipsec->pending_cmds);
	spin_lock_init(&fdev->ipsec->pending_cmds_lock);

	init_attr.rx_size = SBU_QP_QUEUE_SIZE;
	init_attr.tx_size = SBU_QP_QUEUE_SIZE;
	init_attr.recv_cb = mlx5_fpga_ipsec_recv;
	init_attr.cb_arg = fdev;
	conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
	if (IS_ERR(conn)) {
		err = PTR_ERR(conn);
		mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n",
			      err);
		goto error;
	}
	fdev->ipsec->conn = conn;

	err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa);
	if (err)
		goto err_destroy_conn;
	mutex_init(&fdev->ipsec->sa_hash_lock);

	fdev->ipsec->rules_rb = RB_ROOT;
	mutex_init(&fdev->ipsec->rules_rb_lock);

	err = mlx5_fpga_ipsec_enable_supported_caps(mdev);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n",
			      err);
		goto err_destroy_hash;
	}

	return 0;

err_destroy_hash:
	rhashtable_destroy(&fdev->ipsec->sa_hash);

err_destroy_conn:
	mlx5_fpga_sbu_conn_destroy(conn);

error:
	kfree(fdev->ipsec);
	fdev->ipsec = NULL;
	return err;
}

static void destroy_rules_rb(struct rb_root *root)
{
	struct mlx5_fpga_ipsec_rule *r, *tmp;

	rbtree_postorder_for_each_entry_safe(r, tmp, root, node) {
		rb_erase(&r->node, root);
		mlx5_fpga_ipsec_delete_sa_ctx(r->ctx);
		kfree(r);
	}
}

void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;

	if (!mlx5_fpga_is_ipsec_device(mdev))
		return;

	destroy_rules_rb(&fdev->ipsec->rules_rb);
	rhashtable_destroy(&fdev->ipsec->sa_hash);

	mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
	kfree(fdev->ipsec);
	fdev->ipsec = NULL;
}

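/* Build the ingress/egress command tables once: the default firmware
 * commands everywhere, with the flow group and FTE entry points
 * replaced by the FPGA-aware wrappers above.
 */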
void mlx5_fpga_ipsec_build_fs_cmds(void)
{
	/* ingress */
	fpga_ipsec_ingress.create_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table;
	fpga_ipsec_ingress.destroy_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table;
	fpga_ipsec_ingress.modify_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table;
	fpga_ipsec_ingress.create_flow_group =
		mlx5_fpga_ipsec_fs_create_flow_group_ingress;
	fpga_ipsec_ingress.destroy_flow_group =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group;
	fpga_ipsec_ingress.create_fte =
		mlx5_fpga_ipsec_fs_create_fte_ingress;
	fpga_ipsec_ingress.update_fte =
		mlx5_fpga_ipsec_fs_update_fte_ingress;
	fpga_ipsec_ingress.delete_fte =
		mlx5_fpga_ipsec_fs_delete_fte_ingress;
	fpga_ipsec_ingress.update_root_ft =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft;

	/* egress */
	fpga_ipsec_egress.create_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table;
	fpga_ipsec_egress.destroy_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table;
	fpga_ipsec_egress.modify_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table;
	fpga_ipsec_egress.create_flow_group =
		mlx5_fpga_ipsec_fs_create_flow_group_egress;
	fpga_ipsec_egress.destroy_flow_group =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group;
	fpga_ipsec_egress.create_fte =
		mlx5_fpga_ipsec_fs_create_fte_egress;
	fpga_ipsec_egress.update_fte =
		mlx5_fpga_ipsec_fs_update_fte_egress;
	fpga_ipsec_egress.delete_fte =
		mlx5_fpga_ipsec_fs_delete_fte_egress;
	fpga_ipsec_egress.update_root_ft =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft;
}

static int
mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
				  const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	if (attrs->tfc_pad) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n");
		return -EOPNOTSUPP;
	}

	if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
		mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat.aes_gcm.iv_algo !=
	    MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
		mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat.aes_gcm.icv_len != 128) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat.aes_gcm.key_len != 128 &&
	    attrs->keymat.aes_gcm.key_len != 256) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
		return -EOPNOTSUPP;
	}

	if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
	    (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
		       v2_command))) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

struct mlx5_accel_esp_xfrm *
mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
			  const struct mlx5_accel_esp_xfrm_attrs *attrs,
			  u32 flags)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm;

	if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) {
		mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
		return ERR_PTR(-EINVAL);
	}

	if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
		mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL);
	if (!fpga_xfrm)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fpga_xfrm->lock);
	memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs,
	       sizeof(fpga_xfrm->accel_xfrm.attrs));

	return &fpga_xfrm->accel_xfrm;
}

void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			container_of(xfrm, struct mlx5_fpga_esp_xfrm,
				     accel_xfrm);
	/* assuming no sa_ctx are connected to this xfrm_ctx */
	kfree(fpga_xfrm);
}

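/* Modifying a bound xfrm means reprogramming its SA: the hash entry is
 * re-keyed first, so a collision with an existing SA is caught before
 * the device is touched; on failure the original hw_sa and hash entry
 * are restored.
 */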
int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
			      const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = xfrm->mdev;
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_esp_xfrm *fpga_xfrm;
	struct mlx5_ifc_fpga_ipsec_sa org_hw_sa;
	int err = 0;

	if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
		return 0;

	if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
		mlx5_core_warn(mdev, "Tried to modify an esp with unsupported attrs\n");
		return -EOPNOTSUPP;
	}

	if (!is_v2_sadb_supported(fipsec)) {
		mlx5_core_warn(mdev, "Modify esp is not supported\n");
		return -EOPNOTSUPP;
	}

	fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm);

	mutex_lock(&fpga_xfrm->lock);

	if (!fpga_xfrm->sa_ctx)
		/* Unbound xfrm; change only the sw attrs */
		goto change_sw_xfrm_attrs;

	/* copy original hw sa */
	memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa));
	mutex_lock(&fipsec->sa_hash_lock);
	/* remove original hw sa from hash */
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
				       &fpga_xfrm->sa_ctx->hash, rhash_sa));
	/* update hw_sa with new xfrm attrs */
	mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs,
				      &fpga_xfrm->sa_ctx->hw_sa);
	/* try to insert new hw_sa to hash */
	err = rhashtable_insert_fast(&fipsec->sa_hash,
				     &fpga_xfrm->sa_ctx->hash, rhash_sa);
	if (err)
		goto rollback_sa;

	/* modify device with new hw_sa */
	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa,
					   MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2);
	fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (err)
		WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
					       &fpga_xfrm->sa_ctx->hash,
					       rhash_sa));
rollback_sa:
	if (err) {
		/* return original hw_sa to hash */
		memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa,
		       sizeof(org_hw_sa));
		WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash,
					       &fpga_xfrm->sa_ctx->hash,
					       rhash_sa));
	}
	mutex_unlock(&fipsec->sa_hash_lock);

change_sw_xfrm_attrs:
	if (!err)
		memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
	mutex_unlock(&fpga_xfrm->lock);
	return err;
}