/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/internal/geniv.h>
#include <crypto/aead.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>

#include "en.h"
#include "ipsec.h"
#include "ipsec_rxtx.h"

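/* Recover the driver SA entry stashed in the xfrm state's offload handle. */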
static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
	return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
}

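/* Find the xfrm state for an RX SA by hardware handle. On a hit, a
 * reference is taken on the state; the caller must drop it with
 * xfrm_state_put().
 */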
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
					      unsigned int handle)
{
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct xfrm_state *ret = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle)
		if (sa_entry->handle == handle) {
			ret = sa_entry->x;
			xfrm_state_hold(ret);
			break;
		}
	rcu_read_unlock();

	return ret;
}

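/* Publish an RX SA in the sadb_rx hash table, keyed by its IPsec object
 * ID. Returns -EEXIST if an entry with the same handle already exists.
 */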
static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	unsigned int handle = sa_entry->ipsec_obj_id;
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_sa_entry *_sa_entry;
	unsigned long flags;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->sadb_rx, _sa_entry, hlist, handle)
		if (_sa_entry->handle == handle) {
			rcu_read_unlock();
			return -EEXIST;
		}
	rcu_read_unlock();

	spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
	sa_entry->handle = handle;
	hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
	spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);

	return 0;
}

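/* Unpublish an RX SA from the sadb_rx hash table. */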
static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	unsigned long flags;

	spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
	hash_del_rcu(&sa_entry->hlist);
	spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
}

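/* Recompute the cached ESN high bits from the xfrm replay window and
 * track whether the window bottom sits in the lower or upper half of the
 * sequence-number space. Returns true when the overlap state flipped,
 * i.e. the hardware context needs to be updated.
 */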
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_replay_state_esn *replay_esn;
	u32 seq_bottom = 0;
	u8 overlap;

	if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {
		sa_entry->esn_state.trigger = 0;
		return false;
	}

	replay_esn = sa_entry->x->replay_esn;
	if (replay_esn->seq >= replay_esn->replay_window)
		seq_bottom = replay_esn->seq - replay_esn->replay_window + 1;

	overlap = sa_entry->esn_state.overlap;

	sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
						    htonl(seq_bottom));

	sa_entry->esn_state.trigger = 1;
	if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
		sa_entry->esn_state.overlap = 0;
		return true;
	} else if (unlikely(!overlap &&
			    (seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) {
		sa_entry->esn_state.overlap = 1;
		return true;
	}

	return false;
}

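/* Translate an xfrm state into the attributes used to build the hardware
 * SA context: AES-GCM key material, salt and implicit IV, ESN state,
 * direction, mode, SPI and addresses.
 */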
static void
mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
				   struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	struct aead_geniv_ctx *geniv_ctx;
	struct crypto_aead *aead;
	unsigned int crypto_data_len, key_len;
	int ivsize;

	memset(attrs, 0, sizeof(*attrs));

	/* key */
	crypto_data_len = (x->aead->alg_key_len + 7) / 8;
	key_len = crypto_data_len - 4; /* 4 bytes salt at end */

	memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
	aes_gcm->key_len = key_len * 8;

	/* salt and seq_iv */
	aead = x->data;
	geniv_ctx = crypto_aead_ctx(aead);
	ivsize = crypto_aead_ivsize(aead);
	memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
	memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
	       sizeof(aes_gcm->salt));

	/* icv len */
	aes_gcm->icv_len = x->aead->alg_icv_len;

	/* esn */
	if (sa_entry->esn_state.trigger) {
		attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
		attrs->esn = sa_entry->esn_state.esn;
		if (sa_entry->esn_state.overlap)
			attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
	}

	/* action */
	attrs->action = (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) ?
			MLX5_ACCEL_ESP_ACTION_ENCRYPT :
			MLX5_ACCEL_ESP_ACTION_DECRYPT;
	/* flags */
	attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
			MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
			MLX5_ACCEL_ESP_FLAGS_TUNNEL;

	/* spi */
	attrs->spi = be32_to_cpu(x->id.spi);

	/* source and destination IPs */
	memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
	memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
	attrs->is_ipv6 = (x->props.family != AF_INET);
}

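/* Filter out xfrm states the hardware cannot offload: only plain ESP with
 * AES-GCM-ICV16, seqiv IV generation and a 128/256-bit key is accepted,
 * with no encapsulation, compression or TFC padding.
 */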
static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
{
	struct net_device *netdev = x->xso.real_dev;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);

	if (x->props.aalgo != SADB_AALG_NONE) {
		netdev_info(netdev, "Cannot offload authenticated xfrm states\n");
		return -EINVAL;
	}
	if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
		netdev_info(netdev, "Only AES-GCM-ICV16 xfrm states may be offloaded\n");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		netdev_info(netdev, "Cannot offload compressed xfrm states\n");
		return -EINVAL;
	}
	if (x->props.flags & XFRM_STATE_ESN &&
	    !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_ESN)) {
		netdev_info(netdev, "Cannot offload ESN xfrm states\n");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
		return -EINVAL;
	}
	if (x->props.mode != XFRM_MODE_TRANSPORT &&
	    x->props.mode != XFRM_MODE_TUNNEL) {
		netdev_info(netdev, "Only transport and tunnel xfrm states may be offloaded\n");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		netdev_info(netdev, "Only ESP xfrm states may be offloaded\n");
		return -EINVAL;
	}
	if (x->encap) {
		netdev_info(netdev, "Encapsulated xfrm states may not be offloaded\n");
		return -EINVAL;
	}
	if (!x->aead) {
		netdev_info(netdev, "Cannot offload xfrm states without aead\n");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128) {
		netdev_info(netdev, "Cannot offload xfrm states with AEAD ICV length other than 128 bits\n");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		netdev_info(netdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bits\n");
		return -EINVAL;
	}
	if (x->tfcpad) {
		netdev_info(netdev, "Cannot offload xfrm states with TFC padding\n");
		return -EINVAL;
	}
	if (!x->geniv) {
		netdev_info(netdev, "Cannot offload xfrm states without geniv\n");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
		return -EINVAL;
	}
	return 0;
}

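/* Work handler that pushes updated ESN attributes to the hardware SA
 * context; runs on the driver's ordered IPsec workqueue.
 */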
static void _update_xfrm_state(struct work_struct *work)
{
	struct mlx5e_ipsec_modify_state_work *modify_work =
		container_of(work, struct mlx5e_ipsec_modify_state_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = container_of(
		modify_work, struct mlx5e_ipsec_sa_entry, modify_work);

	mlx5_accel_esp_modify_xfrm(sa_entry, &modify_work->attrs);
}

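/* xdo_dev_state_add callback: validate the state, create the hardware SA
 * context and steering rule, and publish the SA through the offload
 * handle (and, for RX states, the sadb_rx table).
 */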
static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
	struct net_device *netdev = x->xso.real_dev;
	struct mlx5e_priv *priv;
	int err;

	priv = netdev_priv(netdev);
	if (!priv->ipsec)
		return -EOPNOTSUPP;

	err = mlx5e_xfrm_validate_state(x);
	if (err)
		return err;

	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
	if (!sa_entry) {
		err = -ENOMEM;
		goto out;
	}

	sa_entry->x = x;
	sa_entry->ipsec = priv->ipsec;

	/* check esn */
	mlx5e_ipsec_update_esn_state(sa_entry);

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);
	/* create hw context */
	err = mlx5_ipsec_create_sa_ctx(sa_entry);
	if (err)
		goto err_xfrm;

	err = mlx5e_accel_ipsec_fs_add_rule(priv, sa_entry);
	if (err)
		goto err_hw_ctx;

	if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) {
		err = mlx5e_ipsec_sadb_rx_add(sa_entry);
		if (err)
			goto err_add_rule;
	} else {
		sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
				mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
	}

	INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state);
	x->xso.offload_handle = (unsigned long)sa_entry;
	goto out;

err_add_rule:
	mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
err_hw_ctx:
	mlx5_ipsec_free_sa_ctx(sa_entry);
err_xfrm:
	kfree(sa_entry);
out:
	return err;
}

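/* xdo_dev_state_delete callback: stop RX lookups from finding this SA. */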
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);

	if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
		mlx5e_ipsec_sadb_rx_del(sa_entry);
}

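/* xdo_dev_state_free callback: tear down the steering rule and hardware
 * SA context and free the driver SA entry.
 */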
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_priv *priv = netdev_priv(x->xso.dev);

	cancel_work_sync(&sa_entry->modify_work.work);
	mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
	mlx5_ipsec_free_sa_ctx(sa_entry);
	kfree(sa_entry);
}

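/* Set up per-netdev IPsec offload state: the RX SA table, an ordered
 * workqueue for deferred hardware updates, and the steering tables.
 */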
int mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec;
	int ret;

	if (!mlx5_ipsec_device_caps(priv->mdev)) {
		netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
		return 0;
	}

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		return -ENOMEM;

	hash_init(ipsec->sadb_rx);
	spin_lock_init(&ipsec->sadb_rx_lock);
	ipsec->mdev = priv->mdev;
	ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
					    priv->netdev->name);
	if (!ipsec->wq) {
		ret = -ENOMEM;
		goto err_wq;
	}

	ret = mlx5e_accel_ipsec_fs_init(ipsec);
	if (ret)
		goto err_fs_init;

	priv->ipsec = ipsec;
	netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
	return 0;

err_fs_init:
	destroy_workqueue(ipsec->wq);
err_wq:
	kfree(ipsec);
	return (ret != -EOPNOTSUPP) ? ret : 0;
}

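/* Tear down everything set up by mlx5e_ipsec_init(). */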
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;

	if (!ipsec)
		return;

	mlx5e_accel_ipsec_fs_cleanup(ipsec);
	destroy_workqueue(ipsec->wq);
	kfree(ipsec);
	priv->ipsec = NULL;
}

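/* xdo_dev_offload_ok callback: decline packets the hardware parser cannot
 * handle.
 */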
static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	if (x->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl > 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

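/* xdo_dev_state_advance_esn callback. It is invoked with the xfrm state
 * lock held, so the hardware update is deferred to the IPsec workqueue.
 */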
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec_modify_state_work *modify_work =
		&sa_entry->modify_work;
	bool need_update;

	need_update = mlx5e_ipsec_update_esn_state(sa_entry);
	if (!need_update)
		return;

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &modify_work->attrs);
	queue_work(sa_entry->ipsec->wq, &modify_work->work);
}

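/* Device offload callbacks invoked by the xfrm stack. */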
static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
	.xdo_dev_state_free	= mlx5e_xfrm_free_state,
	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
};

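/* Advertise ESP offload features on the netdev, gating checksum and GSO
 * support on the device's SWP capabilities.
 */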
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct net_device *netdev = priv->netdev;

	if (!mlx5_ipsec_device_caps(mdev))
		return;

	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
	netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
	netdev->features |= NETIF_F_HW_ESP;
	netdev->hw_enc_features |= NETIF_F_HW_ESP;

	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
		mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
		return;
	}

	netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
	netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;

	if (!MLX5_CAP_ETH(mdev, swp_lso)) {
		mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
		return;
	}

	netdev->gso_partial_features |= NETIF_F_GSO_ESP;
	mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
	netdev->features |= NETIF_F_GSO_ESP;
	netdev->hw_features |= NETIF_F_GSO_ESP;
	netdev->hw_enc_features |= NETIF_F_GSO_ESP;
}