// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include <net/inet6_hashtables.h>
#include "en_accel/en_accel.h"
#include "en_accel/tls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"

struct accel_rule {
	struct work_struct work;
	struct mlx5e_priv *priv;
	struct mlx5_flow_handle *rule;
};

#define PROGRESS_PARAMS_WRITE_UNIT	64
#define PROGRESS_PARAMS_PADDED_SIZE \
		(ALIGN(sizeof(struct mlx5_wqe_tls_progress_params_seg), \
		       PROGRESS_PARAMS_WRITE_UNIT))

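/* DMA buffer for a GET_PSV (progress params) response. Padded to a
 * multiple of PROGRESS_PARAMS_WRITE_UNIT, presumably so that the
 * device's 64-byte write units never spill past the buffer.
 */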
struct mlx5e_ktls_rx_resync_buf {
	union {
		struct mlx5_wqe_tls_progress_params_seg progress;
		u8 pad[PROGRESS_PARAMS_PADDED_SIZE];
	} ____cacheline_aligned_in_smp;
	dma_addr_t dma_addr;
	struct mlx5e_ktls_offload_context_rx *priv_rx;
};

enum {
	MLX5E_PRIV_RX_FLAG_DELETING,
	MLX5E_NUM_PRIV_RX_FLAGS,
};

struct mlx5e_ktls_rx_resync_ctx {
	struct tls_offload_resync_async core;
	struct work_struct work;
	struct mlx5e_priv *priv;
	refcount_t refcnt;
	__be64 sw_rcd_sn_be;
	u32 seq;
};

struct mlx5e_ktls_offload_context_rx {
	struct tls12_crypto_info_aes_gcm_128 crypto_info;
	struct accel_rule rule;
	struct sock *sk;
	struct mlx5e_rq_stats *stats;
	struct completion add_ctx;
	u32 tirn;
	u32 key_id;
	u32 rxq;
	DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);

	/* resync */
	struct mlx5e_ktls_rx_resync_ctx resync;
};

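/* Create a dedicated TIR for this connection, with TLS decryption enabled
 * (tls_en), pointing at the RQT of the RX queue the socket landed on.
 */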
static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn)
{
	int err, inlen;
	void *tirc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn);
	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, tls_en, 1);
	MLX5_SET(tirc, tirc, self_lb_block,
		 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST |
		 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST);

	err = mlx5_core_create_tir(mdev, in, tirn);

	kvfree(in);
	return err;
}

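/* Add the flow-steering rule that redirects the socket's 5-tuple to the
 * TLS TIR. Runs from a workqueue since rule insertion may sleep; it is
 * kicked off by the SET params WQE completion (see
 * mlx5e_ktls_handle_ctx_completion below).
 */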
static void accel_rule_handle_work(struct work_struct *work)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct accel_rule *accel_rule;
	struct mlx5_flow_handle *rule;

	accel_rule = container_of(work, struct accel_rule, work);
	priv_rx = container_of(accel_rule, struct mlx5e_ktls_offload_context_rx, rule);
	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		goto out;

	rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk,
				     priv_rx->tirn, MLX5_FS_DEFAULT_FLOW_TAG);
	if (!IS_ERR_OR_NULL(rule))
		accel_rule->rule = rule;
out:
	complete(&priv_rx->add_ctx);
}

static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv,
			    struct sock *sk)
{
	INIT_WORK(&rule->work, accel_rule_handle_work);
	rule->priv = priv;
}

static void icosq_fill_wi(struct mlx5e_icosq *sq, u16 pi,
			  struct mlx5e_icosq_wqe_info *wi)
{
	sq->db.wqe_info[pi] = *wi;
}

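/* Post a SET static params WQE: hands the crypto material (key id and
 * the record state taken from crypto_info) over to the TIR. Caller must
 * hold the async_icosq lock.
 */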
static struct mlx5_wqe_ctrl_seg *
post_static_params(struct mlx5e_icosq *sq,
		   struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	struct mlx5e_icosq_wqe_info wi;
	u16 pi, num_wqebbs, room;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	room = mlx5e_stop_room_for_wqe(num_wqebbs);
	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
		return ERR_PTR(-ENOSPC);

	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,
				       priv_rx->tirn, priv_rx->key_id,
				       priv_rx->resync.seq, false,
				       TLS_OFFLOAD_CTX_DIR_RX);
	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS,
		.num_wqebbs = num_wqebbs,
		.tls_set_params.priv_rx = priv_rx,
	};
	icosq_fill_wi(sq, pi, &wi);
	sq->pc += num_wqebbs;

	return &wqe->ctrl;
}

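/* Post a SET progress params WQE: tells the TIR at which TCP sequence
 * number the next TLS record starts, so the HW record tracker can begin.
 * Caller must hold the async_icosq lock.
 */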
static struct mlx5_wqe_ctrl_seg *
post_progress_params(struct mlx5e_icosq *sq,
		     struct mlx5e_ktls_offload_context_rx *priv_rx,
		     u32 next_record_tcp_sn)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	struct mlx5e_icosq_wqe_info wi;
	u16 pi, num_wqebbs, room;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	room = mlx5e_stop_room_for_wqe(num_wqebbs);
	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
		return ERR_PTR(-ENOSPC);

	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_rx->tirn, false,
					 next_record_tcp_sn,
					 TLS_OFFLOAD_CTX_DIR_RX);
	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_SET_PSV_TLS,
		.num_wqebbs = num_wqebbs,
		.tls_set_params.priv_rx = priv_rx,
	};

	icosq_fill_wi(sq, pi, &wi);
	sq->pc += num_wqebbs;

	return &wqe->ctrl;
}

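/* Post both static and progress params WQEs on the channel's async ICOSQ.
 * On failure, complete add_ctx immediately: no SET params completion will
 * arrive to do it (see mlx5e_ktls_handle_ctx_completion).
 */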
static int post_rx_param_wqes(struct mlx5e_channel *c,
			      struct mlx5e_ktls_offload_context_rx *priv_rx,
			      u32 next_record_tcp_sn)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_icosq *sq;
	int err;

	err = 0;
	sq = &c->async_icosq;
	spin_lock_bh(&c->async_icosq_lock);

	cseg = post_static_params(sq, priv_rx);
	if (IS_ERR(cseg))
		goto err_out;
	cseg = post_progress_params(sq, priv_rx, next_record_tcp_sn);
	if (IS_ERR(cseg))
		goto err_out;

	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
unlock:
	spin_unlock_bh(&c->async_icosq_lock);

	return err;

err_out:
	priv_rx->stats->tls_resync_req_skip++;
	err = PTR_ERR(cseg);
	complete(&priv_rx->add_ctx);
	goto unlock;
}

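/* Stash the driver RX context in the TLS core's per-connection driver
 * context area, so the NAPI and resync paths can get from a socket back
 * to priv_rx.
 */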
static void
mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_ktls_offload_context_rx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);

	BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_rx *) >
		     TLS_OFFLOAD_CONTEXT_SIZE_RX);

	*ctx = priv_rx;
}

static struct mlx5e_ktls_offload_context_rx *
mlx5e_get_ktls_rx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_rx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);

	return *ctx;
}

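/* Resync flow (driver-request async mode), roughly:
 * 1. HW loses sync and marks CQEs with CQE_TLS_OFFLOAD_RESYNC.
 * 2. resync_update_sn() (NAPI) finds the socket and starts an async
 *    resync request in the TLS core.
 * 3. resync_queue_get_psv() queues work that posts a GET_PSV WQE, asking
 *    the HW for the TCP sequence number it is trying to resync at.
 * 4. On GET_PSV completion, that sequence number is fed back to the TLS
 *    core, which looks up the matching record sequence number.
 * 5. When the stack finds it, mlx5e_ktls_rx_resync() re-posts the static
 *    params with the fresh record number, letting HW resume decryption.
 */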
/* Re-sync */
/* Runs in work context */
static int
resync_post_get_progress_params(struct mlx5e_icosq *sq,
				struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_get_tls_progress_params_wqe *wqe;
	struct mlx5e_ktls_rx_resync_buf *buf;
	struct mlx5e_icosq_wqe_info wi;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_seg_get_psv *psv;
	struct device *pdev;
	int err;
	u16 pi;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (unlikely(!buf)) {
		err = -ENOMEM;
		goto err_out;
	}

	pdev = mlx5_core_dma_dev(sq->channel->priv->mdev);
	buf->dma_addr = dma_map_single(pdev, &buf->progress,
				       PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
		err = -ENOMEM;
		goto err_free;
	}

	buf->priv_rx = priv_rx;

	BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);

	spin_lock_bh(&sq->channel->async_icosq_lock);

	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
		spin_unlock_bh(&sq->channel->async_icosq_lock);
		err = -ENOSPC;
		goto err_dma_unmap;
	}

	pi = mlx5e_icosq_get_next_pi(sq, 1);
	wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi);

#define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS))

	cseg = &wqe->ctrl;
	cseg->opmod_idx_opcode =
		cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_GET_PSV |
			    (MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS << 24));
	cseg->qpn_ds =
		cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | GET_PSV_DS_CNT);

	psv = &wqe->psv;
	psv->num_psv = 1 << 4;
	psv->l_key = sq->channel->mkey_be;
	psv->psv_index[0] = cpu_to_be32(priv_rx->tirn);
	psv->va = cpu_to_be64(buf->dma_addr);

	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS,
		.num_wqebbs = 1,
		.tls_get_params.buf = buf,
	};
	icosq_fill_wi(sq, pi, &wi);
	sq->pc++;
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	spin_unlock_bh(&sq->channel->async_icosq_lock);

	return 0;

err_dma_unmap:
	dma_unmap_single(pdev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
err_free:
	kfree(buf);
err_out:
	priv_rx->stats->tls_resync_req_skip++;
	return err;
}

/* Function is called with elevated refcount.
 * It decreases it only if no WQE is posted.
 */
static void resync_handle_work(struct work_struct *work)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5e_channel *c;
	struct mlx5e_icosq *sq;

	resync = container_of(work, struct mlx5e_ktls_rx_resync_ctx, work);
	priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync);

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
		refcount_dec(&resync->refcnt);
		return;
	}

	c = resync->priv->channels.c[priv_rx->rxq];
	sq = &c->async_icosq;

	if (resync_post_get_progress_params(sq, priv_rx))
		refcount_dec(&resync->refcnt);
}

static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
			struct mlx5e_priv *priv)
{
	INIT_WORK(&resync->work, resync_handle_work);
	resync->priv = priv;
	refcount_set(&resync->refcnt, 1);
}

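/* The SW-tracked record sequence number matched the TCP sequence the HW
 * asked about: copy it into crypto_info.rec_seq and re-post the static
 * params so the device can resume decryption.
 */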
/* Function can be called with the refcount being either elevated or not.
 * It does not affect the refcount.
 */
static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
				   struct mlx5e_channel *c)
{
	struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_icosq *sq;
	int err;

	memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
	err = 0;

	sq = &c->async_icosq;
	spin_lock_bh(&c->async_icosq_lock);

	cseg = post_static_params(sq, priv_rx);
	if (IS_ERR(cseg)) {
		priv_rx->stats->tls_resync_res_skip++;
		err = PTR_ERR(cseg);
		goto unlock;
	}
	/* Do not increment priv_rx refcnt, CQE handling is empty */
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	priv_rx->stats->tls_resync_res_ok++;
unlock:
	spin_unlock_bh(&c->async_icosq_lock);

	return err;
}

/* Function is called with elevated refcount, it decreases it. */
void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
					  struct mlx5e_icosq *sq)
{
	struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	u8 tracker_state, auth_state, *ctx;
	struct device *dev;
	u32 hw_seq;

	priv_rx = buf->priv_rx;
	resync = &priv_rx->resync;
	dev = mlx5_core_dma_dev(resync->priv->mdev);
	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		goto out;

	dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,
				DMA_FROM_DEVICE);

	ctx = buf->progress.ctx;
	tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state);
	auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
	if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
	    auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
		priv_rx->stats->tls_resync_req_skip++;
		goto out;
	}

	hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
	tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
	priv_rx->stats->tls_resync_req_end++;
out:
	refcount_dec(&resync->refcnt);
	dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
	kfree(buf);
}

/* Runs in NAPI.
 * Function elevates the refcount, unless no work is queued.
 */
static bool resync_queue_get_psv(struct sock *sk)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk));
	if (unlikely(!priv_rx))
		return false;

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		return false;

	resync = &priv_rx->resync;
	refcount_inc(&resync->refcnt);
	if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
		refcount_dec(&resync->refcnt);

	return true;
}

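/* A resync-marked CQE carries no socket pointer; recover it by parsing
 * the packet headers and doing an established-TCP lookup, then hand the
 * TCP sequence of this skb to the TLS core as the resync request start.
 */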
/* Runs in NAPI */
static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
	struct ethhdr *eth = (struct ethhdr *)(skb->data);
	struct net_device *netdev = rq->netdev;
	struct sock *sk = NULL;
	unsigned int datalen;
	struct iphdr *iph;
	struct tcphdr *th;
	__be32 seq;
	int depth = 0;

	__vlan_get_protocol(skb, eth->h_proto, &depth);
	iph = (struct iphdr *)(skb->data + depth);

	if (iph->version == 4) {
		depth += sizeof(struct iphdr);
		th = (void *)iph + sizeof(struct iphdr);

		sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
					     iph->saddr, th->source, iph->daddr,
					     th->dest, netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;

		depth += sizeof(struct ipv6hdr);
		th = (void *)ipv6h + sizeof(struct ipv6hdr);

		sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
						&ipv6h->saddr, th->source,
						&ipv6h->daddr, ntohs(th->dest),
						netdev->ifindex, 0);
#endif
	}

	depth += sizeof(struct tcphdr);

	if (unlikely(!sk))
		return;

	if (unlikely(sk->sk_state == TCP_TIME_WAIT))
		goto unref;

	if (unlikely(!resync_queue_get_psv(sk)))
		goto unref;

	seq = th->seq;
	datalen = skb->len - depth;
	tls_offload_rx_resync_async_request_start(sk, seq, datalen);
	rq->stats->tls_resync_req_start++;

unref:
	sock_gen_put(sk);
}

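/* Called by the TLS stack once it has found the record sequence number
 * matching the TCP sequence the HW asked about; push the sequence match
 * down to the device.
 */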
void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
			  u32 seq, u8 *rcd_sn)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5e_priv *priv;
	struct mlx5e_channel *c;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk));
	if (unlikely(!priv_rx))
		return;

	resync = &priv_rx->resync;
	resync->sw_rcd_sn_be = *(__be64 *)rcd_sn;
	resync->seq = seq;

	priv = netdev_priv(netdev);
	c = priv->channels.c[priv_rx->rxq];

	resync_handle_seq_match(priv_rx, c);
}

/* End of resync section */

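/* Per-packet kTLS CQE handling: mark the skb decrypted, or kick off
 * resync if the HW flagged this flow as out of sync.
 */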
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
{
	struct mlx5e_rq_stats *stats = rq->stats;

	switch (get_cqe_tls_offload(cqe)) {
	case CQE_TLS_OFFLOAD_DECRYPTED:
		skb->decrypted = 1;
		stats->tls_decrypted_packets++;
		stats->tls_decrypted_bytes += *cqe_bcnt;
		break;
	case CQE_TLS_OFFLOAD_RESYNC:
		stats->tls_resync_req_pkt++;
		resync_update_sn(rq, skb);
		break;
	default: /* CQE_TLS_OFFLOAD_ERROR: */
		stats->tls_err++;
		break;
	}
}

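/* SET params WQE completed. If the context is not being deleted, queue
 * the flow-steering rule work; otherwise complete add_ctx so that
 * mlx5e_ktls_del_rx() stops waiting.
 */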
void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx = wi->tls_set_params.priv_rx;
	struct accel_rule *rule = &priv_rx->rule;

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
		complete(&priv_rx->add_ctx);
		return;
	}
	queue_work(rule->priv->tls->rx_wq, &rule->work);
}

static int mlx5e_ktls_sk_get_rxq(struct sock *sk)
{
	int rxq = sk_rx_queue_get(sk);

	if (unlikely(rxq == -1))
		rxq = 0;

	return rxq;
}

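/* Add flow (reached, presumably, via the driver's tls_dev_add op for RX):
 * create the HW key and TIR, register the driver context on the socket's
 * tls_ctx, arm the resync machinery, and post the static/progress params
 * to the device.
 */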
int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info,
		      u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct tls_context *tls_ctx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;
	int rxq, err;
	u32 rqtn;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	priv_rx = kzalloc(sizeof(*priv_rx), GFP_KERNEL);
	if (unlikely(!priv_rx))
		return -ENOMEM;

	err = mlx5_ktls_create_key(mdev, crypto_info, &priv_rx->key_id);
	if (err)
		goto err_create_key;

	priv_rx->crypto_info =
		*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;

	rxq = mlx5e_ktls_sk_get_rxq(sk);
	priv_rx->rxq = rxq;
	priv_rx->sk = sk;

	priv_rx->stats = &priv->channel_stats[rxq].rq;
	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);

	rqtn = priv->direct_tir[rxq].rqt.rqtn;

	err = mlx5e_ktls_create_tir(mdev, &priv_rx->tirn, rqtn);
	if (err)
		goto err_create_tir;

	init_completion(&priv_rx->add_ctx);

	accel_rule_init(&priv_rx->rule, priv, sk);
	resync = &priv_rx->resync;
	resync_init(resync, priv);
	tls_offload_ctx_rx(tls_ctx)->resync_async = &resync->core;
	tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC);

	err = post_rx_param_wqes(priv->channels.c[rxq], priv_rx, start_offload_tcp_sn);
	if (err)
		goto err_post_wqes;

	priv_rx->stats->tls_ctx++;

	return 0;

err_post_wqes:
	mlx5_core_destroy_tir(mdev, priv_rx->tirn);
err_create_tir:
	mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
err_create_key:
	kfree(priv_rx);
	return err;
}

/* Elevated refcount on the resync object means there are
 * outstanding operations (uncompleted GET_PSV WQEs) that
 * will read the resync / priv_rx objects once completed.
 * Wait for them to avoid use-after-free.
 */
static void wait_for_resync(struct net_device *netdev,
			    struct mlx5e_ktls_rx_resync_ctx *resync)
{
#define MLX5E_KTLS_RX_RESYNC_TIMEOUT 20000 /* msecs */
	unsigned long exp_time = jiffies + msecs_to_jiffies(MLX5E_KTLS_RX_RESYNC_TIMEOUT);
	unsigned int refcnt;

	do {
		refcnt = refcount_read(&resync->refcnt);
		if (refcnt == 1)
			return;

		msleep(20);
	} while (time_before(jiffies, exp_time));

	netdev_warn(netdev,
		    "Failed waiting for kTLS RX resync refcnt to be released (%u).\n",
		    refcnt);
}

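/* Teardown: mark the context as deleting, detach it from the socket, and
 * synchronize with NAPI before tearing down the rule, TIR and key. The
 * DELETING bit makes in-flight work and completions bail out early.
 */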
void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);
	mdev = priv->mdev;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
	set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL);
	synchronize_rcu(); /* Sync with NAPI */
	if (!cancel_work_sync(&priv_rx->rule.work))
		/* completion is needed, as the priv_rx in the add flow
		 * is maintained on the wqe info (wi), not on the socket.
		 */
		wait_for_completion(&priv_rx->add_ctx);
	resync = &priv_rx->resync;
	if (cancel_work_sync(&resync->work))
		refcount_dec(&resync->refcnt);
	wait_for_resync(netdev, resync);

	priv_rx->stats->tls_del++;
	if (priv_rx->rule.rule)
		mlx5e_accel_fs_del_sk(priv_rx->rule.rule);

	mlx5_core_destroy_tir(mdev, priv_rx->tirn);
	mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
	kfree(priv_rx);
}