// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include <linux/tls.h>
#include "en.h"
#include "en/txrx.h"
#include "en_accel/ktls.h"

enum {
	MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2 = 0x2,
};

enum {
	MLX5E_ENCRYPTION_STANDARD_TLS = 0x1,
};

#define EXTRACT_INFO_FIELDS do { \
	salt       = info->salt;    \
	rec_seq    = info->rec_seq; \
	salt_sz    = sizeof(info->salt);    \
	rec_seq_sz = sizeof(info->rec_seq); \
} while (0)

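/* Fill the static params context of a UMR WQE: the AES-GCM salt serves
 * as the GCM IV and the record sequence as the initial record number;
 * also set the TLS version, encryption standard and DEK index.
 */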
static void
fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
	char *initial_rn, *gcm_iv;
	u16 salt_sz, rec_seq_sz;
	char *salt, *rec_seq;
	u8 tls_version;

	EXTRACT_INFO_FIELDS;

	gcm_iv     = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
	initial_rn = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);

	memcpy(gcm_iv, salt, salt_sz);
	memcpy(initial_rn, rec_seq, rec_seq_sz);

	tls_version = MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2;

	MLX5_SET(tls_static_params, ctx, tls_version, tls_version);
	MLX5_SET(tls_static_params, ctx, const_1, 1);
	MLX5_SET(tls_static_params, ctx, const_2, 2);
	MLX5_SET(tls_static_params, ctx, encryption_standard,
		 MLX5E_ENCRYPTION_STANDARD_TLS);
	MLX5_SET(tls_static_params, ctx, dek_index, priv_tx->key_id);
}

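/* Build the UMR WQE that programs the TIS static params (opmod
 * TLS_TIS_STATIC_PARAMS). With @fence set, the WQE is fenced
 * (MLX5_FENCE_MODE_INITIATOR_SMALL) against previously posted WQEs.
 */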
static void
build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
		    struct mlx5e_ktls_offload_context_tx *priv_tx,
		    bool fence)
{
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;

#define STATIC_PARAMS_DS_CNT \
	DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_DS)

	cseg->opmod_idx_opcode = cpu_to_be32((pc << 8) | MLX5_OPCODE_UMR |
					     (MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS << 24));
	cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				   STATIC_PARAMS_DS_CNT);
	cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
	cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);

	ucseg->flags = MLX5_UMR_INLINE;
	ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);

	fill_static_params_ctx(wqe->tls_static_params_ctx, priv_tx);
}

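/* Fill the progress params context: bind it to the TIS and reset the
 * record tracker to the START state with auth state NO_OFFLOAD.
 */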
static void
fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn);
	MLX5_SET(tls_progress_params, ctx, record_tracker_state,
		 MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
	MLX5_SET(tls_progress_params, ctx, auth_state,
		 MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD);
}

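/* Build the SET_PSV WQE (opmod TLS_TIS_PROGRESS_PARAMS) that resets the
 * HW record tracking state, optionally fenced.
 */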
static void
build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      bool fence)
{
	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

#define PROGRESS_PARAMS_DS_CNT \
	DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_DS)

	cseg->opmod_idx_opcode =
		cpu_to_be32((pc << 8) | MLX5_OPCODE_SET_PSV |
			    (MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS << 24));
	cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				   PROGRESS_PARAMS_DS_CNT);
	cseg->fm_ce_se = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

	fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx);
}

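/* Save per-WQE bookkeeping so the completion path knows how many WQEBBs
 * to release, how many bytes to account for, and, for DUMP WQEs, which
 * frag page to put.
 */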
static void tx_fill_wi(struct mlx5e_txqsq *sq,
		       u16 pi, u8 num_wqebbs, u32 num_bytes,
		       struct page *page)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	memset(wi, 0, sizeof(*wi));
	wi->num_wqebbs = num_wqebbs;
	wi->num_bytes  = num_bytes;
	wi->resync_dump_frag_page = page;
}

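/* Request that the params WQEs be posted ahead of the connection's next
 * data packet.
 */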
void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	priv_tx->ctx_post_pending = true;
}

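/* Return the pending flag and clear it, so the params WQEs are posted
 * at most once per request.
 */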
static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	bool ret = priv_tx->ctx_post_pending;

	priv_tx->ctx_post_pending = false;

	return ret;
}

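/* Post the static params UMR WQE. Callers must first reserve enough
 * contiguous WQEBB room on the SQ (see mlx5e_ktls_tx_post_param_wqes()).
 */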
static void
post_static_params(struct mlx5e_txqsq *sq,
		   struct mlx5e_ktls_offload_context_tx *priv_tx,
		   bool fence)
{
	struct mlx5e_umr_wqe *umr_wqe;
	u16 pi;

	umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
	build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
	tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
	sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
}

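/* Post the progress params (SET_PSV) WQE; same room requirements as
 * post_static_params() above.
 */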
static void
post_progress_params(struct mlx5e_txqsq *sq,
		     struct mlx5e_ktls_offload_context_tx *priv_tx,
		     bool fence)
{
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
	build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
	tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
	sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
}

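/* Post the params WQEs, first filling the SQ frag edge with NOPs if the
 * pair does not fit contiguously. When the static post is skipped, the
 * progress WQE is always fenced; otherwise the static WQE is fenced
 * according to @fence_first_post and the progress WQE gets the inverse.
 */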
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
			      struct mlx5e_ktls_offload_context_tx *priv_tx,
			      bool skip_static_post, bool fence_first_post)
{
	bool progress_fence = skip_static_post || !fence_first_post;
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 contig_wqebbs_room, pi;

	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
	if (unlikely(contig_wqebbs_room <
		     MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS))
		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);

	if (!skip_static_post)
		post_static_params(sq, priv_tx, fence_first_post);

	post_progress_params(sq, priv_tx, progress_fence);
}

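/* Resync data for an out-of-order packet: the record number, the number
 * of bytes between the record start and the packet's TCP sequence
 * (sync_len), and the frags holding those bytes.
 */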
struct tx_sync_info {
	u64 rcd_sn;
	s32 sync_len;
	int nr_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

enum mlx5e_ktls_sync_retval {
	MLX5E_KTLS_SYNC_DONE,
	MLX5E_KTLS_SYNC_FAIL,
	MLX5E_KTLS_SYNC_SKIP_NO_DATA,
};

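/* Find the TLS record containing @tcp_seq and collect the frags covering
 * the bytes from the record start up to @tcp_seq, taking a page ref on
 * each. Returns SKIP_NO_DATA when the sequence predates the start marker
 * (nothing to replay), or FAIL when no suitable record is found.
 */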
static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
		 u32 tcp_seq, struct tx_sync_info *info)
{
	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
	enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
	struct tls_record_info *record;
	int remaining, i = 0;
	unsigned long flags;

	spin_lock_irqsave(&tx_ctx->lock, flags);
	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

	if (unlikely(!record)) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	if (unlikely(tcp_seq < tls_record_start_seq(record))) {
		ret = tls_record_is_start_marker(record) ?
		      MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	info->sync_len = tcp_seq - tls_record_start_seq(record);
	remaining = info->sync_len;
	while (remaining > 0) {
		skb_frag_t *frag = &record->frags[i];

		get_page(skb_frag_page(frag));
		remaining -= skb_frag_size(frag);
		info->frags[i++] = *frag;
	}
	/* reduce the part which will be sent with the original SKB */
	if (remaining < 0)
		skb_frag_size_add(&info->frags[i - 1], remaining);
	info->nr_frags = i;
out:
	spin_unlock_irqrestore(&tx_ctx->lock, flags);
	return ret;
}

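/* Re-post the params WQEs for a resync, using the record number of the
 * record being resumed. The static params post is elided when rec_seq
 * already matches, leaving only a fenced progress params post.
 */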
static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      u64 rcd_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
	__be64 rn_be = cpu_to_be64(rcd_sn);
	bool skip_static_post;
	u16 rec_seq_sz;
	char *rec_seq;

	rec_seq = info->rec_seq;
	rec_seq_sz = sizeof(info->rec_seq);

	skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
	if (!skip_static_post)
		memcpy(rec_seq, &rn_be, rec_seq_sz);

	mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}

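/* Post a DUMP WQE for a single frag: map the frag for DMA, build an
 * MLX5_OPCODE_DUMP WQE bound to @tisn, and save the frag page so that
 * the ref taken in tx_sync_info_get() is released on completion.
 */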
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_dump_wqe *wqe;
	dma_addr_t dma_addr = 0;
	u16 ds_cnt;
	int fsz;
	u16 pi;

	wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;

	cseg = &wqe->ctrl;
	dseg = &wqe->data;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	cseg->tisn = cpu_to_be32(tisn << 8);
	cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

	fsz = skb_frag_size(frag);
	dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
				    DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
		return -ENOMEM;

	dseg->addr = cpu_to_be64(dma_addr);
	dseg->lkey = sq->mkey_be;
	dseg->byte_count = cpu_to_be32(fsz);
	mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

	tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
	sq->pc += MLX5E_KTLS_DUMP_WQEBBS;

	return 0;
}

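/* Completion handler for DUMP WQEs: unmap the frag's DMA address,
 * release its page ref, and update the dump counters.
 */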
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
					   struct mlx5e_tx_wqe_info *wi,
					   u32 *dma_fifo_cc)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_sq_dma *dma;

	if (!wi->resync_dump_frag_page)
		return;

	dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
	stats = sq->stats;

	mlx5e_tx_dma_unmap(sq->pdev, dma);
	put_page(wi->resync_dump_frag_page);
	stats->tls_dump_packets++;
	stats->tls_dump_bytes += wi->num_bytes;
}

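/* Post a fenced NOP. Used when resync posts no DUMP WQEs, so that the
 * data WQE which follows still waits for the params WQEs to complete.
 */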
static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	tx_fill_wi(sq, pi, 1, 0, NULL);

	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}

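/* Handle a packet whose TCP sequence is not the expected one: find the
 * record it belongs to, re-post the params WQEs with that record's
 * sequence number, and replay the record bytes preceding the packet
 * with DUMP WQEs, splitting each frag into chunks of at most hw_mtu.
 */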
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
			 struct mlx5e_txqsq *sq,
			 int datalen,
			 u32 seq)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	struct mlx5_wq_cyc *wq = &sq->wq;
	enum mlx5e_ktls_sync_retval ret;
	struct tx_sync_info info = {};
	u16 contig_wqebbs_room, pi;
	u8 num_wqebbs;
	int i = 0;

	ret = tx_sync_info_get(priv_tx, seq, &info);
	if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
		if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
			stats->tls_skip_no_sync_data++;
			return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
		}
		/* We might get here if a retransmission reaches the driver
		 * after the relevant record is acked.
		 * It should be safe to drop the packet in this case
		 */
		stats->tls_drop_no_sync_data++;
		goto err_out;
	}

	if (unlikely(info.sync_len < 0)) {
		if (likely(datalen <= -info.sync_len))
			return MLX5E_KTLS_SYNC_DONE;

		stats->tls_drop_bypass_req++;
		goto err_out;
	}

	stats->tls_ooo++;

	tx_post_resync_params(sq, priv_tx, info.rcd_sn);

	/* If no dump WQE was sent, we need to have a fence NOP WQE before the
	 * actual data xmit.
	 */
	if (!info.nr_frags) {
		tx_post_fence_nop(sq);
		return MLX5E_KTLS_SYNC_DONE;
	}

	num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
	pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
	contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);

	if (unlikely(contig_wqebbs_room < num_wqebbs))
		mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);

	tx_post_resync_params(sq, priv_tx, info.rcd_sn);

	for (; i < info.nr_frags; i++) {
		unsigned int orig_fsz, frag_offset = 0, n = 0;
		skb_frag_t *f = &info.frags[i];

		orig_fsz = skb_frag_size(f);

		do {
			bool fence = !(i || frag_offset);
			unsigned int fsz;

			n++;
			fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
			skb_frag_size_set(f, fsz);
			if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
				page_ref_add(skb_frag_page(f), n - 1);
				goto err_out;
			}

			skb_frag_off_add(f, fsz);
			frag_offset += fsz;
		} while (frag_offset < orig_fsz);

		page_ref_add(skb_frag_page(f), n - 1);
	}

	return MLX5E_KTLS_SYNC_DONE;

err_out:
	for (; i < info.nr_frags; i++)
		/* The put_page() here undoes the page ref obtained in tx_sync_info_get().
		 * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
		 * released only upon their completions (or in mlx5e_free_txqsq_descs,
		 * if channel closes).
		 */
		put_page(skb_frag_page(&info.frags[i]));

	return MLX5E_KTLS_SYNC_FAIL;
}

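/* TX datapath entry point for kTLS SKBs: posts the params WQEs on the
 * connection's first transmission, runs the OOO resync flow when the
 * TCP sequence is unexpected, and binds the data WQE to the TISN so
 * the HW encrypts the payload.
 */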
struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
					 struct mlx5e_txqsq *sq,
					 struct sk_buff *skb,
					 struct mlx5e_tx_wqe **wqe, u16 *pi)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_sq_stats *stats = sq->stats;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct tls_context *tls_ctx;
	int datalen;
	u32 seq;

	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
		goto out;

	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
	if (!datalen)
		goto out;

	tls_ctx = tls_get_ctx(skb->sk);
	if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
		goto err_out;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
		*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
		stats->tls_ctx++;
	}

	seq = ntohl(tcp_hdr(skb)->seq);
	if (unlikely(priv_tx->expected_seq != seq)) {
		enum mlx5e_ktls_sync_retval ret =
			mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);

		if (likely(ret == MLX5E_KTLS_SYNC_DONE))
			*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
		else if (ret == MLX5E_KTLS_SYNC_FAIL)
			goto err_out;
		else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
			goto out;
	}

	priv_tx->expected_seq = seq + datalen;

	cseg = &(*wqe)->ctrl;
	cseg->tisn = cpu_to_be32(priv_tx->tisn << 8);

	stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	stats->tls_encrypted_bytes   += datalen;

out:
	return skb;

err_out:
	dev_kfree_skb_any(skb);
	return NULL;
}