1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
3 #include <crypto/akcipher.h>
4 #include <crypto/curve25519.h>
5 #include <crypto/dh.h>
6 #include <crypto/ecc_curve.h>
7 #include <crypto/ecdh.h>
8 #include <crypto/rng.h>
9 #include <crypto/internal/akcipher.h>
10 #include <crypto/internal/kpp.h>
11 #include <crypto/internal/rsa.h>
12 #include <crypto/kpp.h>
13 #include <crypto/scatterwalk.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/fips.h>
16 #include <linux/module.h>
17 #include <linux/time.h>
18 #include "hpre.h"
19
20 struct hpre_ctx;
21
22 #define HPRE_CRYPTO_ALG_PRI 1000
23 #define HPRE_ALIGN_SZ 64
24 #define HPRE_BITS_2_BYTES_SHIFT 3
25 #define HPRE_RSA_512BITS_KSZ 64
26 #define HPRE_RSA_1536BITS_KSZ 192
27 #define HPRE_CRT_PRMS 5
28 #define HPRE_CRT_Q 2
29 #define HPRE_CRT_P 3
30 #define HPRE_CRT_INV 4
31 #define HPRE_DH_G_FLAG 0x02
32 #define HPRE_TRY_SEND_TIMES 100
33 #define HPRE_INVLD_REQ_ID (-1)
34
35 #define HPRE_SQE_ALG_BITS 5
36 #define HPRE_SQE_DONE_SHIFT 30
37 #define HPRE_DH_MAX_P_SZ 512
38
39 #define HPRE_DFX_SEC_TO_US 1000000
40 #define HPRE_DFX_US_TO_NS 1000
41
42 /* due to nist p521 */
43 #define HPRE_ECC_MAX_KSZ 66
44
45 /* size in bytes of the n prime */
46 #define HPRE_ECC_NIST_P192_N_SIZE 24
47 #define HPRE_ECC_NIST_P256_N_SIZE 32
48 #define HPRE_ECC_NIST_P384_N_SIZE 48
49
50 /* size in bytes */
51 #define HPRE_ECC_HW256_KSZ_B 32
52 #define HPRE_ECC_HW384_KSZ_B 48
53
54 /* capability register mask of driver */
55 #define HPRE_DRV_RSA_MASK_CAP BIT(0)
56 #define HPRE_DRV_DH_MASK_CAP BIT(1)
57 #define HPRE_DRV_ECDH_MASK_CAP BIT(2)
58 #define HPRE_DRV_X25519_MASK_CAP BIT(5)
59
60 typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
61
62 struct hpre_rsa_ctx {
63 /* low address: e--->n */
64 char *pubkey;
65 dma_addr_t dma_pubkey;
66
67 /* low address: d--->n */
68 char *prikey;
69 dma_addr_t dma_prikey;
70
71 /* low address: dq->dp->q->p->qinv */
72 char *crt_prikey;
73 dma_addr_t dma_crt_prikey;
74
75 struct crypto_akcipher *soft_tfm;
76 };
77
78 struct hpre_dh_ctx {
79 /*
80 * If base is g we compute the public key
81 * ya = g^xa mod p; [RFC2631 sec 2.1.1]
82 * else if base is the counterpart public key we
83 * compute the shared secret
84 * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
85 * low address: d--->n, please refer to Hisilicon HPRE UM
86 */
87 char *xa_p;
88 dma_addr_t dma_xa_p;
89
90 char *g; /* m */
91 dma_addr_t dma_g;
92 };
93
94 struct hpre_ecdh_ctx {
95 /* low address: p->a->k->b */
96 unsigned char *p;
97 dma_addr_t dma_p;
98
99 /* low address: x->y */
100 unsigned char *g;
101 dma_addr_t dma_g;
102 };
103
104 struct hpre_curve25519_ctx {
105 /* low address: p->a->k */
106 unsigned char *p;
107 dma_addr_t dma_p;
108
109 /* gx coordinate */
110 unsigned char *g;
111 dma_addr_t dma_g;
112 };
113
114 struct hpre_ctx {
115 struct hisi_qp *qp;
116 struct device *dev;
117 struct hpre_asym_request **req_list;
118 struct hpre *hpre;
119 spinlock_t req_lock;
120 unsigned int key_sz;
121 bool crt_g2_mode;
122 struct idr req_idr;
123 union {
124 struct hpre_rsa_ctx rsa;
125 struct hpre_dh_ctx dh;
126 struct hpre_ecdh_ctx ecdh;
127 struct hpre_curve25519_ctx curve25519;
128 };
129 /* for ecc algorithms */
130 unsigned int curve_id;
131 };
132
133 struct hpre_asym_request {
134 char *src;
135 char *dst;
136 struct hpre_sqe req;
137 struct hpre_ctx *ctx;
138 union {
139 struct akcipher_request *rsa;
140 struct kpp_request *dh;
141 struct kpp_request *ecdh;
142 struct kpp_request *curve25519;
143 } areq;
144 int err;
145 int req_id;
146 hpre_cb cb;
147 struct timespec64 req_time;
148 };
149
150 static int hpre_alloc_req_id(struct hpre_ctx *ctx)
151 {
152 unsigned long flags;
153 int id;
154
155 spin_lock_irqsave(&ctx->req_lock, flags);
156 id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
157 spin_unlock_irqrestore(&ctx->req_lock, flags);
158
159 return id;
160 }
161
162 static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
163 {
164 unsigned long flags;
165
166 spin_lock_irqsave(&ctx->req_lock, flags);
167 idr_remove(&ctx->req_idr, req_id);
168 spin_unlock_irqrestore(&ctx->req_lock, flags);
169 }
170
171 static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
172 {
173 struct hpre_ctx *ctx;
174 struct hpre_dfx *dfx;
175 int id;
176
177 ctx = hpre_req->ctx;
178 id = hpre_alloc_req_id(ctx);
179 if (unlikely(id < 0))
180 return -EINVAL;
181
182 ctx->req_list[id] = hpre_req;
183 hpre_req->req_id = id;
184
185 dfx = ctx->hpre->debug.dfx;
186 if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
187 ktime_get_ts64(&hpre_req->req_time);
188
189 return id;
190 }
191
192 static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
193 {
194 struct hpre_ctx *ctx = hpre_req->ctx;
195 int id = hpre_req->req_id;
196
197 if (hpre_req->req_id >= 0) {
198 hpre_req->req_id = HPRE_INVLD_REQ_ID;
199 ctx->req_list[id] = NULL;
200 hpre_free_req_id(ctx, id);
201 }
202 }
203
204 static struct hisi_qp *hpre_get_qp_and_start(u8 type)
205 {
206 struct hisi_qp *qp;
207 int ret;
208
209 qp = hpre_create_qp(type);
210 if (!qp) {
211 pr_err("Can not create hpre qp!\n");
212 return ERR_PTR(-ENODEV);
213 }
214
215 ret = hisi_qm_start_qp(qp, 0);
216 if (ret < 0) {
217 hisi_qm_free_qps(&qp, 1);
218 pci_err(qp->qm->pdev, "Can not start qp!\n");
219 return ERR_PTR(-EINVAL);
220 }
221
222 return qp;
223 }
224
225 static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
226 struct scatterlist *data, unsigned int len,
227 int is_src, dma_addr_t *tmp)
228 {
229 struct device *dev = hpre_req->ctx->dev;
230 enum dma_data_direction dma_dir;
231
232 if (is_src) {
233 hpre_req->src = NULL;
234 dma_dir = DMA_TO_DEVICE;
235 } else {
236 hpre_req->dst = NULL;
237 dma_dir = DMA_FROM_DEVICE;
238 }
239 *tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
240 if (unlikely(dma_mapping_error(dev, *tmp))) {
241 dev_err(dev, "dma map data err!\n");
242 return -ENOMEM;
243 }
244
245 return 0;
246 }
247
248 static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
249 struct scatterlist *data, unsigned int len,
250 int is_src, dma_addr_t *tmp)
251 {
252 struct hpre_ctx *ctx = hpre_req->ctx;
253 struct device *dev = ctx->dev;
254 void *ptr;
255 int shift;
256
257 shift = ctx->key_sz - len;
258 if (unlikely(shift < 0))
259 return -EINVAL;
260
261 ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC);
262 if (unlikely(!ptr))
263 return -ENOMEM;
264
265 if (is_src) {
266 scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
267 hpre_req->src = ptr;
268 } else {
269 hpre_req->dst = ptr;
270 }
271
272 return 0;
273 }
274
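/*
 * Prepare one data buffer of a request for hardware: map the scatterlist
 * directly when it is a single entry of exactly key_sz bytes (and not a DH
 * source that still needs padding), otherwise copy it into a zero-padded
 * coherent buffer of key_sz bytes.
 */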
275 static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
276 struct scatterlist *data, unsigned int len,
277 int is_src, int is_dh)
278 {
279 struct hpre_sqe *msg = &hpre_req->req;
280 struct hpre_ctx *ctx = hpre_req->ctx;
281 dma_addr_t tmp = 0;
282 int ret;
283
284 /* when the data is dh's source, we should format it */
285 if ((sg_is_last(data) && len == ctx->key_sz) &&
286 ((is_dh && !is_src) || !is_dh))
287 ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
288 else
289 ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);
290
291 if (unlikely(ret))
292 return ret;
293
294 if (is_src)
295 msg->in = cpu_to_le64(tmp);
296 else
297 msg->out = cpu_to_le64(tmp);
298
299 return 0;
300 }
301
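/*
 * Undo hpre_hw_data_init(): free the bounce buffers (copying the result
 * back to the destination scatterlist first) or unmap the directly mapped
 * scatterlists.
 */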
302 static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
303 struct hpre_asym_request *req,
304 struct scatterlist *dst,
305 struct scatterlist *src)
306 {
307 struct device *dev = ctx->dev;
308 struct hpre_sqe *sqe = &req->req;
309 dma_addr_t tmp;
310
311 tmp = le64_to_cpu(sqe->in);
312 if (unlikely(dma_mapping_error(dev, tmp)))
313 return;
314
315 if (src) {
316 if (req->src)
317 dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
318 else
319 dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
320 }
321
322 tmp = le64_to_cpu(sqe->out);
323 if (unlikely(dma_mapping_error(dev, tmp)))
324 return;
325
326 if (req->dst) {
327 if (dst)
328 scatterwalk_map_and_copy(req->dst, dst, 0,
329 ctx->key_sz, 1);
330 dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
331 } else {
332 dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
333 }
334 }
335
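/*
 * Handle one hardware completion: look the request up by the sqe tag,
 * remove it from the context, and turn the 'done' and error fields of dw0
 * into 0 on success or -EINVAL on a hardware error.
 */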
336 static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
337 void **kreq)
338 {
339 struct hpre_asym_request *req;
340 unsigned int err, done, alg;
341 int id;
342
343 #define HPRE_NO_HW_ERR 0
344 #define HPRE_HW_TASK_DONE 3
345 #define HREE_HW_ERR_MASK GENMASK(10, 0)
346 #define HREE_SQE_DONE_MASK GENMASK(1, 0)
347 #define HREE_ALG_TYPE_MASK GENMASK(4, 0)
348 id = (int)le16_to_cpu(sqe->tag);
349 req = ctx->req_list[id];
350 hpre_rm_req_from_ctx(req);
351 *kreq = req;
352
353 err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
354 HREE_HW_ERR_MASK;
355
356 done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
357 HREE_SQE_DONE_MASK;
358
359 if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
360 return 0;
361
362 alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;
363 dev_err_ratelimited(ctx->dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
364 alg, done, err);
365
366 return -EINVAL;
367 }
368
369 static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
370 {
371 struct hpre *hpre;
372
373 if (!ctx || !qp || qlen < 0)
374 return -EINVAL;
375
376 spin_lock_init(&ctx->req_lock);
377 ctx->qp = qp;
378 ctx->dev = &qp->qm->pdev->dev;
379
380 hpre = container_of(ctx->qp->qm, struct hpre, qm);
381 ctx->hpre = hpre;
382 ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
383 if (!ctx->req_list)
384 return -ENOMEM;
385 ctx->key_sz = 0;
386 ctx->crt_g2_mode = false;
387 idr_init(&ctx->req_idr);
388
389 return 0;
390 }
391
392 static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
393 {
394 if (is_clear_all) {
395 idr_destroy(&ctx->req_idr);
396 kfree(ctx->req_list);
397 hisi_qm_free_qps(&ctx->qp, 1);
398 }
399
400 ctx->crt_g2_mode = false;
401 ctx->key_sz = 0;
402 }
403
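/* Check whether a request stayed in hardware longer than the DFX overtime threshold, in microseconds. */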
404 static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
405 u64 overtime_thrhld)
406 {
407 struct timespec64 reply_time;
408 u64 time_use_us;
409
410 ktime_get_ts64(&reply_time);
411 time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
412 HPRE_DFX_SEC_TO_US +
413 (reply_time.tv_nsec - req->req_time.tv_nsec) /
414 HPRE_DFX_US_TO_NS;
415
416 if (time_use_us <= overtime_thrhld)
417 return false;
418
419 return true;
420 }
421
422 static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
423 {
424 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
425 struct hpre_asym_request *req;
426 struct kpp_request *areq;
427 u64 overtime_thrhld;
428 int ret;
429
430 ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
431 areq = req->areq.dh;
432 areq->dst_len = ctx->key_sz;
433
434 overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
435 if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
436 atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
437
438 hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
439 kpp_request_complete(areq, ret);
440 atomic64_inc(&dfx[HPRE_RECV_CNT].value);
441 }
442
443 static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
444 {
445 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
446 struct hpre_asym_request *req;
447 struct akcipher_request *areq;
448 u64 overtime_thrhld;
449 int ret;
450
451 ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
452
453 overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
454 if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
455 atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
456
457 areq = req->areq.rsa;
458 areq->dst_len = ctx->key_sz;
459 hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
460 akcipher_request_complete(areq, ret);
461 atomic64_inc(&dfx[HPRE_RECV_CNT].value);
462 }
463
464 static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
465 {
466 struct hpre_ctx *ctx = qp->qp_ctx;
467 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
468 struct hpre_sqe *sqe = resp;
469 struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
470
471 if (unlikely(!req)) {
472 atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
473 return;
474 }
475
476 req->cb(ctx, resp);
477 }
478
479 static void hpre_stop_qp_and_put(struct hisi_qp *qp)
480 {
481 hisi_qm_stop_qp(qp);
482 hisi_qm_free_qps(&qp, 1);
483 }
484
485 static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
486 {
487 struct hisi_qp *qp;
488 int ret;
489
490 qp = hpre_get_qp_and_start(type);
491 if (IS_ERR(qp))
492 return PTR_ERR(qp);
493
494 qp->qp_ctx = ctx;
495 qp->req_cb = hpre_alg_cb;
496
497 ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
498 if (ret)
499 hpre_stop_qp_and_put(qp);
500
501 return ret;
502 }
503
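/*
 * Fill the common fields of an RSA or DH sqe: bind the completion callback,
 * mark in/out as unmapped, derive task_len1 from key_sz and allocate a
 * request id that is stored in the tag field.
 */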
504 static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
505 {
506 struct hpre_asym_request *h_req;
507 struct hpre_sqe *msg;
508 int req_id;
509 void *tmp;
510
511 if (is_rsa) {
512 struct akcipher_request *akreq = req;
513
514 if (akreq->dst_len < ctx->key_sz) {
515 akreq->dst_len = ctx->key_sz;
516 return -EOVERFLOW;
517 }
518
519 tmp = akcipher_request_ctx(akreq);
520 h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
521 h_req->cb = hpre_rsa_cb;
522 h_req->areq.rsa = akreq;
523 msg = &h_req->req;
524 memset(msg, 0, sizeof(*msg));
525 } else {
526 struct kpp_request *kreq = req;
527
528 if (kreq->dst_len < ctx->key_sz) {
529 kreq->dst_len = ctx->key_sz;
530 return -EOVERFLOW;
531 }
532
533 tmp = kpp_request_ctx(kreq);
534 h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
535 h_req->cb = hpre_dh_cb;
536 h_req->areq.dh = kreq;
537 msg = &h_req->req;
538 memset(msg, 0, sizeof(*msg));
539 msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
540 }
541
542 msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
543 msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
544 msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
545 msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
546 h_req->ctx = ctx;
547
548 req_id = hpre_add_req_to_ctx(h_req);
549 if (req_id < 0)
550 return -EBUSY;
551
552 msg->tag = cpu_to_le16((u16)req_id);
553
554 return 0;
555 }
556
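/*
 * Queue one sqe on the QP, retrying up to HPRE_TRY_SEND_TIMES while the
 * queue reports -EBUSY, and update the send/busy/fail DFX counters.
 */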
557 static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
558 {
559 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
560 int ctr = 0;
561 int ret;
562
563 do {
564 atomic64_inc(&dfx[HPRE_SEND_CNT].value);
565 ret = hisi_qp_send(ctx->qp, msg);
566 if (ret != -EBUSY)
567 break;
568 atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
569 } while (ctr++ < HPRE_TRY_SEND_TIMES);
570
571 if (likely(!ret))
572 return ret;
573
574 if (ret != -EBUSY)
575 atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);
576
577 return ret;
578 }
579
580 static int hpre_dh_compute_value(struct kpp_request *req)
581 {
582 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
583 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
584 void *tmp = kpp_request_ctx(req);
585 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
586 struct hpre_sqe *msg = &hpre_req->req;
587 int ret;
588
589 ret = hpre_msg_request_set(ctx, req, false);
590 if (unlikely(ret))
591 return ret;
592
593 if (req->src) {
594 ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
595 if (unlikely(ret))
596 goto clear_all;
597 } else {
598 msg->in = cpu_to_le64(ctx->dh.dma_g);
599 }
600
601 ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
602 if (unlikely(ret))
603 goto clear_all;
604
605 if (ctx->crt_g2_mode && !req->src)
606 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
607 else
608 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
609
610 /* success */
611 ret = hpre_send(ctx, msg);
612 if (likely(!ret))
613 return -EINPROGRESS;
614
615 clear_all:
616 hpre_rm_req_from_ctx(hpre_req);
617 hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
618
619 return ret;
620 }
621
622 static int hpre_is_dh_params_length_valid(unsigned int key_sz)
623 {
624 #define _HPRE_DH_GRP1 768
625 #define _HPRE_DH_GRP2 1024
626 #define _HPRE_DH_GRP5 1536
627 #define _HPRE_DH_GRP14 2048
628 #define _HPRE_DH_GRP15 3072
629 #define _HPRE_DH_GRP16 4096
630 switch (key_sz) {
631 case _HPRE_DH_GRP1:
632 case _HPRE_DH_GRP2:
633 case _HPRE_DH_GRP5:
634 case _HPRE_DH_GRP14:
635 case _HPRE_DH_GRP15:
636 case _HPRE_DH_GRP16:
637 return 0;
638 default:
639 return -EINVAL;
640 }
641 }
642
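/*
 * Check the DH group size and store p in the upper half of the xa_p buffer;
 * g is copied into its own buffer unless it equals 2, in which case the
 * hardware g2 mode is used and g is not stored.
 */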
643 static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
644 {
645 struct device *dev = ctx->dev;
646 unsigned int sz;
647
648 if (params->p_size > HPRE_DH_MAX_P_SZ)
649 return -EINVAL;
650
651 if (hpre_is_dh_params_length_valid(params->p_size <<
652 HPRE_BITS_2_BYTES_SHIFT))
653 return -EINVAL;
654
655 sz = ctx->key_sz = params->p_size;
656 ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
657 &ctx->dh.dma_xa_p, GFP_KERNEL);
658 if (!ctx->dh.xa_p)
659 return -ENOMEM;
660
661 memcpy(ctx->dh.xa_p + sz, params->p, sz);
662
663 /* If g equals 2 don't copy it */
664 if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
665 ctx->crt_g2_mode = true;
666 return 0;
667 }
668
669 ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
670 if (!ctx->dh.g) {
671 dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
672 ctx->dh.dma_xa_p);
673 ctx->dh.xa_p = NULL;
674 return -ENOMEM;
675 }
676
677 memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);
678
679 return 0;
680 }
681
682 static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
683 {
684 struct device *dev = ctx->dev;
685 unsigned int sz = ctx->key_sz;
686
687 if (is_clear_all)
688 hisi_qm_stop_qp(ctx->qp);
689
690 if (ctx->dh.g) {
691 dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
692 ctx->dh.g = NULL;
693 }
694
695 if (ctx->dh.xa_p) {
696 memzero_explicit(ctx->dh.xa_p, sz);
697 dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
698 ctx->dh.dma_xa_p);
699 ctx->dh.xa_p = NULL;
700 }
701
702 hpre_ctx_clear(ctx, is_clear_all);
703 }
704
705 static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
706 unsigned int len)
707 {
708 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
709 struct dh params;
710 int ret;
711
712 if (crypto_dh_decode_key(buf, len, &params) < 0)
713 return -EINVAL;
714
715 /* Free old secret if any */
716 hpre_dh_clear_ctx(ctx, false);
717
718 ret = hpre_dh_set_params(ctx, &params);
719 if (ret < 0)
720 goto err_clear_ctx;
721
722 memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
723 params.key_size);
724
725 return 0;
726
727 err_clear_ctx:
728 hpre_dh_clear_ctx(ctx, false);
729 return ret;
730 }
731
732 static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
733 {
734 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
735
736 return ctx->key_sz;
737 }
738
739 static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
740 {
741 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
742
743 return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
744 }
745
746 static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
747 {
748 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
749
750 hpre_dh_clear_ctx(ctx, true);
751 }
752
753 static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
754 {
755 while (!**ptr && *len) {
756 (*ptr)++;
757 (*len)--;
758 }
759 }
760
761 static bool hpre_rsa_key_size_is_support(unsigned int len)
762 {
763 unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;
764
765 #define _RSA_1024BITS_KEY_WDTH 1024
766 #define _RSA_2048BITS_KEY_WDTH 2048
767 #define _RSA_3072BITS_KEY_WDTH 3072
768 #define _RSA_4096BITS_KEY_WDTH 4096
769
770 switch (bits) {
771 case _RSA_1024BITS_KEY_WDTH:
772 case _RSA_2048BITS_KEY_WDTH:
773 case _RSA_3072BITS_KEY_WDTH:
774 case _RSA_4096BITS_KEY_WDTH:
775 return true;
776 default:
777 return false;
778 }
779 }
780
781 static int hpre_rsa_enc(struct akcipher_request *req)
782 {
783 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
784 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
785 void *tmp = akcipher_request_ctx(req);
786 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
787 struct hpre_sqe *msg = &hpre_req->req;
788 int ret;
789
790 /* For 512 and 1536 bits key size, use soft tfm instead */
791 if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
792 ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
793 akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
794 ret = crypto_akcipher_encrypt(req);
795 akcipher_request_set_tfm(req, tfm);
796 return ret;
797 }
798
799 if (unlikely(!ctx->rsa.pubkey))
800 return -EINVAL;
801
802 ret = hpre_msg_request_set(ctx, req, true);
803 if (unlikely(ret))
804 return ret;
805
806 msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
807 msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);
808
809 ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
810 if (unlikely(ret))
811 goto clear_all;
812
813 ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
814 if (unlikely(ret))
815 goto clear_all;
816
817 /* success */
818 ret = hpre_send(ctx, msg);
819 if (likely(!ret))
820 return -EINPROGRESS;
821
822 clear_all:
823 hpre_rm_req_from_ctx(hpre_req);
824 hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
825
826 return ret;
827 }
828
829 static int hpre_rsa_dec(struct akcipher_request *req)
830 {
831 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
832 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
833 void *tmp = akcipher_request_ctx(req);
834 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
835 struct hpre_sqe *msg = &hpre_req->req;
836 int ret;
837
838 /* For 512 and 1536 bits key size, use soft tfm instead */
839 if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
840 ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
841 akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
842 ret = crypto_akcipher_decrypt(req);
843 akcipher_request_set_tfm(req, tfm);
844 return ret;
845 }
846
847 if (unlikely(!ctx->rsa.prikey))
848 return -EINVAL;
849
850 ret = hpre_msg_request_set(ctx, req, true);
851 if (unlikely(ret))
852 return ret;
853
854 if (ctx->crt_g2_mode) {
855 msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
856 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
857 HPRE_ALG_NC_CRT);
858 } else {
859 msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
860 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
861 HPRE_ALG_NC_NCRT);
862 }
863
864 ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
865 if (unlikely(ret))
866 goto clear_all;
867
868 ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
869 if (unlikely(ret))
870 goto clear_all;
871
872 /* success */
873 ret = hpre_send(ctx, msg);
874 if (likely(!ret))
875 return -EINPROGRESS;
876
877 clear_all:
878 hpre_rm_req_from_ctx(hpre_req);
879 hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
880
881 return ret;
882 }
883
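/*
 * Record the key size from the modulus n. For sizes the hardware supports,
 * allocate the public (and, for private keys, private) key buffers with n
 * stored in the upper half; return 1 to use hardware RSA, 0 to fall back to
 * the software tfm.
 */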
884 static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
885 size_t vlen, bool private)
886 {
887 const char *ptr = value;
888
889 hpre_rsa_drop_leading_zeros(&ptr, &vlen);
890
891 ctx->key_sz = vlen;
892
893 /* if invalid key size provided, we use software tfm */
894 if (!hpre_rsa_key_size_is_support(ctx->key_sz))
895 return 0;
896
897 ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
898 &ctx->rsa.dma_pubkey,
899 GFP_KERNEL);
900 if (!ctx->rsa.pubkey)
901 return -ENOMEM;
902
903 if (private) {
904 ctx->rsa.prikey = dma_alloc_coherent(ctx->dev, vlen << 1,
905 &ctx->rsa.dma_prikey,
906 GFP_KERNEL);
907 if (!ctx->rsa.prikey) {
908 dma_free_coherent(ctx->dev, vlen << 1,
909 ctx->rsa.pubkey,
910 ctx->rsa.dma_pubkey);
911 ctx->rsa.pubkey = NULL;
912 return -ENOMEM;
913 }
914 memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
915 }
916 memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);
917
918 /* Using hardware HPRE to do RSA */
919 return 1;
920 }
921
922 static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
923 size_t vlen)
924 {
925 const char *ptr = value;
926
927 hpre_rsa_drop_leading_zeros(&ptr, &vlen);
928
929 if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
930 return -EINVAL;
931
932 memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
933
934 return 0;
935 }
936
937 static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
938 size_t vlen)
939 {
940 const char *ptr = value;
941
942 hpre_rsa_drop_leading_zeros(&ptr, &vlen);
943
944 if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
945 return -EINVAL;
946
947 memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);
948
949 return 0;
950 }
951
952 static int hpre_crt_para_get(char *para, size_t para_sz,
953 const char *raw, size_t raw_sz)
954 {
955 const char *ptr = raw;
956 size_t len = raw_sz;
957
958 hpre_rsa_drop_leading_zeros(&ptr, &len);
959 if (!len || len > para_sz)
960 return -EINVAL;
961
962 memcpy(para + para_sz - len, ptr, len);
963
964 return 0;
965 }
966
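/*
 * Build the CRT private key blob in the hardware layout
 * dq->dp->q->p->qinv, with each component right-aligned in a
 * half-key-size slot.
 */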
967 static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
968 {
969 unsigned int hlf_ksz = ctx->key_sz >> 1;
970 struct device *dev = ctx->dev;
971 u64 offset;
972 int ret;
973
974 ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
975 &ctx->rsa.dma_crt_prikey,
976 GFP_KERNEL);
977 if (!ctx->rsa.crt_prikey)
978 return -ENOMEM;
979
980 ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
981 rsa_key->dq, rsa_key->dq_sz);
982 if (ret)
983 goto free_key;
984
985 offset = hlf_ksz;
986 ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
987 rsa_key->dp, rsa_key->dp_sz);
988 if (ret)
989 goto free_key;
990
991 offset = hlf_ksz * HPRE_CRT_Q;
992 ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
993 rsa_key->q, rsa_key->q_sz);
994 if (ret)
995 goto free_key;
996
997 offset = hlf_ksz * HPRE_CRT_P;
998 ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
999 rsa_key->p, rsa_key->p_sz);
1000 if (ret)
1001 goto free_key;
1002
1003 offset = hlf_ksz * HPRE_CRT_INV;
1004 ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
1005 rsa_key->qinv, rsa_key->qinv_sz);
1006 if (ret)
1007 goto free_key;
1008
1009 ctx->crt_g2_mode = true;
1010
1011 return 0;
1012
1013 free_key:
1014 offset = hlf_ksz * HPRE_CRT_PRMS;
1015 memzero_explicit(ctx->rsa.crt_prikey, offset);
1016 dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
1017 ctx->rsa.dma_crt_prikey);
1018 ctx->rsa.crt_prikey = NULL;
1019 ctx->crt_g2_mode = false;
1020
1021 return ret;
1022 }
1023
1024 /* If it is clear all, all the resources of the QP will be cleaned. */
1025 static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
1026 {
1027 unsigned int half_key_sz = ctx->key_sz >> 1;
1028 struct device *dev = ctx->dev;
1029
1030 if (is_clear_all)
1031 hisi_qm_stop_qp(ctx->qp);
1032
1033 if (ctx->rsa.pubkey) {
1034 dma_free_coherent(dev, ctx->key_sz << 1,
1035 ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
1036 ctx->rsa.pubkey = NULL;
1037 }
1038
1039 if (ctx->rsa.crt_prikey) {
1040 memzero_explicit(ctx->rsa.crt_prikey,
1041 half_key_sz * HPRE_CRT_PRMS);
1042 dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
1043 ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
1044 ctx->rsa.crt_prikey = NULL;
1045 }
1046
1047 if (ctx->rsa.prikey) {
1048 memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
1049 dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
1050 ctx->rsa.dma_prikey);
1051 ctx->rsa.prikey = NULL;
1052 }
1053
1054 hpre_ctx_clear(ctx, is_clear_all);
1055 }
1056
1057 /*
1058 * we should judge whether the key is in CRT form:
1059 * CRT: return true, N-CRT: return false.
1060 */
1061 static bool hpre_is_crt_key(struct rsa_key *key)
1062 {
1063 u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
1064 key->qinv_sz;
1065
1066 #define LEN_OF_NCRT_PARA 5
1067
1068 /* a non-CRT key carries no more than LEN_OF_NCRT_PARA bytes of CRT parameters */
1069 return len > LEN_OF_NCRT_PARA;
1070 }
1071
1072 static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
1073 unsigned int keylen, bool private)
1074 {
1075 struct rsa_key rsa_key;
1076 int ret;
1077
1078 hpre_rsa_clear_ctx(ctx, false);
1079
1080 if (private)
1081 ret = rsa_parse_priv_key(&rsa_key, key, keylen);
1082 else
1083 ret = rsa_parse_pub_key(&rsa_key, key, keylen);
1084 if (ret < 0)
1085 return ret;
1086
1087 ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
1088 if (ret <= 0)
1089 return ret;
1090
1091 if (private) {
1092 ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
1093 if (ret < 0)
1094 goto free;
1095
1096 if (hpre_is_crt_key(&rsa_key)) {
1097 ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
1098 if (ret < 0)
1099 goto free;
1100 }
1101 }
1102
1103 ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
1104 if (ret < 0)
1105 goto free;
1106
1107 if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
1108 ret = -EINVAL;
1109 goto free;
1110 }
1111
1112 return 0;
1113
1114 free:
1115 hpre_rsa_clear_ctx(ctx, false);
1116 return ret;
1117 }
1118
1119 static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
1120 unsigned int keylen)
1121 {
1122 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1123 int ret;
1124
1125 ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
1126 if (ret)
1127 return ret;
1128
1129 return hpre_rsa_setkey(ctx, key, keylen, false);
1130 }
1131
1132 static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
1133 unsigned int keylen)
1134 {
1135 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1136 int ret;
1137
1138 ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
1139 if (ret)
1140 return ret;
1141
1142 return hpre_rsa_setkey(ctx, key, keylen, true);
1143 }
1144
1145 static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
1146 {
1147 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1148
1149 /* For 512 and 1536 bits key size, use soft tfm instead */
1150 if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
1151 ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
1152 return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);
1153
1154 return ctx->key_sz;
1155 }
1156
1157 static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
1158 {
1159 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1160 int ret;
1161
1162 ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
1163 if (IS_ERR(ctx->rsa.soft_tfm)) {
1164 pr_err("Can not alloc_akcipher!\n");
1165 return PTR_ERR(ctx->rsa.soft_tfm);
1166 }
1167
1168 ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
1169 if (ret)
1170 crypto_free_akcipher(ctx->rsa.soft_tfm);
1171
1172 return ret;
1173 }
1174
1175 static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
1176 {
1177 struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
1178
1179 hpre_rsa_clear_ctx(ctx, true);
1180 crypto_free_akcipher(ctx->rsa.soft_tfm);
1181 }
1182
1183 static void hpre_key_to_big_end(u8 *data, int len)
1184 {
1185 int i, j;
1186
1187 for (i = 0; i < len / 2; i++) {
1188 j = len - i - 1;
1189 swap(data[j], data[i]);
1190 }
1191 }
1192
1193 static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
1194 bool is_ecdh)
1195 {
1196 struct device *dev = ctx->dev;
1197 unsigned int sz = ctx->key_sz;
1198 unsigned int shift = sz << 1;
1199
1200 if (is_clear_all)
1201 hisi_qm_stop_qp(ctx->qp);
1202
1203 if (is_ecdh && ctx->ecdh.p) {
1204 /* ecdh: p->a->k->b */
1205 memzero_explicit(ctx->ecdh.p + shift, sz);
1206 dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
1207 ctx->ecdh.p = NULL;
1208 } else if (!is_ecdh && ctx->curve25519.p) {
1209 /* curve25519: p->a->k */
1210 memzero_explicit(ctx->curve25519.p + shift, sz);
1211 dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
1212 ctx->curve25519.dma_p);
1213 ctx->curve25519.p = NULL;
1214 }
1215
1216 hpre_ctx_clear(ctx, is_clear_all);
1217 }
1218
1219 /*
1220 * Key widths of 192/224/256/384/521 bits are supported by HPRE and are
1221 * rounded up as follows:
1222 * bits <= 256 -> 256; 256 < bits <= 384 -> 384; 384 < bits <= 576 -> 576.
1223 * If the parameter bit width is smaller, the high-order bytes are
1224 * zero-filled in software, so TASK_LENGTH1 is 0x3/0x5/0x8.
1225 */
1226 static unsigned int hpre_ecdh_supported_curve(unsigned short id)
1227 {
1228 switch (id) {
1229 case ECC_CURVE_NIST_P192:
1230 case ECC_CURVE_NIST_P256:
1231 return HPRE_ECC_HW256_KSZ_B;
1232 case ECC_CURVE_NIST_P384:
1233 return HPRE_ECC_HW384_KSZ_B;
1234 default:
1235 break;
1236 }
1237
1238 return 0;
1239 }
1240
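/*
 * Copy a curve parameter stored as 64-bit digits into 'addr', truncated to
 * cur_sz bytes, and convert it to big-endian byte order for the hardware.
 */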
1241 static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
1242 {
1243 unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
1244 u8 i = 0;
1245
1246 while (i < ndigits - 1) {
1247 memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
1248 i++;
1249 }
1250
1251 memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
1252 hpre_key_to_big_end((u8 *)addr, cur_sz);
1253 }
1254
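/*
 * Fill the ecdh key buffer with the curve parameters p, a, b and the
 * generator coordinates x/y, each right-aligned to key_sz, and reject a
 * full-length private key that is not smaller than the curve order n.
 */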
1255 static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
1256 unsigned int cur_sz)
1257 {
1258 unsigned int shifta = ctx->key_sz << 1;
1259 unsigned int shiftb = ctx->key_sz << 2;
1260 void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
1261 void *a = ctx->ecdh.p + shifta - cur_sz;
1262 void *b = ctx->ecdh.p + shiftb - cur_sz;
1263 void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
1264 void *y = ctx->ecdh.g + shifta - cur_sz;
1265 const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
1266 char *n;
1267
1268 if (unlikely(!curve))
1269 return -EINVAL;
1270
1271 n = kzalloc(ctx->key_sz, GFP_KERNEL);
1272 if (!n)
1273 return -ENOMEM;
1274
1275 fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
1276 fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
1277 fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
1278 fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
1279 fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
1280 fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);
1281
1282 if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
1283 kfree(n);
1284 return -EINVAL;
1285 }
1286
1287 kfree(n);
1288 return 0;
1289 }
1290
1291 static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
1292 {
1293 switch (id) {
1294 case ECC_CURVE_NIST_P192:
1295 return HPRE_ECC_NIST_P192_N_SIZE;
1296 case ECC_CURVE_NIST_P256:
1297 return HPRE_ECC_NIST_P256_N_SIZE;
1298 case ECC_CURVE_NIST_P384:
1299 return HPRE_ECC_NIST_P384_N_SIZE;
1300 default:
1301 break;
1302 }
1303
1304 return 0;
1305 }
1306
1307 static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
1308 {
1309 struct device *dev = ctx->dev;
1310 unsigned int sz, shift, curve_sz;
1311 int ret;
1312
1313 ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
1314 if (!ctx->key_sz)
1315 return -EINVAL;
1316
1317 curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
1318 if (!curve_sz || params->key_size > curve_sz)
1319 return -EINVAL;
1320
1321 sz = ctx->key_sz;
1322
1323 if (!ctx->ecdh.p) {
1324 ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
1325 GFP_KERNEL);
1326 if (!ctx->ecdh.p)
1327 return -ENOMEM;
1328 }
1329
1330 shift = sz << 2;
1331 ctx->ecdh.g = ctx->ecdh.p + shift;
1332 ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;
1333
1334 ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
1335 if (ret) {
1336 dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
1337 dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
1338 ctx->ecdh.p = NULL;
1339 return ret;
1340 }
1341
1342 return 0;
1343 }
1344
1345 static bool hpre_key_is_zero(char *key, unsigned short key_sz)
1346 {
1347 int i;
1348
1349 for (i = 0; i < key_sz; i++)
1350 if (key[i])
1351 return false;
1352
1353 return true;
1354 }
1355
1356 static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
1357 {
1358 struct device *dev = ctx->dev;
1359 int ret;
1360
1361 ret = crypto_get_default_rng();
1362 if (ret) {
1363 dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
1364 return ret;
1365 }
1366
1367 ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
1368 params->key_size);
1369 crypto_put_default_rng();
1370 if (ret)
1371 dev_err(dev, "failed to get rng, ret = %d!\n", ret);
1372
1373 return ret;
1374 }
1375
1376 static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
1377 unsigned int len)
1378 {
1379 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1380 struct device *dev = ctx->dev;
1381 char key[HPRE_ECC_MAX_KSZ];
1382 unsigned int sz, sz_shift;
1383 struct ecdh params;
1384 int ret;
1385
1386 if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
1387 dev_err(dev, "failed to decode ecdh key!\n");
1388 return -EINVAL;
1389 }
1390
1391 /* Use stdrng to generate private key */
1392 if (!params.key || !params.key_size) {
1393 params.key = key;
1394 params.key_size = hpre_ecdh_get_curvesz(ctx->curve_id);
1395 ret = ecdh_gen_privkey(ctx, &params);
1396 if (ret)
1397 return ret;
1398 }
1399
1400 if (hpre_key_is_zero(params.key, params.key_size)) {
1401 dev_err(dev, "Invalid hpre key!\n");
1402 return -EINVAL;
1403 }
1404
1405 hpre_ecc_clear_ctx(ctx, false, true);
1406
1407 ret = hpre_ecdh_set_param(ctx, &params);
1408 if (ret < 0) {
1409 dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
1410 return ret;
1411 }
1412
1413 sz = ctx->key_sz;
1414 sz_shift = (sz << 1) + sz - params.key_size;
1415 memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);
1416
1417 return 0;
1418 }
1419
1420 static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
1421 struct hpre_asym_request *req,
1422 struct scatterlist *dst,
1423 struct scatterlist *src)
1424 {
1425 struct device *dev = ctx->dev;
1426 struct hpre_sqe *sqe = &req->req;
1427 dma_addr_t dma;
1428
1429 dma = le64_to_cpu(sqe->in);
1430 if (unlikely(dma_mapping_error(dev, dma)))
1431 return;
1432
1433 if (src && req->src)
1434 dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
1435
1436 dma = le64_to_cpu(sqe->out);
1437 if (unlikely(dma_mapping_error(dev, dma)))
1438 return;
1439
1440 if (req->dst)
1441 dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
1442 if (dst)
1443 dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
1444 }
1445
1446 static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
1447 {
1448 unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
1449 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
1450 struct hpre_asym_request *req = NULL;
1451 struct kpp_request *areq;
1452 u64 overtime_thrhld;
1453 char *p;
1454 int ret;
1455
1456 ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
1457 areq = req->areq.ecdh;
1458 areq->dst_len = ctx->key_sz << 1;
1459
1460 overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
1461 if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
1462 atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
1463
1464 p = sg_virt(areq->dst);
1465 memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
1466 memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
1467
1468 hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
1469 kpp_request_complete(areq, ret);
1470
1471 atomic64_inc(&dfx[HPRE_RECV_CNT].value);
1472 }
1473
1474 static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
1475 struct kpp_request *req)
1476 {
1477 struct hpre_asym_request *h_req;
1478 struct hpre_sqe *msg;
1479 int req_id;
1480 void *tmp;
1481
1482 if (req->dst_len < ctx->key_sz << 1) {
1483 req->dst_len = ctx->key_sz << 1;
1484 return -EINVAL;
1485 }
1486
1487 tmp = kpp_request_ctx(req);
1488 h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
1489 h_req->cb = hpre_ecdh_cb;
1490 h_req->areq.ecdh = req;
1491 msg = &h_req->req;
1492 memset(msg, 0, sizeof(*msg));
1493 msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
1494 msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
1495 msg->key = cpu_to_le64(ctx->ecdh.dma_p);
1496
1497 msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
1498 msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
1499 h_req->ctx = ctx;
1500
1501 req_id = hpre_add_req_to_ctx(h_req);
1502 if (req_id < 0)
1503 return -EBUSY;
1504
1505 msg->tag = cpu_to_le16((u16)req_id);
1506 return 0;
1507 }
1508
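/*
 * Copy the input point (gx followed by gy) from the scatterlist into a
 * coherent buffer, right-aligning each coordinate to key_sz so the
 * hardware sees zero-padded values.
 */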
1509 static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
1510 struct scatterlist *data, unsigned int len)
1511 {
1512 struct hpre_sqe *msg = &hpre_req->req;
1513 struct hpre_ctx *ctx = hpre_req->ctx;
1514 struct device *dev = ctx->dev;
1515 unsigned int tmpshift;
1516 dma_addr_t dma = 0;
1517 void *ptr;
1518 int shift;
1519
1520 /* Src_data includes gx and gy. */
1521 shift = ctx->key_sz - (len >> 1);
1522 if (unlikely(shift < 0))
1523 return -EINVAL;
1524
1525 ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
1526 if (unlikely(!ptr))
1527 return -ENOMEM;
1528
1529 tmpshift = ctx->key_sz << 1;
1530 scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
1531 memcpy(ptr + shift, ptr + tmpshift, len >> 1);
1532 memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);
1533
1534 hpre_req->src = ptr;
1535 msg->in = cpu_to_le64(dma);
1536 return 0;
1537 }
1538
1539 static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
1540 struct scatterlist *data, unsigned int len)
1541 {
1542 struct hpre_sqe *msg = &hpre_req->req;
1543 struct hpre_ctx *ctx = hpre_req->ctx;
1544 struct device *dev = ctx->dev;
1545 dma_addr_t dma;
1546
1547 if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
1548 dev_err(dev, "data or data length is illegal!\n");
1549 return -EINVAL;
1550 }
1551
1552 hpre_req->dst = NULL;
1553 dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
1554 if (unlikely(dma_mapping_error(dev, dma))) {
1555 dev_err(dev, "dma map data err!\n");
1556 return -ENOMEM;
1557 }
1558
1559 msg->out = cpu_to_le64(dma);
1560 return 0;
1561 }
1562
1563 static int hpre_ecdh_compute_value(struct kpp_request *req)
1564 {
1565 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
1566 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1567 struct device *dev = ctx->dev;
1568 void *tmp = kpp_request_ctx(req);
1569 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
1570 struct hpre_sqe *msg = &hpre_req->req;
1571 int ret;
1572
1573 ret = hpre_ecdh_msg_request_set(ctx, req);
1574 if (unlikely(ret)) {
1575 dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
1576 return ret;
1577 }
1578
1579 if (req->src) {
1580 ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
1581 if (unlikely(ret)) {
1582 dev_err(dev, "failed to init src data, ret = %d!\n", ret);
1583 goto clear_all;
1584 }
1585 } else {
1586 msg->in = cpu_to_le64(ctx->ecdh.dma_g);
1587 }
1588
1589 ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
1590 if (unlikely(ret)) {
1591 dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
1592 goto clear_all;
1593 }
1594
1595 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
1596 ret = hpre_send(ctx, msg);
1597 if (likely(!ret))
1598 return -EINPROGRESS;
1599
1600 clear_all:
1601 hpre_rm_req_from_ctx(hpre_req);
1602 hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
1603 return ret;
1604 }
1605
1606 static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
1607 {
1608 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1609
1610 /* max size is the pub_key_size, including x and y */
1611 return ctx->key_sz << 1;
1612 }
1613
1614 static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
1615 {
1616 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1617
1618 ctx->curve_id = ECC_CURVE_NIST_P192;
1619
1620 return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1621 }
1622
1623 static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
1624 {
1625 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1626
1627 ctx->curve_id = ECC_CURVE_NIST_P256;
1628
1629 return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1630 }
1631
1632 static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
1633 {
1634 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1635
1636 ctx->curve_id = ECC_CURVE_NIST_P384;
1637
1638 return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
1639 }
1640
1641 static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
1642 {
1643 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1644
1645 hpre_ecc_clear_ctx(ctx, true, true);
1646 }
1647
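/*
 * Lay out p, a, the clamped secret scalar and the base point x coordinate
 * in the curve25519 key buffer; the secret from 'buf' is clamped and
 * converted to big endian first.
 */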
1648 static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
1649 unsigned int len)
1650 {
1651 u8 secret[CURVE25519_KEY_SIZE] = { 0 };
1652 unsigned int sz = ctx->key_sz;
1653 const struct ecc_curve *curve;
1654 unsigned int shift = sz << 1;
1655 void *p;
1656
1657 /*
1658 * The key in 'buf' is little-endian; preprocess it as described in
1659 * RFC 7748 ("k[0] &= 248, k[31] &= 127, k[31] |= 64"), then convert it
1660 * to big endian. Only then does the result match the software
1661 * curve25519 implementation that exists in crypto.
1662 */
1663 memcpy(secret, buf, len);
1664 curve25519_clamp_secret(secret);
1665 hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
1666
1667 p = ctx->curve25519.p + sz - len;
1668
1669 curve = ecc_get_curve25519();
1670
1671 /* fill curve parameters */
1672 fill_curve_param(p, curve->p, len, curve->g.ndigits);
1673 fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
1674 memcpy(p + shift, secret, len);
1675 fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
1676 memzero_explicit(secret, CURVE25519_KEY_SIZE);
1677 }
1678
1679 static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
1680 unsigned int len)
1681 {
1682 struct device *dev = ctx->dev;
1683 unsigned int sz = ctx->key_sz;
1684 unsigned int shift = sz << 1;
1685
1686 /* p->a->k->gx */
1687 if (!ctx->curve25519.p) {
1688 ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
1689 &ctx->curve25519.dma_p,
1690 GFP_KERNEL);
1691 if (!ctx->curve25519.p)
1692 return -ENOMEM;
1693 }
1694
1695 ctx->curve25519.g = ctx->curve25519.p + shift + sz;
1696 ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
1697
1698 hpre_curve25519_fill_curve(ctx, buf, len);
1699
1700 return 0;
1701 }
1702
1703 static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
1704 unsigned int len)
1705 {
1706 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1707 struct device *dev = ctx->dev;
1708 int ret = -EINVAL;
1709
1710 if (len != CURVE25519_KEY_SIZE ||
1711 !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
1712 dev_err(dev, "key is null or key len is not 32bytes!\n");
1713 return ret;
1714 }
1715
1716 /* Free old secret if any */
1717 hpre_ecc_clear_ctx(ctx, false, false);
1718
1719 ctx->key_sz = CURVE25519_KEY_SIZE;
1720 ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
1721 if (ret) {
1722 dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
1723 hpre_ecc_clear_ctx(ctx, false, false);
1724 return ret;
1725 }
1726
1727 return 0;
1728 }
1729
1730 static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
1731 struct hpre_asym_request *req,
1732 struct scatterlist *dst,
1733 struct scatterlist *src)
1734 {
1735 struct device *dev = ctx->dev;
1736 struct hpre_sqe *sqe = &req->req;
1737 dma_addr_t dma;
1738
1739 dma = le64_to_cpu(sqe->in);
1740 if (unlikely(dma_mapping_error(dev, dma)))
1741 return;
1742
1743 if (src && req->src)
1744 dma_free_coherent(dev, ctx->key_sz, req->src, dma);
1745
1746 dma = le64_to_cpu(sqe->out);
1747 if (unlikely(dma_mapping_error(dev, dma)))
1748 return;
1749
1750 if (req->dst)
1751 dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
1752 if (dst)
1753 dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
1754 }
1755
1756 static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
1757 {
1758 struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
1759 struct hpre_asym_request *req = NULL;
1760 struct kpp_request *areq;
1761 u64 overtime_thrhld;
1762 int ret;
1763
1764 ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
1765 areq = req->areq.curve25519;
1766 areq->dst_len = ctx->key_sz;
1767
1768 overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
1769 if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
1770 atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
1771
1772 hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
1773
1774 hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
1775 kpp_request_complete(areq, ret);
1776
1777 atomic64_inc(&dfx[HPRE_RECV_CNT].value);
1778 }
1779
1780 static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
1781 struct kpp_request *req)
1782 {
1783 struct hpre_asym_request *h_req;
1784 struct hpre_sqe *msg;
1785 int req_id;
1786 void *tmp;
1787
1788 if (unlikely(req->dst_len < ctx->key_sz)) {
1789 req->dst_len = ctx->key_sz;
1790 return -EINVAL;
1791 }
1792
1793 tmp = kpp_request_ctx(req);
1794 h_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
1795 h_req->cb = hpre_curve25519_cb;
1796 h_req->areq.curve25519 = req;
1797 msg = &h_req->req;
1798 memset(msg, 0, sizeof(*msg));
1799 msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
1800 msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
1801 msg->key = cpu_to_le64(ctx->curve25519.dma_p);
1802
1803 msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
1804 msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
1805 h_req->ctx = ctx;
1806
1807 req_id = hpre_add_req_to_ctx(h_req);
1808 if (req_id < 0)
1809 return -EBUSY;
1810
1811 msg->tag = cpu_to_le16((u16)req_id);
1812 return 0;
1813 }
1814
1815 static void hpre_curve25519_src_modulo_p(u8 *ptr)
1816 {
1817 int i;
1818
1819 for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
1820 ptr[i] = 0;
1821
1822 /* The modulus is ptr's last byte minus '0xed'(last byte of p) */
1823 ptr[i] -= 0xed;
1824 }
1825
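/*
 * Import the peer's u-coordinate: reject an all-zero value or one equal to
 * p, clear the masked top bit, convert to big endian, and subtract p from
 * values that exceed it before handing the data to hardware.
 */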
1826 static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
1827 struct scatterlist *data, unsigned int len)
1828 {
1829 struct hpre_sqe *msg = &hpre_req->req;
1830 struct hpre_ctx *ctx = hpre_req->ctx;
1831 struct device *dev = ctx->dev;
1832 u8 p[CURVE25519_KEY_SIZE] = { 0 };
1833 const struct ecc_curve *curve;
1834 dma_addr_t dma = 0;
1835 u8 *ptr;
1836
1837 if (len != CURVE25519_KEY_SIZE) {
1838 dev_err(dev, "sourc_data len is not 32bytes, len = %u!\n", len);
1839 return -EINVAL;
1840 }
1841
1842 ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
1843 if (unlikely(!ptr))
1844 return -ENOMEM;
1845
1846 scatterwalk_map_and_copy(ptr, data, 0, len, 0);
1847
1848 if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
1849 dev_err(dev, "gx is null!\n");
1850 goto err;
1851 }
1852
1853 /*
1854 * Src_data (gx) is in little-endian order; the MSB of its final byte is
1855 * masked as described in RFC 7748, and the value is then converted to
1856 * big-endian form so the hardware can use it.
1857 */
1858 ptr[31] &= 0x7f;
1859 hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
1860
1861 curve = ecc_get_curve25519();
1862
1863 fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
1864
1865 /*
1866 * When src_data lies in (2^255 - 19) ~ (2^255 - 1) it is not below p,
1867 * so reduce it modulo p before use.
1868 */
1869 if (memcmp(ptr, p, ctx->key_sz) == 0) {
1870 dev_err(dev, "gx is p!\n");
1871 goto err;
1872 } else if (memcmp(ptr, p, ctx->key_sz) > 0) {
1873 hpre_curve25519_src_modulo_p(ptr);
1874 }
1875
1876 hpre_req->src = ptr;
1877 msg->in = cpu_to_le64(dma);
1878 return 0;
1879
1880 err:
1881 dma_free_coherent(dev, ctx->key_sz, ptr, dma);
1882 return -EINVAL;
1883 }
1884
1885 static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
1886 struct scatterlist *data, unsigned int len)
1887 {
1888 struct hpre_sqe *msg = &hpre_req->req;
1889 struct hpre_ctx *ctx = hpre_req->ctx;
1890 struct device *dev = ctx->dev;
1891 dma_addr_t dma;
1892
1893 if (!data || !sg_is_last(data) || len != ctx->key_sz) {
1894 dev_err(dev, "data or data length is illegal!\n");
1895 return -EINVAL;
1896 }
1897
1898 hpre_req->dst = NULL;
1899 dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
1900 if (unlikely(dma_mapping_error(dev, dma))) {
1901 dev_err(dev, "dma map data err!\n");
1902 return -ENOMEM;
1903 }
1904
1905 msg->out = cpu_to_le64(dma);
1906 return 0;
1907 }
1908
1909 static int hpre_curve25519_compute_value(struct kpp_request *req)
1910 {
1911 struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
1912 struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
1913 struct device *dev = ctx->dev;
1914 void *tmp = kpp_request_ctx(req);
1915 struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, HPRE_ALIGN_SZ);
1916 struct hpre_sqe *msg = &hpre_req->req;
1917 int ret;
1918
1919 ret = hpre_curve25519_msg_request_set(ctx, req);
1920 if (unlikely(ret)) {
1921 dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
1922 return ret;
1923 }
1924
1925 if (req->src) {
1926 ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
1927 if (unlikely(ret)) {
1928 dev_err(dev, "failed to init src data, ret = %d!\n",
1929 ret);
1930 goto clear_all;
1931 }
1932 } else {
1933 msg->in = cpu_to_le64(ctx->curve25519.dma_g);
1934 }
1935
1936 ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
1937 if (unlikely(ret)) {
1938 dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
1939 goto clear_all;
1940 }
1941
1942 msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
1943 ret = hpre_send(ctx, msg);
1944 if (likely(!ret))
1945 return -EINPROGRESS;
1946
1947 clear_all:
1948 hpre_rm_req_from_ctx(hpre_req);
1949 hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
1950 return ret;
1951 }
1952
static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_ecc_clear_ctx(ctx, true, false);
}

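/*
 * Algorithm templates registered with the crypto API. Registration of each
 * algorithm is gated on the corresponding capability bit of the device.
 */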
static struct akcipher_alg rsa = {
	.sign = hpre_rsa_dec,
	.verify = hpre_rsa_enc,
	.encrypt = hpre_rsa_enc,
	.decrypt = hpre_rsa_dec,
	.set_pub_key = hpre_rsa_setpubkey,
	.set_priv_key = hpre_rsa_setprivkey,
	.max_size = hpre_rsa_max_size,
	.init = hpre_rsa_init_tfm,
	.exit = hpre_rsa_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "rsa",
		.cra_driver_name = "hpre-rsa",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg dh = {
	.set_secret = hpre_dh_set_secret,
	.generate_public_key = hpre_dh_compute_value,
	.compute_shared_secret = hpre_dh_compute_value,
	.max_size = hpre_dh_max_size,
	.init = hpre_dh_init_tfm,
	.exit = hpre_dh_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "dh",
		.cra_driver_name = "hpre-dh",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg ecdh_curves[] = {
	{
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p192_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p192",
			.cra_driver_name = "hpre-ecdh-nist-p192",
			.cra_module = THIS_MODULE,
		},
	}, {
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p256_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p256",
			.cra_driver_name = "hpre-ecdh-nist-p256",
			.cra_module = THIS_MODULE,
		},
	}, {
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p384_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p384",
			.cra_driver_name = "hpre-ecdh-nist-p384",
			.cra_module = THIS_MODULE,
		},
	}
};

static struct kpp_alg curve25519_alg = {
	.set_secret = hpre_curve25519_set_secret,
	.generate_public_key = hpre_curve25519_compute_value,
	.compute_shared_secret = hpre_curve25519_compute_value,
	.max_size = hpre_curve25519_max_size,
	.init = hpre_curve25519_init_tfm,
	.exit = hpre_curve25519_exit_tfm,
	.reqsize = sizeof(struct hpre_asym_request) + HPRE_ALIGN_SZ,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "curve25519",
		.cra_driver_name = "hpre-curve25519",
		.cra_module = THIS_MODULE,
	},
};

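/*
 * Per-algorithm registration helpers. Each helper first checks the device
 * capability mask and silently skips registration when the algorithm is not
 * supported by the hardware.
 */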
static int hpre_register_rsa(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
		return 0;

	rsa.base.cra_flags = 0;
	ret = crypto_register_akcipher(&rsa);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_rsa(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
		return;

	crypto_unregister_akcipher(&rsa);
}

static int hpre_register_dh(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
		return 0;

	ret = crypto_register_kpp(&dh);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_dh(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
		return;

	crypto_unregister_kpp(&dh);
}

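/*
 * Register all supported NIST curves; if any registration fails, unregister
 * the curves that were already registered before returning the error.
 */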
static int hpre_register_ecdh(struct hisi_qm *qm)
{
	int ret, i;

	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
		return 0;

	for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) {
		ret = crypto_register_kpp(&ecdh_curves[i]);
		if (ret) {
			dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n",
				ecdh_curves[i].base.cra_name, ret);
			goto unreg_kpp;
		}
	}

	return 0;

unreg_kpp:
	for (--i; i >= 0; --i)
		crypto_unregister_kpp(&ecdh_curves[i]);

	return ret;
}

static void hpre_unregister_ecdh(struct hisi_qm *qm)
{
	int i;

	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
		return;

	for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i)
		crypto_unregister_kpp(&ecdh_curves[i]);
}

static int hpre_register_x25519(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
		return 0;

	ret = crypto_register_kpp(&curve25519_alg);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_x25519(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
		return;

	crypto_unregister_kpp(&curve25519_alg);
}

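/*
 * Register all supported algorithms in order (RSA, DH, ECDH, X25519) and
 * unwind the already registered ones if a later registration fails.
 */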
int hpre_algs_register(struct hisi_qm *qm)
{
	int ret;

	ret = hpre_register_rsa(qm);
	if (ret)
		return ret;

	ret = hpre_register_dh(qm);
	if (ret)
		goto unreg_rsa;

	ret = hpre_register_ecdh(qm);
	if (ret)
		goto unreg_dh;

	ret = hpre_register_x25519(qm);
	if (ret)
		goto unreg_ecdh;

	return ret;

unreg_ecdh:
	hpre_unregister_ecdh(qm);
unreg_dh:
	hpre_unregister_dh(qm);
unreg_rsa:
	hpre_unregister_rsa(qm);
	return ret;
}

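/* Unregister all algorithms in the reverse order of registration. */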
void hpre_algs_unregister(struct hisi_qm *qm)
{
	hpre_unregister_x25519(qm);
	hpre_unregister_ecdh(qm);
	hpre_unregister_dh(qm);
	hpre_unregister_rsa(qm);
}