1 /*
2   This file is provided under a dual BSD/GPLv2 license.  When using or
3   redistributing this file, you may do so under either license.
4 
5   GPL LICENSE SUMMARY
6   Copyright(c) 2014 Intel Corporation.
7   This program is free software; you can redistribute it and/or modify
8   it under the terms of version 2 of the GNU General Public License as
9   published by the Free Software Foundation.
10 
11   This program is distributed in the hope that it will be useful, but
12   WITHOUT ANY WARRANTY; without even the implied warranty of
13   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14   General Public License for more details.
15 
16   Contact Information:
17   qat-linux@intel.com
18 
19   BSD LICENSE
20   Copyright(c) 2014 Intel Corporation.
21   Redistribution and use in source and binary forms, with or without
22   modification, are permitted provided that the following conditions
23   are met:
24 
25 	* Redistributions of source code must retain the above copyright
26 	  notice, this list of conditions and the following disclaimer.
27 	* Redistributions in binary form must reproduce the above copyright
28 	  notice, this list of conditions and the following disclaimer in
29 	  the documentation and/or other materials provided with the
30 	  distribution.
31 	* Neither the name of Intel Corporation nor the names of its
32 	  contributors may be used to endorse or promote products derived
33 	  from this software without specific prior written permission.
34 
35   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46 */
47 
48 #include <linux/module.h>
49 #include <crypto/internal/rsa.h>
50 #include <crypto/internal/akcipher.h>
51 #include <crypto/akcipher.h>
52 #include <crypto/kpp.h>
53 #include <crypto/internal/kpp.h>
54 #include <crypto/dh.h>
55 #include <linux/dma-mapping.h>
56 #include <linux/fips.h>
57 #include <crypto/scatterwalk.h>
58 #include "icp_qat_fw_pke.h"
59 #include "adf_accel_devices.h"
60 #include "adf_transport.h"
61 #include "adf_common_drv.h"
62 #include "qat_crypto.h"
63 
64 static DEFINE_MUTEX(algs_lock);
65 static unsigned int active_devs;
66 
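/*
 * The PKE firmware takes its operands as a flat table of up to eight DMA
 * addresses.  The unions below overlay the named per-operation layouts
 * (encrypt, decrypt, CRT decrypt) on top of that table so the same memory
 * can be handed to the device whichever operation is requested.
 */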
67 struct qat_rsa_input_params {
68 	union {
69 		struct {
70 			dma_addr_t m;
71 			dma_addr_t e;
72 			dma_addr_t n;
73 		} enc;
74 		struct {
75 			dma_addr_t c;
76 			dma_addr_t d;
77 			dma_addr_t n;
78 		} dec;
79 		struct {
80 			dma_addr_t c;
81 			dma_addr_t p;
82 			dma_addr_t q;
83 			dma_addr_t dp;
84 			dma_addr_t dq;
85 			dma_addr_t qinv;
86 		} dec_crt;
87 		u64 in_tab[8];
88 	};
89 } __packed __aligned(64);
90 
91 struct qat_rsa_output_params {
92 	union {
93 		struct {
94 			dma_addr_t c;
95 		} enc;
96 		struct {
97 			dma_addr_t m;
98 		} dec;
99 		u64 out_tab[8];
100 	};
101 } __packed __aligned(64);
102 
103 struct qat_rsa_ctx {
104 	char *n;
105 	char *e;
106 	char *d;
107 	char *p;
108 	char *q;
109 	char *dp;
110 	char *dq;
111 	char *qinv;
112 	dma_addr_t dma_n;
113 	dma_addr_t dma_e;
114 	dma_addr_t dma_d;
115 	dma_addr_t dma_p;
116 	dma_addr_t dma_q;
117 	dma_addr_t dma_dp;
118 	dma_addr_t dma_dq;
119 	dma_addr_t dma_qinv;
120 	unsigned int key_sz;
121 	bool crt_mode;
122 	struct qat_crypto_instance *inst;
123 } __packed __aligned(64);
124 
125 struct qat_dh_input_params {
126 	union {
127 		struct {
128 			dma_addr_t b;
129 			dma_addr_t xa;
130 			dma_addr_t p;
131 		} in;
132 		struct {
133 			dma_addr_t xa;
134 			dma_addr_t p;
135 		} in_g2;
136 		u64 in_tab[8];
137 	};
138 } __packed __aligned(64);
139 
140 struct qat_dh_output_params {
141 	union {
142 		dma_addr_t r;
143 		u64 out_tab[8];
144 	};
145 } __packed __aligned(64);
146 
147 struct qat_dh_ctx {
148 	char *g;
149 	char *xa;
150 	char *p;
151 	dma_addr_t dma_g;
152 	dma_addr_t dma_xa;
153 	dma_addr_t dma_p;
154 	unsigned int p_size;
155 	bool g2;
156 	struct qat_crypto_instance *inst;
157 } __packed __aligned(64);
158 
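/*
 * Per-request state, carved out of the crypto request's private context and
 * aligned to 64 bytes (hence .reqsize = sizeof(...) + 64 below).  It holds
 * the firmware message, the DMA-mapped parameter tables and any bounce
 * buffers allocated when the caller's buffers do not match the key size.
 */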
159 struct qat_asym_request {
160 	union {
161 		struct qat_rsa_input_params rsa;
162 		struct qat_dh_input_params dh;
163 	} in;
164 	union {
165 		struct qat_rsa_output_params rsa;
166 		struct qat_dh_output_params dh;
167 	} out;
168 	dma_addr_t phy_in;
169 	dma_addr_t phy_out;
170 	char *src_align;
171 	char *dst_align;
172 	struct icp_qat_fw_pke_request req;
173 	union {
174 		struct qat_rsa_ctx *rsa;
175 		struct qat_dh_ctx *dh;
176 	} ctx;
177 	union {
178 		struct akcipher_request *rsa;
179 		struct kpp_request *dh;
180 	} areq;
181 	int err;
182 	void (*cb)(struct icp_qat_fw_pke_resp *resp);
183 } __aligned(64);
184 
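/*
 * Completion callback for a DH request: unmap or free the source and
 * destination buffers, copy the result out of the bounce buffer if one was
 * used, release the parameter tables and complete the kpp request with the
 * translated firmware status.
 */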
185 static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
186 {
187 	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
188 	struct kpp_request *areq = req->areq.dh;
189 	struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
190 	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
191 				resp->pke_resp_hdr.comn_resp_flags);
192 
193 	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
194 
195 	if (areq->src) {
196 		if (req->src_align)
197 			dma_free_coherent(dev, req->ctx.dh->p_size,
198 					  req->src_align, req->in.dh.in.b);
199 		else
200 			dma_unmap_single(dev, req->in.dh.in.b,
201 					 req->ctx.dh->p_size, DMA_TO_DEVICE);
202 	}
203 
204 	areq->dst_len = req->ctx.dh->p_size;
205 	if (req->dst_align) {
206 		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
207 					 areq->dst_len, 1);
208 
209 		dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align,
210 				  req->out.dh.r);
211 	} else {
212 		dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
213 				 DMA_FROM_DEVICE);
214 	}
215 
216 	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
217 			 DMA_TO_DEVICE);
218 	dma_unmap_single(dev, req->phy_out,
219 			 sizeof(struct qat_dh_output_params),
220 			 DMA_TO_DEVICE);
221 
222 	kpp_request_complete(areq, err);
223 }
224 
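/*
 * Firmware function IDs for the DH modular exponentiation services.  The ID
 * encodes both the modulus size and whether the generic-base or the g == 2
 * optimized service is used; qat_dh_fn_id() maps a byte length to the
 * matching ID and returns 0 for unsupported sizes.
 */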
225 #define PKE_DH_1536 0x390c1a49
226 #define PKE_DH_G2_1536 0x2e0b1a3e
227 #define PKE_DH_2048 0x4d0c1a60
228 #define PKE_DH_G2_2048 0x3e0b1a55
229 #define PKE_DH_3072 0x510c1a77
230 #define PKE_DH_G2_3072 0x3a0b1a6c
231 #define PKE_DH_4096 0x690c1a8e
232 #define PKE_DH_G2_4096 0x4a0b1a83
233 
234 static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
235 {
236 	unsigned int bitslen = len << 3;
237 
238 	switch (bitslen) {
239 	case 1536:
240 		return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
241 	case 2048:
242 		return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
243 	case 3072:
244 		return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
245 	case 4096:
246 		return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
247 	default:
248 		return 0;
249 	}
250 }
251 
252 static inline struct qat_dh_ctx *qat_dh_get_params(struct crypto_kpp *tfm)
253 {
254 	return kpp_tfm_ctx(tfm);
255 }
256 
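/*
 * Build and submit a DH request.  The base is req->src if provided,
 * otherwise the stored generator (or implicitly 2 in g2 mode); xa is the
 * private key and p the modulus.  Buffers that are not contiguous and
 * exactly modulus-sized are bounced through coherent allocations first.
 */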
257 static int qat_dh_compute_value(struct kpp_request *req)
258 {
259 	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
260 	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
261 	struct qat_crypto_instance *inst = ctx->inst;
262 	struct device *dev = &GET_DEV(inst->accel_dev);
263 	struct qat_asym_request *qat_req =
264 			PTR_ALIGN(kpp_request_ctx(req), 64);
265 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
266 	int ret, ctr = 0;
267 	int n_input_params = 0;
268 
269 	if (unlikely(!ctx->xa))
270 		return -EINVAL;
271 
272 	if (req->dst_len < ctx->p_size) {
273 		req->dst_len = ctx->p_size;
274 		return -EOVERFLOW;
275 	}
276 	memset(msg, '\0', sizeof(*msg));
277 	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
278 					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
279 
280 	msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
281 						    !req->src && ctx->g2);
282 	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
283 		return -EINVAL;
284 
285 	qat_req->cb = qat_dh_cb;
286 	qat_req->ctx.dh = ctx;
287 	qat_req->areq.dh = req;
288 	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
289 	msg->pke_hdr.comn_req_flags =
290 		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
291 					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
292 
293 	/*
294 	 * If no source is provided use g as base
295 	 */
296 	if (req->src) {
297 		qat_req->in.dh.in.xa = ctx->dma_xa;
298 		qat_req->in.dh.in.p = ctx->dma_p;
299 		n_input_params = 3;
300 	} else {
301 		if (ctx->g2) {
302 			qat_req->in.dh.in_g2.xa = ctx->dma_xa;
303 			qat_req->in.dh.in_g2.p = ctx->dma_p;
304 			n_input_params = 2;
305 		} else {
306 			qat_req->in.dh.in.b = ctx->dma_g;
307 			qat_req->in.dh.in.xa = ctx->dma_xa;
308 			qat_req->in.dh.in.p = ctx->dma_p;
309 			n_input_params = 3;
310 		}
311 	}
312 
313 	ret = -ENOMEM;
314 	if (req->src) {
315 		/*
316 		 * src can be of any size in the valid range, but the HW
317 		 * expects it to be the same size as the modulus p, so if it is
318 		 * shorter we need to allocate a new buffer and copy src into it.
319 		 * Otherwise we just map the user provided buffer, which must
320 		 * also be physically contiguous.
321 		 */
322 		if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
323 			qat_req->src_align = NULL;
324 			qat_req->in.dh.in.b = dma_map_single(dev,
325 							     sg_virt(req->src),
326 							     req->src_len,
327 							     DMA_TO_DEVICE);
328 			if (unlikely(dma_mapping_error(dev,
329 						       qat_req->in.dh.in.b)))
330 				return ret;
331 
332 		} else {
333 			int shift = ctx->p_size - req->src_len;
334 
335 			qat_req->src_align = dma_zalloc_coherent(dev,
336 								 ctx->p_size,
337 								 &qat_req->in.dh.in.b,
338 								 GFP_KERNEL);
339 			if (unlikely(!qat_req->src_align))
340 				return ret;
341 
342 			scatterwalk_map_and_copy(qat_req->src_align + shift,
343 						 req->src, 0, req->src_len, 0);
344 		}
345 	}
346 	/*
347 	 * dst can be of any size in the valid range, but the HW expects it to
348 	 * be the same size as the modulus p, so if it differs we allocate a
349 	 * new buffer and copy the result back into dst on completion.
350 	 * Otherwise we just map the user provided buffer, which must also be
351 	 * physically contiguous.
352 	 */
353 	if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
354 		qat_req->dst_align = NULL;
355 		qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst),
356 						   req->dst_len,
357 						   DMA_FROM_DEVICE);
358 
359 		if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
360 			goto unmap_src;
361 
362 	} else {
363 		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->p_size,
364 							 &qat_req->out.dh.r,
365 							 GFP_KERNEL);
366 		if (unlikely(!qat_req->dst_align))
367 			goto unmap_src;
368 	}
369 
370 	qat_req->in.dh.in_tab[n_input_params] = 0;
371 	qat_req->out.dh.out_tab[1] = 0;
372 	/* Mapping in.in.b or in.in_g2.xa is the same */
373 	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh.in.b,
374 					 sizeof(struct qat_dh_input_params),
375 					 DMA_TO_DEVICE);
376 	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
377 		goto unmap_dst;
378 
379 	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh.r,
380 					  sizeof(struct qat_dh_output_params),
381 					  DMA_TO_DEVICE);
382 	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
383 		goto unmap_in_params;
384 
385 	msg->pke_mid.src_data_addr = qat_req->phy_in;
386 	msg->pke_mid.dest_data_addr = qat_req->phy_out;
387 	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
388 	msg->input_param_count = n_input_params;
389 	msg->output_param_count = 1;
390 
391 	do {
392 		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
393 	} while (ret == -EBUSY && ctr++ < 100);
394 
395 	if (!ret)
396 		return -EINPROGRESS;
397 
398 	if (!dma_mapping_error(dev, qat_req->phy_out))
399 		dma_unmap_single(dev, qat_req->phy_out,
400 				 sizeof(struct qat_dh_output_params),
401 				 DMA_TO_DEVICE);
402 unmap_in_params:
403 	if (!dma_mapping_error(dev, qat_req->phy_in))
404 		dma_unmap_single(dev, qat_req->phy_in,
405 				 sizeof(struct qat_dh_input_params),
406 				 DMA_TO_DEVICE);
407 unmap_dst:
408 	if (qat_req->dst_align)
409 		dma_free_coherent(dev, ctx->p_size, qat_req->dst_align,
410 				  qat_req->out.dh.r);
411 	else
412 		if (!dma_mapping_error(dev, qat_req->out.dh.r))
413 			dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
414 					 DMA_FROM_DEVICE);
415 unmap_src:
416 	if (req->src) {
417 		if (qat_req->src_align)
418 			dma_free_coherent(dev, ctx->p_size, qat_req->src_align,
419 					  qat_req->in.dh.in.b);
420 		else
421 			if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
422 				dma_unmap_single(dev, qat_req->in.dh.in.b,
423 						 ctx->p_size,
424 						 DMA_TO_DEVICE);
425 	}
426 	return ret;
427 }
428 
429 static int qat_dh_check_params_length(unsigned int p_len)
430 {
431 	switch (p_len) {
432 	case 1536:
433 	case 2048:
434 	case 3072:
435 	case 4096:
436 		return 0;
437 	}
438 	return -EINVAL;
439 }
440 
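/*
 * Copy the DH domain parameters into DMA-coherent memory.  When the
 * generator is 2 it is not stored at all; the g2 firmware service is used
 * instead, so no base has to be transferred per request.
 */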
441 static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
442 {
443 	struct qat_crypto_instance *inst = ctx->inst;
444 	struct device *dev = &GET_DEV(inst->accel_dev);
445 
446 	if (qat_dh_check_params_length(params->p_size << 3))
447 		return -EINVAL;
448 
449 	ctx->p_size = params->p_size;
450 	ctx->p = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
451 	if (!ctx->p)
452 		return -ENOMEM;
453 	memcpy(ctx->p, params->p, ctx->p_size);
454 
455 	/* If g equals 2 don't copy it */
456 	if (params->g_size == 1 && *(char *)params->g == 0x02) {
457 		ctx->g2 = true;
458 		return 0;
459 	}
460 
461 	ctx->g = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
462 	if (!ctx->g)
463 		return -ENOMEM;
464 	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
465 	       params->g_size);
466 
467 	return 0;
468 }
469 
470 static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
471 {
472 	if (ctx->g) {
473 		dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
474 		ctx->g = NULL;
475 	}
476 	if (ctx->xa) {
477 		dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
478 		ctx->xa = NULL;
479 	}
480 	if (ctx->p) {
481 		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
482 		ctx->p = NULL;
483 	}
484 	ctx->p_size = 0;
485 	ctx->g2 = false;
486 }
487 
488 static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
489 			     unsigned int len)
490 {
491 	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
492 	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
493 	struct dh params;
494 	int ret;
495 
496 	if (crypto_dh_decode_key(buf, len, &params) < 0)
497 		return -EINVAL;
498 
499 	/* Free old secret if any */
500 	qat_dh_clear_ctx(dev, ctx);
501 
502 	ret = qat_dh_set_params(ctx, &params);
503 	if (ret < 0)
504 		goto err_clear_ctx;
505 
506 	ctx->xa = dma_zalloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
507 				      GFP_KERNEL);
508 	if (!ctx->xa) {
509 		ret = -ENOMEM;
510 		goto err_clear_ctx;
511 	}
512 	memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
513 	       params.key_size);
514 
515 	return 0;
516 
517 err_clear_ctx:
518 	qat_dh_clear_ctx(dev, ctx);
519 	return ret;
520 }
521 
522 static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
523 {
524 	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
525 
526 	return ctx->p_size;
527 }
528 
529 static int qat_dh_init_tfm(struct crypto_kpp *tfm)
530 {
531 	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
532 	struct qat_crypto_instance *inst =
533 			qat_crypto_get_instance_node(get_current_node());
534 
535 	if (!inst)
536 		return -EINVAL;
537 
538 	ctx->p_size = 0;
539 	ctx->g2 = false;
540 	ctx->inst = inst;
541 	return 0;
542 }
543 
544 static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
545 {
546 	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
547 	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
548 
549 	qat_dh_clear_ctx(dev, ctx);
550 	qat_crypto_put_instance(ctx->inst);
551 }
552 
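/*
 * Completion callback for an RSA request: mirrors qat_dh_cb() but uses the
 * RSA parameter layout and completes an akcipher request.
 */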
553 static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
554 {
555 	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
556 	struct akcipher_request *areq = req->areq.rsa;
557 	struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
558 	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
559 				resp->pke_resp_hdr.comn_resp_flags);
560 
561 	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
562 
563 	if (req->src_align)
564 		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
565 				  req->in.rsa.enc.m);
566 	else
567 		dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
568 				 DMA_TO_DEVICE);
569 
570 	areq->dst_len = req->ctx.rsa->key_sz;
571 	if (req->dst_align) {
572 		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
573 					 areq->dst_len, 1);
574 
575 		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
576 				  req->out.rsa.enc.c);
577 	} else {
578 		dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
579 				 DMA_FROM_DEVICE);
580 	}
581 
582 	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
583 			 DMA_TO_DEVICE);
584 	dma_unmap_single(dev, req->phy_out,
585 			 sizeof(struct qat_rsa_output_params),
586 			 DMA_TO_DEVICE);
587 
588 	akcipher_request_complete(areq, err);
589 }
590 
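/*
 * Common response handler registered with the transport layer; it simply
 * dispatches to the per-request callback (qat_rsa_cb or qat_dh_cb).
 */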
591 void qat_alg_asym_callback(void *_resp)
592 {
593 	struct icp_qat_fw_pke_resp *resp = _resp;
594 	struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
595 
596 	areq->cb(resp);
597 }
598 
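/*
 * Firmware function IDs for RSA: EP is the public-key operation, DP1 the
 * private-key operation on (n, d) and DP2 the private-key operation in CRT
 * form, one ID per supported key size.  A lookup returning 0 means the key
 * size is not supported.
 */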
599 #define PKE_RSA_EP_512 0x1c161b21
600 #define PKE_RSA_EP_1024 0x35111bf7
601 #define PKE_RSA_EP_1536 0x4d111cdc
602 #define PKE_RSA_EP_2048 0x6e111dba
603 #define PKE_RSA_EP_3072 0x7d111ea3
604 #define PKE_RSA_EP_4096 0xa5101f7e
605 
606 static unsigned long qat_rsa_enc_fn_id(unsigned int len)
607 {
608 	unsigned int bitslen = len << 3;
609 
610 	switch (bitslen) {
611 	case 512:
612 		return PKE_RSA_EP_512;
613 	case 1024:
614 		return PKE_RSA_EP_1024;
615 	case 1536:
616 		return PKE_RSA_EP_1536;
617 	case 2048:
618 		return PKE_RSA_EP_2048;
619 	case 3072:
620 		return PKE_RSA_EP_3072;
621 	case 4096:
622 		return PKE_RSA_EP_4096;
623 	default:
624 		return 0;
625 	}
626 }
627 
628 #define PKE_RSA_DP1_512 0x1c161b3c
629 #define PKE_RSA_DP1_1024 0x35111c12
630 #define PKE_RSA_DP1_1536 0x4d111cf7
631 #define PKE_RSA_DP1_2048 0x6e111dda
632 #define PKE_RSA_DP1_3072 0x7d111ebe
633 #define PKE_RSA_DP1_4096 0xa5101f98
634 
635 static unsigned long qat_rsa_dec_fn_id(unsigned int len)
636 {
637 	unsigned int bitslen = len << 3;
638 
639 	switch (bitslen) {
640 	case 512:
641 		return PKE_RSA_DP1_512;
642 	case 1024:
643 		return PKE_RSA_DP1_1024;
644 	case 1536:
645 		return PKE_RSA_DP1_1536;
646 	case 2048:
647 		return PKE_RSA_DP1_2048;
648 	case 3072:
649 		return PKE_RSA_DP1_3072;
650 	case 4096:
651 		return PKE_RSA_DP1_4096;
652 	default:
653 		return 0;
654 	}
655 }
656 
657 #define PKE_RSA_DP2_512 0x1c131b57
658 #define PKE_RSA_DP2_1024 0x26131c2d
659 #define PKE_RSA_DP2_1536 0x45111d12
660 #define PKE_RSA_DP2_2048 0x59121dfa
661 #define PKE_RSA_DP2_3072 0x81121ed9
662 #define PKE_RSA_DP2_4096 0xb1111fb2
663 
664 static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
665 {
666 	unsigned int bitslen = len << 3;
667 
668 	switch (bitslen) {
669 	case 512:
670 		return PKE_RSA_DP2_512;
671 	case 1024:
672 		return PKE_RSA_DP2_1024;
673 	case 1536:
674 		return PKE_RSA_DP2_1536;
675 	case 2048:
676 		return PKE_RSA_DP2_2048;
677 	case 3072:
678 		return PKE_RSA_DP2_3072;
679 	case 4096:
680 		return PKE_RSA_DP2_4096;
681 	default:
682 		return 0;
683 	}
684 }
685 
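/*
 * RSA public-key operation.  Also wired up as the akcipher "verify" hook,
 * since verification is a public-key exponentiation of the signature.
 */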
686 static int qat_rsa_enc(struct akcipher_request *req)
687 {
688 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
689 	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
690 	struct qat_crypto_instance *inst = ctx->inst;
691 	struct device *dev = &GET_DEV(inst->accel_dev);
692 	struct qat_asym_request *qat_req =
693 			PTR_ALIGN(akcipher_request_ctx(req), 64);
694 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
695 	int ret, ctr = 0;
696 
697 	if (unlikely(!ctx->n || !ctx->e))
698 		return -EINVAL;
699 
700 	if (req->dst_len < ctx->key_sz) {
701 		req->dst_len = ctx->key_sz;
702 		return -EOVERFLOW;
703 	}
704 	memset(msg, '\0', sizeof(*msg));
705 	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
706 					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
707 	msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
708 	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
709 		return -EINVAL;
710 
711 	qat_req->cb = qat_rsa_cb;
712 	qat_req->ctx.rsa = ctx;
713 	qat_req->areq.rsa = req;
714 	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
715 	msg->pke_hdr.comn_req_flags =
716 		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
717 					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
718 
719 	qat_req->in.rsa.enc.e = ctx->dma_e;
720 	qat_req->in.rsa.enc.n = ctx->dma_n;
721 	ret = -ENOMEM;
722 
723 	/*
724 	 * src can be of any size in the valid range, but the HW expects it to
725 	 * be the same size as the modulus n, so if it is shorter we allocate
726 	 * a new buffer and copy the src data into it.
727 	 * Otherwise we just map the user provided buffer, which must also be
728 	 * physically contiguous.
729 	 */
730 	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
731 		qat_req->src_align = NULL;
732 		qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
733 						   req->src_len, DMA_TO_DEVICE);
734 		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
735 			return ret;
736 
737 	} else {
738 		int shift = ctx->key_sz - req->src_len;
739 
740 		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
741 							 &qat_req->in.rsa.enc.m,
742 							 GFP_KERNEL);
743 		if (unlikely(!qat_req->src_align))
744 			return ret;
745 
746 		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
747 					 0, req->src_len, 0);
748 	}
749 	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
750 		qat_req->dst_align = NULL;
751 		qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
752 							req->dst_len,
753 							DMA_FROM_DEVICE);
754 
755 		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
756 			goto unmap_src;
757 
758 	} else {
759 		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
760 							 &qat_req->out.rsa.enc.c,
761 							 GFP_KERNEL);
762 		if (unlikely(!qat_req->dst_align))
763 			goto unmap_src;
764 
765 	}
766 	qat_req->in.rsa.in_tab[3] = 0;
767 	qat_req->out.rsa.out_tab[1] = 0;
768 	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
769 					 sizeof(struct qat_rsa_input_params),
770 					 DMA_TO_DEVICE);
771 	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
772 		goto unmap_dst;
773 
774 	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.enc.c,
775 					  sizeof(struct qat_rsa_output_params),
776 					  DMA_TO_DEVICE);
777 	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
778 		goto unmap_in_params;
779 
780 	msg->pke_mid.src_data_addr = qat_req->phy_in;
781 	msg->pke_mid.dest_data_addr = qat_req->phy_out;
782 	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
783 	msg->input_param_count = 3;
784 	msg->output_param_count = 1;
785 	do {
786 		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
787 	} while (ret == -EBUSY && ctr++ < 100);
788 
789 	if (!ret)
790 		return -EINPROGRESS;
791 
792 	if (!dma_mapping_error(dev, qat_req->phy_out))
793 		dma_unmap_single(dev, qat_req->phy_out,
794 				 sizeof(struct qat_rsa_output_params),
795 				 DMA_TO_DEVICE);
796 unmap_in_params:
797 	if (!dma_mapping_error(dev, qat_req->phy_in))
798 		dma_unmap_single(dev, qat_req->phy_in,
799 				 sizeof(struct qat_rsa_input_params),
800 				 DMA_TO_DEVICE);
801 unmap_dst:
802 	if (qat_req->dst_align)
803 		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
804 				  qat_req->out.rsa.enc.c);
805 	else
806 		if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
807 			dma_unmap_single(dev, qat_req->out.rsa.enc.c,
808 					 ctx->key_sz, DMA_FROM_DEVICE);
809 unmap_src:
810 	if (qat_req->src_align)
811 		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
812 				  qat_req->in.rsa.enc.m);
813 	else
814 		if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
815 			dma_unmap_single(dev, qat_req->in.rsa.enc.m,
816 					 ctx->key_sz, DMA_TO_DEVICE);
817 	return ret;
818 }
819 
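/*
 * RSA private-key operation, using either the (n, d) form or the CRT form
 * (p, q, dp, dq, qinv) depending on how the key was set.  Also wired up as
 * the akcipher "sign" hook.
 */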
820 static int qat_rsa_dec(struct akcipher_request *req)
821 {
822 	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
823 	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
824 	struct qat_crypto_instance *inst = ctx->inst;
825 	struct device *dev = &GET_DEV(inst->accel_dev);
826 	struct qat_asym_request *qat_req =
827 			PTR_ALIGN(akcipher_request_ctx(req), 64);
828 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
829 	int ret, ctr = 0;
830 
831 	if (unlikely(!ctx->n || !ctx->d))
832 		return -EINVAL;
833 
834 	if (req->dst_len < ctx->key_sz) {
835 		req->dst_len = ctx->key_sz;
836 		return -EOVERFLOW;
837 	}
838 	memset(msg, '\0', sizeof(*msg));
839 	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
840 					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
841 	msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
842 		qat_rsa_dec_fn_id_crt(ctx->key_sz) :
843 		qat_rsa_dec_fn_id(ctx->key_sz);
844 	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
845 		return -EINVAL;
846 
847 	qat_req->cb = qat_rsa_cb;
848 	qat_req->ctx.rsa = ctx;
849 	qat_req->areq.rsa = req;
850 	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
851 	msg->pke_hdr.comn_req_flags =
852 		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
853 					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);
854 
855 	if (ctx->crt_mode) {
856 		qat_req->in.rsa.dec_crt.p = ctx->dma_p;
857 		qat_req->in.rsa.dec_crt.q = ctx->dma_q;
858 		qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
859 		qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
860 		qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
861 	} else {
862 		qat_req->in.rsa.dec.d = ctx->dma_d;
863 		qat_req->in.rsa.dec.n = ctx->dma_n;
864 	}
865 	ret = -ENOMEM;
866 
867 	/*
868 	 * src can be of any size in the valid range, but the HW expects it to
869 	 * be the same size as the modulus n, so if it is shorter we allocate
870 	 * a new buffer and copy the src data into it.
871 	 * Otherwise we just map the user provided buffer, which must also be
872 	 * physically contiguous.
873 	 */
874 	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
875 		qat_req->src_align = NULL;
876 		qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
877 						   req->src_len, DMA_TO_DEVICE);
878 		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
879 			return ret;
880 
881 	} else {
882 		int shift = ctx->key_sz - req->src_len;
883 
884 		qat_req->src_align = dma_zalloc_coherent(dev, ctx->key_sz,
885 							 &qat_req->in.rsa.dec.c,
886 							 GFP_KERNEL);
887 		if (unlikely(!qat_req->src_align))
888 			return ret;
889 
890 		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
891 					 0, req->src_len, 0);
892 	}
893 	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
894 		qat_req->dst_align = NULL;
895 		qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
896 						    req->dst_len,
897 						    DMA_FROM_DEVICE);
898 
899 		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
900 			goto unmap_src;
901 
902 	} else {
903 		qat_req->dst_align = dma_zalloc_coherent(dev, ctx->key_sz,
904 							 &qat_req->out.rsa.dec.m,
905 							 GFP_KERNEL);
906 		if (unlikely(!qat_req->dst_align))
907 			goto unmap_src;
908 
909 	}
910 
911 	if (ctx->crt_mode)
912 		qat_req->in.rsa.in_tab[6] = 0;
913 	else
914 		qat_req->in.rsa.in_tab[3] = 0;
915 	qat_req->out.rsa.out_tab[1] = 0;
916 	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.dec.c,
917 					 sizeof(struct qat_rsa_input_params),
918 					 DMA_TO_DEVICE);
919 	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
920 		goto unmap_dst;
921 
922 	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa.dec.m,
923 					  sizeof(struct qat_rsa_output_params),
924 					  DMA_TO_DEVICE);
925 	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
926 		goto unmap_in_params;
927 
928 	msg->pke_mid.src_data_addr = qat_req->phy_in;
929 	msg->pke_mid.dest_data_addr = qat_req->phy_out;
930 	msg->pke_mid.opaque = (uint64_t)(__force long)qat_req;
931 	if (ctx->crt_mode)
932 		msg->input_param_count = 6;
933 	else
934 		msg->input_param_count = 3;
935 
936 	msg->output_param_count = 1;
937 	do {
938 		ret = adf_send_message(ctx->inst->pke_tx, (uint32_t *)msg);
939 	} while (ret == -EBUSY && ctr++ < 100);
940 
941 	if (!ret)
942 		return -EINPROGRESS;
943 
944 	if (!dma_mapping_error(dev, qat_req->phy_out))
945 		dma_unmap_single(dev, qat_req->phy_out,
946 				 sizeof(struct qat_rsa_output_params),
947 				 DMA_TO_DEVICE);
948 unmap_in_params:
949 	if (!dma_mapping_error(dev, qat_req->phy_in))
950 		dma_unmap_single(dev, qat_req->phy_in,
951 				 sizeof(struct qat_rsa_input_params),
952 				 DMA_TO_DEVICE);
953 unmap_dst:
954 	if (qat_req->dst_align)
955 		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
956 				  qat_req->out.rsa.dec.m);
957 	else
958 		if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
959 			dma_unmap_single(dev, qat_req->out.rsa.dec.m,
960 					 ctx->key_sz, DMA_FROM_DEVICE);
961 unmap_src:
962 	if (qat_req->src_align)
963 		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
964 				  qat_req->in.rsa.dec.c);
965 	else
966 		if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
967 			dma_unmap_single(dev, qat_req->in.rsa.dec.c,
968 					 ctx->key_sz, DMA_TO_DEVICE);
969 	return ret;
970 }
971 
972 static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
973 			 size_t vlen)
974 {
975 	struct qat_crypto_instance *inst = ctx->inst;
976 	struct device *dev = &GET_DEV(inst->accel_dev);
977 	const char *ptr = value;
978 	int ret;
979 
980 	while (!*ptr && vlen) {
981 		ptr++;
982 		vlen--;
983 	}
984 
985 	ctx->key_sz = vlen;
986 	ret = -EINVAL;
987 	/* invalid key size provided */
988 	if (!qat_rsa_enc_fn_id(ctx->key_sz))
989 		goto err;
990 
991 	ret = -ENOMEM;
992 	ctx->n = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
993 	if (!ctx->n)
994 		goto err;
995 
996 	memcpy(ctx->n, ptr, ctx->key_sz);
997 	return 0;
998 err:
999 	ctx->key_sz = 0;
1000 	ctx->n = NULL;
1001 	return ret;
1002 }
1003 
1004 static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
1005 			 size_t vlen)
1006 {
1007 	struct qat_crypto_instance *inst = ctx->inst;
1008 	struct device *dev = &GET_DEV(inst->accel_dev);
1009 	const char *ptr = value;
1010 
1011 	while (!*ptr && vlen) {
1012 		ptr++;
1013 		vlen--;
1014 	}
1015 
1016 	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
1017 		ctx->e = NULL;
1018 		return -EINVAL;
1019 	}
1020 
1021 	ctx->e = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
1022 	if (!ctx->e)
1023 		return -ENOMEM;
1024 
1025 	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
1026 	return 0;
1027 }
1028 
1029 static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
1030 			 size_t vlen)
1031 {
1032 	struct qat_crypto_instance *inst = ctx->inst;
1033 	struct device *dev = &GET_DEV(inst->accel_dev);
1034 	const char *ptr = value;
1035 	int ret;
1036 
1037 	while (!*ptr && vlen) {
1038 		ptr++;
1039 		vlen--;
1040 	}
1041 
1042 	ret = -EINVAL;
1043 	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
1044 		goto err;
1045 
1046 	ret = -ENOMEM;
1047 	ctx->d = dma_zalloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
1048 	if (!ctx->d)
1049 		goto err;
1050 
1051 	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
1052 	return 0;
1053 err:
1054 	ctx->d = NULL;
1055 	return ret;
1056 }
1057 
1058 static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
1059 {
1060 	while (!**ptr && *len) {
1061 		(*ptr)++;
1062 		(*len)--;
1063 	}
1064 }
1065 
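/*
 * Install the CRT components of a private key.  CRT mode is best effort:
 * if any component is missing or an allocation fails, everything set up so
 * far is zeroed and freed and decryption falls back to the (n, d) path.
 */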
1066 static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
1067 {
1068 	struct qat_crypto_instance *inst = ctx->inst;
1069 	struct device *dev = &GET_DEV(inst->accel_dev);
1070 	const char *ptr;
1071 	unsigned int len;
1072 	unsigned int half_key_sz = ctx->key_sz / 2;
1073 
1074 	/* p */
1075 	ptr = rsa_key->p;
1076 	len = rsa_key->p_sz;
1077 	qat_rsa_drop_leading_zeros(&ptr, &len);
1078 	if (!len)
1079 		goto err;
1080 	ctx->p = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
1081 	if (!ctx->p)
1082 		goto err;
1083 	memcpy(ctx->p + (half_key_sz - len), ptr, len);
1084 
1085 	/* q */
1086 	ptr = rsa_key->q;
1087 	len = rsa_key->q_sz;
1088 	qat_rsa_drop_leading_zeros(&ptr, &len);
1089 	if (!len)
1090 		goto free_p;
1091 	ctx->q = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
1092 	if (!ctx->q)
1093 		goto free_p;
1094 	memcpy(ctx->q + (half_key_sz - len), ptr, len);
1095 
1096 	/* dp */
1097 	ptr = rsa_key->dp;
1098 	len = rsa_key->dp_sz;
1099 	qat_rsa_drop_leading_zeros(&ptr, &len);
1100 	if (!len)
1101 		goto free_q;
1102 	ctx->dp = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dp,
1103 				      GFP_KERNEL);
1104 	if (!ctx->dp)
1105 		goto free_q;
1106 	memcpy(ctx->dp + (half_key_sz - len), ptr, len);
1107 
1108 	/* dq */
1109 	ptr = rsa_key->dq;
1110 	len = rsa_key->dq_sz;
1111 	qat_rsa_drop_leading_zeros(&ptr, &len);
1112 	if (!len)
1113 		goto free_dp;
1114 	ctx->dq = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_dq,
1115 				      GFP_KERNEL);
1116 	if (!ctx->dq)
1117 		goto free_dp;
1118 	memcpy(ctx->dq + (half_key_sz - len), ptr, len);
1119 
1120 	/* qinv */
1121 	ptr = rsa_key->qinv;
1122 	len = rsa_key->qinv_sz;
1123 	qat_rsa_drop_leading_zeros(&ptr, &len);
1124 	if (!len)
1125 		goto free_dq;
1126 	ctx->qinv = dma_zalloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
1127 					GFP_KERNEL);
1128 	if (!ctx->qinv)
1129 		goto free_dq;
1130 	memcpy(ctx->qinv + (half_key_sz - len), ptr, len);
1131 
1132 	ctx->crt_mode = true;
1133 	return;
1134 
1135 free_dq:
1136 	memset(ctx->dq, '\0', half_key_sz);
1137 	dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
1138 	ctx->dq = NULL;
1139 free_dp:
1140 	memset(ctx->dp, '\0', half_key_sz);
1141 	dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
1142 	ctx->dp = NULL;
1143 free_q:
1144 	memset(ctx->q, '\0', half_key_sz);
1145 	dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
1146 	ctx->q = NULL;
1147 free_p:
1148 	memset(ctx->p, '\0', half_key_sz);
1149 	dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
1150 	ctx->p = NULL;
1151 err:
1152 	ctx->crt_mode = false;
1153 }
1154 
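/*
 * Zero all private key material before returning it to the DMA pool, then
 * reset the context so a fresh key can be installed.
 */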
1155 static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
1156 {
1157 	unsigned int half_key_sz = ctx->key_sz / 2;
1158 
1159 	/* Free the old key if any */
1160 	if (ctx->n)
1161 		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
1162 	if (ctx->e)
1163 		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
1164 	if (ctx->d) {
1165 		memset(ctx->d, '\0', ctx->key_sz);
1166 		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
1167 	}
1168 	if (ctx->p) {
1169 		memset(ctx->p, '\0', half_key_sz);
1170 		dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
1171 	}
1172 	if (ctx->q) {
1173 		memset(ctx->q, '\0', half_key_sz);
1174 		dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
1175 	}
1176 	if (ctx->dp) {
1177 		memset(ctx->dp, '\0', half_key_sz);
1178 		dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
1179 	}
1180 	if (ctx->dq) {
1181 		memset(ctx->dq, '\0', half_key_sz);
1182 		dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
1183 	}
1184 	if (ctx->qinv) {
1185 		memset(ctx->qinv, '\0', half_key_sz);
1186 		dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
1187 	}
1188 
1189 	ctx->n = NULL;
1190 	ctx->e = NULL;
1191 	ctx->d = NULL;
1192 	ctx->p = NULL;
1193 	ctx->q = NULL;
1194 	ctx->dp = NULL;
1195 	ctx->dq = NULL;
1196 	ctx->qinv = NULL;
1197 	ctx->crt_mode = false;
1198 	ctx->key_sz = 0;
1199 }
1200 
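/*
 * Parse an encoded RSA key with the generic rsa_key helpers and load its
 * components into DMA-coherent buffers.  Any previously installed key is
 * cleared first, and the context is wiped again on any failure.
 */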
1201 static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
1202 			  unsigned int keylen, bool private)
1203 {
1204 	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
1205 	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
1206 	struct rsa_key rsa_key;
1207 	int ret;
1208 
1209 	qat_rsa_clear_ctx(dev, ctx);
1210 
1211 	if (private)
1212 		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
1213 	else
1214 		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
1215 	if (ret < 0)
1216 		goto free;
1217 
1218 	ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
1219 	if (ret < 0)
1220 		goto free;
1221 	ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
1222 	if (ret < 0)
1223 		goto free;
1224 	if (private) {
1225 		ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
1226 		if (ret < 0)
1227 			goto free;
1228 		qat_rsa_setkey_crt(ctx, &rsa_key);
1229 	}
1230 
1231 	if (!ctx->n || !ctx->e) {
1232 		/* invalid key provided */
1233 		ret = -EINVAL;
1234 		goto free;
1235 	}
1236 	if (private && !ctx->d) {
1237 		/* invalid private key provided */
1238 		ret = -EINVAL;
1239 		goto free;
1240 	}
1241 
1242 	return 0;
1243 free:
1244 	qat_rsa_clear_ctx(dev, ctx);
1245 	return ret;
1246 }
1247 
1248 static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
1249 			     unsigned int keylen)
1250 {
1251 	return qat_rsa_setkey(tfm, key, keylen, false);
1252 }
1253 
1254 static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
1255 			      unsigned int keylen)
1256 {
1257 	return qat_rsa_setkey(tfm, key, keylen, true);
1258 }
1259 
1260 static unsigned int qat_rsa_max_size(struct crypto_akcipher *tfm)
1261 {
1262 	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
1263 
1264 	return ctx->key_sz;
1265 }
1266 
1267 static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
1268 {
1269 	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
1270 	struct qat_crypto_instance *inst =
1271 			qat_crypto_get_instance_node(get_current_node());
1272 
1273 	if (!inst)
1274 		return -EINVAL;
1275 
1276 	ctx->key_sz = 0;
1277 	ctx->inst = inst;
1278 	return 0;
1279 }
1280 
1281 static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
1282 {
1283 	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
1284 	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
1285 
1286 	if (ctx->n)
1287 		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
1288 	if (ctx->e)
1289 		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
1290 	if (ctx->d) {
1291 		memset(ctx->d, '\0', ctx->key_sz);
1292 		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
1293 	}
1294 	qat_crypto_put_instance(ctx->inst);
1295 	ctx->n = NULL;
1296 	ctx->e = NULL;
1297 	ctx->d = NULL;
1298 }
1299 
1300 static struct akcipher_alg rsa = {
1301 	.encrypt = qat_rsa_enc,
1302 	.decrypt = qat_rsa_dec,
1303 	.sign = qat_rsa_dec,
1304 	.verify = qat_rsa_enc,
1305 	.set_pub_key = qat_rsa_setpubkey,
1306 	.set_priv_key = qat_rsa_setprivkey,
1307 	.max_size = qat_rsa_max_size,
1308 	.init = qat_rsa_init_tfm,
1309 	.exit = qat_rsa_exit_tfm,
1310 	.reqsize = sizeof(struct qat_asym_request) + 64,
1311 	.base = {
1312 		.cra_name = "rsa",
1313 		.cra_driver_name = "qat-rsa",
1314 		.cra_priority = 1000,
1315 		.cra_module = THIS_MODULE,
1316 		.cra_ctxsize = sizeof(struct qat_rsa_ctx),
1317 	},
1318 };
1319 
1320 static struct kpp_alg dh = {
1321 	.set_secret = qat_dh_set_secret,
1322 	.generate_public_key = qat_dh_compute_value,
1323 	.compute_shared_secret = qat_dh_compute_value,
1324 	.max_size = qat_dh_max_size,
1325 	.init = qat_dh_init_tfm,
1326 	.exit = qat_dh_exit_tfm,
1327 	.reqsize = sizeof(struct qat_asym_request) + 64,
1328 	.base = {
1329 		.cra_name = "dh",
1330 		.cra_driver_name = "qat-dh",
1331 		.cra_priority = 1000,
1332 		.cra_module = THIS_MODULE,
1333 		.cra_ctxsize = sizeof(struct qat_dh_ctx),
1334 	},
1335 };
1336 
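/*
 * In-kernel users reach these algorithms through the generic kpp/akcipher
 * APIs rather than this file.  A minimal DH sketch (illustrative only,
 * placeholder buffer names, error handling and async completion omitted):
 *
 *	struct crypto_kpp *tfm = crypto_alloc_kpp("dh", 0, 0);
 *	struct kpp_request *req = kpp_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_kpp_set_secret(tfm, packed_params, params_len);
 *	kpp_request_set_input(req, peer_key_sg, peer_key_len);
 *	kpp_request_set_output(req, secret_sg, secret_len);
 *	crypto_kpp_compute_shared_secret(req);
 *
 * packed_params is a struct dh serialized with crypto_dh_encode_key().
 * The crypto core prefers "qat-dh"/"qat-rsa" when a QAT device is present
 * because of their high cra_priority (1000).
 */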
1337 int qat_asym_algs_register(void)
1338 {
1339 	int ret = 0;
1340 
1341 	mutex_lock(&algs_lock);
1342 	if (++active_devs == 1) {
1343 		rsa.base.cra_flags = 0;
1344 		ret = crypto_register_akcipher(&rsa);
1345 		if (ret)
1346 			goto unlock;
1347 		ret = crypto_register_kpp(&dh);
1348 	}
1349 unlock:
1350 	mutex_unlock(&algs_lock);
1351 	return ret;
1352 }
1353 
1354 void qat_asym_algs_unregister(void)
1355 {
1356 	mutex_lock(&algs_lock);
1357 	if (--active_devs == 0) {
1358 		crypto_unregister_akcipher(&rsa);
1359 		crypto_unregister_kpp(&dh);
1360 	}
1361 	mutex_unlock(&algs_lock);
1362 }
1363