1 /*
2  * This file is part of the Chelsio T6 Crypto driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  *
34  * Written and Maintained by:
35  *	Manoj Malviya (manojmalviya@chelsio.com)
36  *	Atul Gupta (atul.gupta@chelsio.com)
37  *	Jitendra Lulla (jlulla@chelsio.com)
38  *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
39  *	Harsh Jain (harsh@chelsio.com)
40  */
41 
42 #define pr_fmt(fmt) "chcr:" fmt
43 
44 #include <linux/kernel.h>
45 #include <linux/module.h>
46 #include <linux/crypto.h>
47 #include <linux/cryptohash.h>
48 #include <linux/skbuff.h>
49 #include <linux/rtnetlink.h>
50 #include <linux/highmem.h>
51 #include <linux/scatterlist.h>
52 
53 #include <crypto/aes.h>
54 #include <crypto/algapi.h>
55 #include <crypto/hash.h>
56 #include <crypto/gcm.h>
57 #include <crypto/sha.h>
58 #include <crypto/authenc.h>
59 #include <crypto/ctr.h>
60 #include <crypto/gf128mul.h>
61 #include <crypto/internal/aead.h>
62 #include <crypto/null.h>
63 #include <crypto/internal/skcipher.h>
64 #include <crypto/aead.h>
65 #include <crypto/scatterwalk.h>
66 #include <crypto/internal/hash.h>
67 
68 #include "t4fw_api.h"
69 #include "t4_msg.h"
70 #include "chcr_core.h"
71 #include "chcr_algo.h"
72 #include "chcr_crypto.h"
73 
74 #define IV AES_BLOCK_SIZE
75 
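/*
 * Lookup tables used when sizing work requests: entry N gives the space,
 * in bytes, charged in a WR for a source ULPTX SGL (sgl_ent_len) or a
 * destination PHYS_DSGL (dsgl_ent_len) carrying N scatter/gather entries.
 * chcr_sg_ent_in_wr()/chcr_hash_ent_in_wr() use them to decide how much
 * data still fits in the current WR.
 */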
76 static unsigned int sgl_ent_len[] = {
77 	0, 0, 16, 24, 40, 48, 64, 72, 88,
78 	96, 112, 120, 136, 144, 160, 168, 184,
79 	192, 208, 216, 232, 240, 256, 264, 280,
80 	288, 304, 312, 328, 336, 352, 360, 376
81 };
82 
83 static unsigned int dsgl_ent_len[] = {
84 	0, 32, 32, 48, 48, 64, 64, 80, 80,
85 	112, 112, 128, 128, 144, 144, 160, 160,
86 	192, 192, 208, 208, 224, 224, 240, 240,
87 	272, 272, 288, 288, 304, 304, 320, 320
88 };
89 
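/*
 * AES key-schedule round constants (Rcon), kept in the most significant
 * byte so they can be XORed directly into the key-schedule words built in
 * get_aes_decrypt_key() below.
 */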
90 static u32 round_constant[11] = {
91 	0x01000000, 0x02000000, 0x04000000, 0x08000000,
92 	0x10000000, 0x20000000, 0x40000000, 0x80000000,
93 	0x1B000000, 0x36000000, 0x6C000000
94 };
95 
96 static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
97 				   unsigned char *input, int err);
98 
99 static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
100 {
101 	return ctx->crypto_ctx->aeadctx;
102 }
103 
104 static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
105 {
106 	return ctx->crypto_ctx->ablkctx;
107 }
108 
109 static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
110 {
111 	return ctx->crypto_ctx->hmacctx;
112 }
113 
114 static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
115 {
116 	return gctx->ctx->gcm;
117 }
118 
119 static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
120 {
121 	return gctx->ctx->authenc;
122 }
123 
124 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
125 {
126 	return container_of(ctx->dev, struct uld_ctx, dev);
127 }
128 
129 static inline int is_ofld_imm(const struct sk_buff *skb)
130 {
131 	return (skb->len <= SGE_MAX_WR_LEN);
132 }
133 
134 static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
135 {
136 	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
137 }
138 
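/*
 * Count how many SGL entries are needed to describe @reqlen bytes of @sg,
 * starting @skip bytes into the list, when each hardware entry can hold at
 * most @entlen bytes.
 */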
139 static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
140 			 unsigned int entlen,
141 			 unsigned int skip)
142 {
143 	int nents = 0;
144 	unsigned int less;
145 	unsigned int skip_len = 0;
146 
147 	while (sg && skip) {
148 		if (sg_dma_len(sg) <= skip) {
149 			skip -= sg_dma_len(sg);
150 			skip_len = 0;
151 			sg = sg_next(sg);
152 		} else {
153 			skip_len = skip;
154 			skip = 0;
155 		}
156 	}
157 
158 	while (sg && reqlen) {
159 		less = min(reqlen, sg_dma_len(sg) - skip_len);
160 		nents += DIV_ROUND_UP(less, entlen);
161 		reqlen -= less;
162 		skip_len = 0;
163 		sg = sg_next(sg);
164 	}
165 	return nents;
166 }
167 
168 static inline int get_aead_subtype(struct crypto_aead *aead)
169 {
170 	struct aead_alg *alg = crypto_aead_alg(aead);
171 	struct chcr_alg_template *chcr_crypto_alg =
172 		container_of(alg, struct chcr_alg_template, alg.aead);
173 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
174 }
175 
176 void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
177 {
178 	u8 temp[SHA512_DIGEST_SIZE];
179 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
180 	int authsize = crypto_aead_authsize(tfm);
181 	struct cpl_fw6_pld *fw6_pld;
182 	int cmp = 0;
183 
184 	fw6_pld = (struct cpl_fw6_pld *)input;
185 	if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
186 	    (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
187 		cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
188 	} else {
189 
190 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
191 				authsize, req->assoclen +
192 				req->cryptlen - authsize);
193 		cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
194 	}
195 	if (cmp)
196 		*err = -EBADMSG;
197 	else
198 		*err = 0;
199 }
200 
201 static int chcr_inc_wrcount(struct chcr_dev *dev)
202 {
203 	if (dev->state == CHCR_DETACH)
204 		return 1;
205 	atomic_inc(&dev->inflight);
206 	return 0;
207 }
208 
209 static inline void chcr_dec_wrcount(struct chcr_dev *dev)
210 {
211 	atomic_dec(&dev->inflight);
212 }
213 
214 static inline int chcr_handle_aead_resp(struct aead_request *req,
215 					 unsigned char *input,
216 					 int err)
217 {
218 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
219 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
220 	struct chcr_dev *dev = a_ctx(tfm)->dev;
221 
222 	chcr_aead_common_exit(req);
223 	if (reqctx->verify == VERIFY_SW) {
224 		chcr_verify_tag(req, input, &err);
225 		reqctx->verify = VERIFY_HW;
226 	}
227 	chcr_dec_wrcount(dev);
228 	req->base.complete(&req->base, err);
229 
230 	return err;
231 }
232 
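/*
 * Expand the AES key schedule and store the last Nk round-key words of it in
 * reverse order; this "reverse round key" is what generate_copy_rrkey()
 * programs into the key context for hardware decryption.
 */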
233 static void get_aes_decrypt_key(unsigned char *dec_key,
234 				       const unsigned char *key,
235 				       unsigned int keylength)
236 {
237 	u32 temp;
238 	u32 w_ring[MAX_NK];
239 	int i, j, k;
240 	u8  nr, nk;
241 
242 	switch (keylength) {
243 	case AES_KEYLENGTH_128BIT:
244 		nk = KEYLENGTH_4BYTES;
245 		nr = NUMBER_OF_ROUNDS_10;
246 		break;
247 	case AES_KEYLENGTH_192BIT:
248 		nk = KEYLENGTH_6BYTES;
249 		nr = NUMBER_OF_ROUNDS_12;
250 		break;
251 	case AES_KEYLENGTH_256BIT:
252 		nk = KEYLENGTH_8BYTES;
253 		nr = NUMBER_OF_ROUNDS_14;
254 		break;
255 	default:
256 		return;
257 	}
258 	for (i = 0; i < nk; i++)
259 		w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
260 
261 	i = 0;
262 	temp = w_ring[nk - 1];
263 	while (i + nk < (nr + 1) * 4) {
264 		if (!(i % nk)) {
265 			/* RotWord(temp) */
266 			temp = (temp << 8) | (temp >> 24);
267 			temp = aes_ks_subword(temp);
268 			temp ^= round_constant[i / nk];
269 		} else if (nk == 8 && (i % 4 == 0)) {
270 			temp = aes_ks_subword(temp);
271 		}
272 		w_ring[i % nk] ^= temp;
273 		temp = w_ring[i % nk];
274 		i++;
275 	}
276 	i--;
277 	for (k = 0, j = i % nk; k < nk; k++) {
278 		*((u32 *)dec_key + k) = htonl(w_ring[j]);
279 		j--;
280 		if (j < 0)
281 			j += nk;
282 	}
283 }
284 
285 static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
286 {
287 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
288 
289 	switch (ds) {
290 	case SHA1_DIGEST_SIZE:
291 		base_hash = crypto_alloc_shash("sha1", 0, 0);
292 		break;
293 	case SHA224_DIGEST_SIZE:
294 		base_hash = crypto_alloc_shash("sha224", 0, 0);
295 		break;
296 	case SHA256_DIGEST_SIZE:
297 		base_hash = crypto_alloc_shash("sha256", 0, 0);
298 		break;
299 	case SHA384_DIGEST_SIZE:
300 		base_hash = crypto_alloc_shash("sha384", 0, 0);
301 		break;
302 	case SHA512_DIGEST_SIZE:
303 		base_hash = crypto_alloc_shash("sha512", 0, 0);
304 		break;
305 	}
306 
307 	return base_hash;
308 }
309 
310 static int chcr_compute_partial_hash(struct shash_desc *desc,
311 				     char *iopad, char *result_hash,
312 				     int digest_size)
313 {
314 	struct sha1_state sha1_st;
315 	struct sha256_state sha256_st;
316 	struct sha512_state sha512_st;
317 	int error;
318 
319 	if (digest_size == SHA1_DIGEST_SIZE) {
320 		error = crypto_shash_init(desc) ?:
321 			crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
322 			crypto_shash_export(desc, (void *)&sha1_st);
323 		memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
324 	} else if (digest_size == SHA224_DIGEST_SIZE) {
325 		error = crypto_shash_init(desc) ?:
326 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
327 			crypto_shash_export(desc, (void *)&sha256_st);
328 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
329 
330 	} else if (digest_size == SHA256_DIGEST_SIZE) {
331 		error = crypto_shash_init(desc) ?:
332 			crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
333 			crypto_shash_export(desc, (void *)&sha256_st);
334 		memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
335 
336 	} else if (digest_size == SHA384_DIGEST_SIZE) {
337 		error = crypto_shash_init(desc) ?:
338 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
339 			crypto_shash_export(desc, (void *)&sha512_st);
340 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
341 
342 	} else if (digest_size == SHA512_DIGEST_SIZE) {
343 		error = crypto_shash_init(desc) ?:
344 			crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
345 			crypto_shash_export(desc, (void *)&sha512_st);
346 		memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
347 	} else {
348 		error = -EINVAL;
349 		pr_err("Unknown digest size %d\n", digest_size);
350 	}
351 	return error;
352 }
353 
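/*
 * Byte-swap the exported partial hash state into the word order the hardware
 * expects: 64-bit words for SHA-384/512 digests, 32-bit words otherwise.
 */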
354 static void chcr_change_order(char *buf, int ds)
355 {
356 	int i;
357 
358 	if (ds == SHA512_DIGEST_SIZE) {
359 		for (i = 0; i < (ds / sizeof(u64)); i++)
360 			*((__be64 *)buf + i) =
361 				cpu_to_be64(*((u64 *)buf + i));
362 	} else {
363 		for (i = 0; i < (ds / sizeof(u32)); i++)
364 			*((__be32 *)buf + i) =
365 				cpu_to_be32(*((u32 *)buf + i));
366 	}
367 }
368 
369 static inline int is_hmac(struct crypto_tfm *tfm)
370 {
371 	struct crypto_alg *alg = tfm->__crt_alg;
372 	struct chcr_alg_template *chcr_crypto_alg =
373 		container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
374 			     alg.hash);
375 	if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
376 		return 1;
377 	return 0;
378 }
379 
380 static inline void dsgl_walk_init(struct dsgl_walk *walk,
381 				   struct cpl_rx_phys_dsgl *dsgl)
382 {
383 	walk->dsgl = dsgl;
384 	walk->nents = 0;
385 	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
386 }
387 
388 static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
389 				 int pci_chan_id)
390 {
391 	struct cpl_rx_phys_dsgl *phys_cpl;
392 
393 	phys_cpl = walk->dsgl;
394 
395 	phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
396 				    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
397 	phys_cpl->pcirlxorder_to_noofsgentr =
398 		htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
399 		      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
400 		      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
401 		      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
402 		      CPL_RX_PHYS_DSGL_DCAID_V(0) |
403 		      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
404 	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
405 	phys_cpl->rss_hdr_int.qid = htons(qid);
406 	phys_cpl->rss_hdr_int.hash_val = 0;
407 	phys_cpl->rss_hdr_int.channel = pci_chan_id;
408 }
409 
410 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
411 					size_t size,
412 					dma_addr_t addr)
413 {
414 	int j;
415 
416 	if (!size)
417 		return;
418 	j = walk->nents;
419 	walk->to->len[j % 8] = htons(size);
420 	walk->to->addr[j % 8] = cpu_to_be64(addr);
421 	j++;
422 	if ((j % 8) == 0)
423 		walk->to++;
424 	walk->nents = j;
425 }
426 
427 static void dsgl_walk_add_sg(struct dsgl_walk *walk,
428 			   struct scatterlist *sg,
429 			      unsigned int slen,
430 			      unsigned int skip)
431 {
432 	int skip_len = 0;
433 	unsigned int left_size = slen, len = 0;
434 	unsigned int j = walk->nents;
435 	int offset, ent_len;
436 
437 	if (!slen)
438 		return;
439 	while (sg && skip) {
440 		if (sg_dma_len(sg) <= skip) {
441 			skip -= sg_dma_len(sg);
442 			skip_len = 0;
443 			sg = sg_next(sg);
444 		} else {
445 			skip_len = skip;
446 			skip = 0;
447 		}
448 	}
449 
450 	while (left_size && sg) {
451 		len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
452 		offset = 0;
453 		while (len) {
454 			ent_len =  min_t(u32, len, CHCR_DST_SG_SIZE);
455 			walk->to->len[j % 8] = htons(ent_len);
456 			walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
457 						      offset + skip_len);
458 			offset += ent_len;
459 			len -= ent_len;
460 			j++;
461 			if ((j % 8) == 0)
462 				walk->to++;
463 		}
464 		walk->last_sg = sg;
465 		walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
466 					  skip_len) + skip_len;
467 		left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
468 		skip_len = 0;
469 		sg = sg_next(sg);
470 	}
471 	walk->nents = j;
472 }
473 
474 static inline void ulptx_walk_init(struct ulptx_walk *walk,
475 				   struct ulptx_sgl *ulp)
476 {
477 	walk->sgl = ulp;
478 	walk->nents = 0;
479 	walk->pair_idx = 0;
480 	walk->pair = ulp->sge;
481 	walk->last_sg = NULL;
482 	walk->last_sg_len = 0;
483 }
484 
485 static inline void ulptx_walk_end(struct ulptx_walk *walk)
486 {
487 	walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
488 			      ULPTX_NSGE_V(walk->nents));
489 }
490 
491 
492 static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
493 					size_t size,
494 					dma_addr_t addr)
495 {
496 	if (!size)
497 		return;
498 
499 	if (walk->nents == 0) {
500 		walk->sgl->len0 = cpu_to_be32(size);
501 		walk->sgl->addr0 = cpu_to_be64(addr);
502 	} else {
503 		walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
504 		walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
505 		walk->pair_idx = !walk->pair_idx;
506 		if (!walk->pair_idx)
507 			walk->pair++;
508 	}
509 	walk->nents++;
510 }
511 
512 static void ulptx_walk_add_sg(struct ulptx_walk *walk,
513 					struct scatterlist *sg,
514 			       unsigned int len,
515 			       unsigned int skip)
516 {
517 	int small;
518 	int skip_len = 0;
519 	unsigned int sgmin;
520 
521 	if (!len)
522 		return;
523 	while (sg && skip) {
524 		if (sg_dma_len(sg) <= skip) {
525 			skip -= sg_dma_len(sg);
526 			skip_len = 0;
527 			sg = sg_next(sg);
528 		} else {
529 			skip_len = skip;
530 			skip = 0;
531 		}
532 	}
533 	WARN(!sg, "SG should not be null here\n");
534 	if (sg && (walk->nents == 0)) {
535 		small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
536 		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
537 		walk->sgl->len0 = cpu_to_be32(sgmin);
538 		walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
539 		walk->nents++;
540 		len -= sgmin;
541 		walk->last_sg = sg;
542 		walk->last_sg_len = sgmin + skip_len;
543 		skip_len += sgmin;
544 		if (sg_dma_len(sg) == skip_len) {
545 			sg = sg_next(sg);
546 			skip_len = 0;
547 		}
548 	}
549 
550 	while (sg && len) {
551 		small = min(sg_dma_len(sg) - skip_len, len);
552 		sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
553 		walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
554 		walk->pair->addr[walk->pair_idx] =
555 			cpu_to_be64(sg_dma_address(sg) + skip_len);
556 		walk->pair_idx = !walk->pair_idx;
557 		walk->nents++;
558 		if (!walk->pair_idx)
559 			walk->pair++;
560 		len -= sgmin;
561 		skip_len += sgmin;
562 		walk->last_sg = sg;
563 		walk->last_sg_len = skip_len;
564 		if (sg_dma_len(sg) == skip_len) {
565 			sg = sg_next(sg);
566 			skip_len = 0;
567 		}
568 	}
569 }
570 
571 static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
572 {
573 	struct crypto_alg *alg = tfm->__crt_alg;
574 	struct chcr_alg_template *chcr_crypto_alg =
575 		container_of(alg, struct chcr_alg_template, alg.crypto);
576 
577 	return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
578 }
579 
580 static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
581 {
582 	struct adapter *adap = netdev2adap(dev);
583 	struct sge_uld_txq_info *txq_info =
584 		adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
585 	struct sge_uld_txq *txq;
586 	int ret = 0;
587 
588 	local_bh_disable();
589 	txq = &txq_info->uldtxq[idx];
590 	spin_lock(&txq->sendq.lock);
591 	if (txq->full)
592 		ret = -1;
593 	spin_unlock(&txq->sendq.lock);
594 	local_bh_enable();
595 	return ret;
596 }
597 
598 static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
599 			       struct _key_ctx *key_ctx)
600 {
601 	if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
602 		memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
603 	} else {
604 		memcpy(key_ctx->key,
605 		       ablkctx->key + (ablkctx->enckey_len >> 1),
606 		       ablkctx->enckey_len >> 1);
607 		memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
608 		       ablkctx->rrkey, ablkctx->enckey_len >> 1);
609 	}
610 	return 0;
611 }
612 
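/*
 * Return how many source bytes (beyond the first @srcskip) can be carried in
 * a hash WR without exceeding @space, charging sgl_ent_len[] per SGL entry
 * and starting the count at @minsg entries.
 */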
613 static int chcr_hash_ent_in_wr(struct scatterlist *src,
614 			     unsigned int minsg,
615 			     unsigned int space,
616 			     unsigned int srcskip)
617 {
618 	int srclen = 0;
619 	int srcsg = minsg;
620 	int soffset = 0, sless;
621 
622 	if (sg_dma_len(src) == srcskip) {
623 		src = sg_next(src);
624 		srcskip = 0;
625 	}
626 	while (src && space > (sgl_ent_len[srcsg + 1])) {
627 		sless = min_t(unsigned int, sg_dma_len(src) - soffset -	srcskip,
628 							CHCR_SRC_SG_SIZE);
629 		srclen += sless;
630 		soffset += sless;
631 		srcsg++;
632 		if (sg_dma_len(src) == (soffset + srcskip)) {
633 			src = sg_next(src);
634 			soffset = 0;
635 			srcskip = 0;
636 		}
637 	}
638 	return srclen;
639 }
640 
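/*
 * Walk src and dst together and return how many bytes can be placed in one
 * WR: entries are charged against @space using sgl_ent_len[]/dsgl_ent_len[],
 * and the result is the smaller of the source and destination byte counts.
 */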
641 static int chcr_sg_ent_in_wr(struct scatterlist *src,
642 			     struct scatterlist *dst,
643 			     unsigned int minsg,
644 			     unsigned int space,
645 			     unsigned int srcskip,
646 			     unsigned int dstskip)
647 {
648 	int srclen = 0, dstlen = 0;
649 	int srcsg = minsg, dstsg = minsg;
650 	int offset = 0, soffset = 0, less, sless = 0;
651 
652 	if (sg_dma_len(src) == srcskip) {
653 		src = sg_next(src);
654 		srcskip = 0;
655 	}
656 	if (sg_dma_len(dst) == dstskip) {
657 		dst = sg_next(dst);
658 		dstskip = 0;
659 	}
660 
661 	while (src && dst &&
662 	       space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
663 		sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
664 				CHCR_SRC_SG_SIZE);
665 		srclen += sless;
666 		srcsg++;
667 		offset = 0;
668 		while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
669 		       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
670 			if (srclen <= dstlen)
671 				break;
672 			less = min_t(unsigned int, sg_dma_len(dst) - offset -
673 				     dstskip, CHCR_DST_SG_SIZE);
674 			dstlen += less;
675 			offset += less;
676 			if ((offset + dstskip) == sg_dma_len(dst)) {
677 				dst = sg_next(dst);
678 				offset = 0;
679 			}
680 			dstsg++;
681 			dstskip = 0;
682 		}
683 		soffset += sless;
684 		if ((soffset + srcskip) == sg_dma_len(src)) {
685 			src = sg_next(src);
686 			srcskip = 0;
687 			soffset = 0;
688 		}
689 
690 	}
691 	return min(srclen, dstlen);
692 }
693 
694 static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
695 				u32 flags,
696 				struct scatterlist *src,
697 				struct scatterlist *dst,
698 				unsigned int nbytes,
699 				u8 *iv,
700 				unsigned short op_type)
701 {
702 	int err;
703 
704 	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
705 
706 	skcipher_request_set_sync_tfm(subreq, cipher);
707 	skcipher_request_set_callback(subreq, flags, NULL, NULL);
708 	skcipher_request_set_crypt(subreq, src, dst,
709 				   nbytes, iv);
710 
711 	err = op_type ? crypto_skcipher_decrypt(subreq) :
712 		crypto_skcipher_encrypt(subreq);
713 	skcipher_request_zero(subreq);
714 
715 	return err;
716 
717 }
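/*
 * Fill the common FW_CRYPTO_LOOKASIDE_WR header and ULPTX sub-command shared
 * by cipher, hash and AEAD work requests: lengths in 16-byte units, the
 * completion cookie, and the tx/rx queue ids for this context.
 */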
718 static inline void create_wreq(struct chcr_context *ctx,
719 			       struct chcr_wr *chcr_req,
720 			       struct crypto_async_request *req,
721 			       unsigned int imm,
722 			       int hash_sz,
723 			       unsigned int len16,
724 			       unsigned int sc_len,
725 			       unsigned int lcb)
726 {
727 	struct uld_ctx *u_ctx = ULD_CTX(ctx);
728 	int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
729 
730 
731 	chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
732 	chcr_req->wreq.pld_size_hash_size =
733 		htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
734 	chcr_req->wreq.len16_pkd =
735 		htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
736 	chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
737 	chcr_req->wreq.rx_chid_to_rx_q_id =
738 		FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
739 				!!lcb, ctx->tx_qidx);
740 
741 	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
742 						       qid);
743 	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
744 				     ((sizeof(chcr_req->wreq)) >> 4)));
745 
746 	chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
747 	chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
748 					   sizeof(chcr_req->key_ctx) + sc_len);
749 }
750 
751 /**
752  *	create_cipher_wr - form the WR for cipher operations
753  *	@wrparam: cipher work-request parameters, carrying the cipher
754  *		  request, the ingress qid where the response of this WR
755  *		  should be received, and the number of bytes handled by
756  *		  this WR
757  */
758 static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
759 {
760 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
761 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
762 	struct sk_buff *skb = NULL;
763 	struct chcr_wr *chcr_req;
764 	struct cpl_rx_phys_dsgl *phys_cpl;
765 	struct ulptx_sgl *ulptx;
766 	struct chcr_blkcipher_req_ctx *reqctx =
767 		ablkcipher_request_ctx(wrparam->req);
768 	unsigned int temp = 0, transhdr_len, dst_size;
769 	int error;
770 	int nents;
771 	unsigned int kctx_len;
772 	gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
773 			GFP_KERNEL : GFP_ATOMIC;
774 	struct adapter *adap = padap(c_ctx(tfm)->dev);
775 
776 	nents = sg_nents_xlen(reqctx->dstsg,  wrparam->bytes, CHCR_DST_SG_SIZE,
777 			      reqctx->dst_ofst);
778 	dst_size = get_space_for_phys_dsgl(nents);
779 	kctx_len = roundup(ablkctx->enckey_len, 16);
780 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
781 	nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
782 				  CHCR_SRC_SG_SIZE, reqctx->src_ofst);
783 	temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
784 				     (sgl_len(nents) * 8);
785 	transhdr_len += temp;
786 	transhdr_len = roundup(transhdr_len, 16);
787 	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
788 	if (!skb) {
789 		error = -ENOMEM;
790 		goto err;
791 	}
792 	chcr_req = __skb_put_zero(skb, transhdr_len);
793 	chcr_req->sec_cpl.op_ivinsrtofst =
794 		FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);
795 
796 	chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
797 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
798 			FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
799 
800 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
801 			FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
802 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
803 							 ablkctx->ciph_mode,
804 							 0, 0, IV >> 1);
805 	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
806 							  0, 1, dst_size);
807 
808 	chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
809 	if ((reqctx->op == CHCR_DECRYPT_OP) &&
810 	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
811 	       CRYPTO_ALG_SUB_TYPE_CTR)) &&
812 	    (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
813 	       CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
814 		generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
815 	} else {
816 		if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
817 		    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
818 			memcpy(chcr_req->key_ctx.key, ablkctx->key,
819 			       ablkctx->enckey_len);
820 		} else {
821 			memcpy(chcr_req->key_ctx.key, ablkctx->key +
822 			       (ablkctx->enckey_len >> 1),
823 			       ablkctx->enckey_len >> 1);
824 			memcpy(chcr_req->key_ctx.key +
825 			       (ablkctx->enckey_len >> 1),
826 			       ablkctx->key,
827 			       ablkctx->enckey_len >> 1);
828 		}
829 	}
830 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
831 	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
832 	chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
833 	chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
834 
835 	atomic_inc(&adap->chcr_stats.cipher_rqst);
836 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
837 		+ (reqctx->imm ? (wrparam->bytes) : 0);
838 	create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
839 		    transhdr_len, temp,
840 			ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
841 	reqctx->skb = skb;
842 
843 	if (reqctx->op && (ablkctx->ciph_mode ==
844 			   CHCR_SCMD_CIPHER_MODE_AES_CBC))
845 		sg_pcopy_to_buffer(wrparam->req->src,
846 			sg_nents(wrparam->req->src), wrparam->req->info, 16,
847 			reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
848 
849 	return skb;
850 err:
851 	return ERR_PTR(error);
852 }
853 
854 static inline int chcr_keyctx_ck_size(unsigned int keylen)
855 {
856 	int ck_size = 0;
857 
858 	if (keylen == AES_KEYSIZE_128)
859 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
860 	else if (keylen == AES_KEYSIZE_192)
861 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
862 	else if (keylen == AES_KEYSIZE_256)
863 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
864 	else
865 		ck_size = 0;
866 
867 	return ck_size;
868 }
869 static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
870 				       const u8 *key,
871 				       unsigned int keylen)
872 {
873 	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
874 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
875 	int err = 0;
876 
877 	crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
878 				CRYPTO_TFM_REQ_MASK);
879 	crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
880 				cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
881 	err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
882 	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
883 	tfm->crt_flags |=
884 		crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
885 		CRYPTO_TFM_RES_MASK;
886 	return err;
887 }
888 
889 static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
890 			       const u8 *key,
891 			       unsigned int keylen)
892 {
893 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
894 	unsigned int ck_size, context_size;
895 	u16 alignment = 0;
896 	int err;
897 
898 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
899 	if (err)
900 		goto badkey_err;
901 
902 	ck_size = chcr_keyctx_ck_size(keylen);
903 	alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
904 	memcpy(ablkctx->key, key, keylen);
905 	ablkctx->enckey_len = keylen;
906 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
907 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
908 			keylen + alignment) >> 4;
909 
910 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
911 						0, 0, context_size);
912 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
913 	return 0;
914 badkey_err:
915 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
916 	ablkctx->enckey_len = 0;
917 
918 	return err;
919 }
920 
921 static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
922 				   const u8 *key,
923 				   unsigned int keylen)
924 {
925 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
926 	unsigned int ck_size, context_size;
927 	u16 alignment = 0;
928 	int err;
929 
930 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
931 	if (err)
932 		goto badkey_err;
933 	ck_size = chcr_keyctx_ck_size(keylen);
934 	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
935 	memcpy(ablkctx->key, key, keylen);
936 	ablkctx->enckey_len = keylen;
937 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
938 			keylen + alignment) >> 4;
939 
940 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
941 						0, 0, context_size);
942 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
943 
944 	return 0;
945 badkey_err:
946 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
947 	ablkctx->enckey_len = 0;
948 
949 	return err;
950 }
951 
952 static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
953 				   const u8 *key,
954 				   unsigned int keylen)
955 {
956 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
957 	unsigned int ck_size, context_size;
958 	u16 alignment = 0;
959 	int err;
960 
961 	if (keylen < CTR_RFC3686_NONCE_SIZE)
962 		return -EINVAL;
963 	memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
964 	       CTR_RFC3686_NONCE_SIZE);
965 
966 	keylen -= CTR_RFC3686_NONCE_SIZE;
967 	err = chcr_cipher_fallback_setkey(cipher, key, keylen);
968 	if (err)
969 		goto badkey_err;
970 
971 	ck_size = chcr_keyctx_ck_size(keylen);
972 	alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
973 	memcpy(ablkctx->key, key, keylen);
974 	ablkctx->enckey_len = keylen;
975 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
976 			keylen + alignment) >> 4;
977 
978 	ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
979 						0, 0, context_size);
980 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
981 
982 	return 0;
983 badkey_err:
984 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
985 	ablkctx->enckey_len = 0;
986 
987 	return err;
988 }
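/* Add @add to the 128-bit big-endian counter in @srciv, writing the result
 * to @dstiv and propagating any carry into the higher words.
 */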
989 static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
990 {
991 	unsigned int size = AES_BLOCK_SIZE;
992 	__be32 *b = (__be32 *)(dstiv + size);
993 	u32 c, prev;
994 
995 	memcpy(dstiv, srciv, AES_BLOCK_SIZE);
996 	for (; size >= 4; size -= 4) {
997 		prev = be32_to_cpu(*--b);
998 		c = prev + add;
999 		*b = cpu_to_be32(c);
1000 		if (prev < c)
1001 			break;
1002 		add = 1;
1003 	}
1004 
1005 }
1006 
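/*
 * Clamp @bytes so that the low 32-bit word of the CTR counter in @iv does not
 * wrap within a single work request; any remainder is processed later.
 */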
1007 static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
1008 {
1009 	__be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
1010 	u64 c;
1011 	u32 temp = be32_to_cpu(*--b);
1012 
1013 	temp = ~temp;
1014 	c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
1015 	if ((bytes / AES_BLOCK_SIZE) > c)
1016 		bytes = c * AES_BLOCK_SIZE;
1017 	return bytes;
1018 }
1019 
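/*
 * Recompute the XTS tweak after part of a request has completed: encrypt the
 * original IV with the second half of the key, then multiply it by x in
 * GF(2^128) once per AES block already processed (gf128mul_x8_ble() advances
 * eight blocks at a time). For non-final chunks the tweak is decrypted back
 * before being returned in @iv.
 */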
1020 static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
1021 			     u32 isfinal)
1022 {
1023 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1024 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1025 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1026 	struct crypto_aes_ctx aes;
1027 	int ret, i;
1028 	u8 *key;
1029 	unsigned int keylen;
1030 	int round = reqctx->last_req_len / AES_BLOCK_SIZE;
1031 	int round8 = round / 8;
1032 
1033 	memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
1034 
1035 	keylen = ablkctx->enckey_len / 2;
1036 	key = ablkctx->key + keylen;
1037 	ret = aes_expandkey(&aes, key, keylen);
1038 	if (ret)
1039 		return ret;
1040 	aes_encrypt(&aes, iv, iv);
1041 	for (i = 0; i < round8; i++)
1042 		gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
1043 
1044 	for (i = 0; i < (round % 8); i++)
1045 		gf128mul_x_ble((le128 *)iv, (le128 *)iv);
1046 
1047 	if (!isfinal)
1048 		aes_decrypt(&aes, iv, iv);
1049 
1050 	memzero_explicit(&aes, sizeof(aes));
1051 	return 0;
1052 }
1053 
1054 static int chcr_update_cipher_iv(struct ablkcipher_request *req,
1055 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1056 {
1057 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1058 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1059 	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1060 	int ret = 0;
1061 
1062 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1063 		ctr_add_iv(iv, req->info, (reqctx->processed /
1064 			   AES_BLOCK_SIZE));
1065 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
1066 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1067 			CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
1068 						AES_BLOCK_SIZE) + 1);
1069 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1070 		ret = chcr_update_tweak(req, iv, 0);
1071 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1072 		if (reqctx->op)
1073 			/* Updated before sending last WR */
1074 			memcpy(iv, req->info, AES_BLOCK_SIZE);
1075 		else
1076 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1077 	}
1078 
1079 	return ret;
1080 
1081 }
1082 
1083 /* We need a separate function for the final IV because in RFC3686 the
1084  * initial counter starts from 1 and the IV buffer is only 8 bytes, which
1085  * remains constant for subsequent update requests
1086  */
1087 
1088 static int chcr_final_cipher_iv(struct ablkcipher_request *req,
1089 				   struct cpl_fw6_pld *fw6_pld, u8 *iv)
1090 {
1091 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1092 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1093 	int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
1094 	int ret = 0;
1095 
1096 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
1097 		ctr_add_iv(iv, req->info, DIV_ROUND_UP(reqctx->processed,
1098 						       AES_BLOCK_SIZE));
1099 	else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
1100 		ret = chcr_update_tweak(req, iv, 1);
1101 	else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
1102 		/* Already updated for decrypt */
1103 		if (!reqctx->op)
1104 			memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
1105 
1106 	}
1107 	return ret;
1108 
1109 }
1110 
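/*
 * Completion handler for cipher WRs: update the IV from the response, then
 * either finish the request, fall back to the software cipher when no more
 * data fits in a WR, or build and send the next WR for the remaining bytes.
 */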
1111 static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
1112 				   unsigned char *input, int err)
1113 {
1114 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1115 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1116 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1117 	struct sk_buff *skb;
1118 	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
1119 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1120 	struct  cipher_wr_param wrparam;
1121 	struct chcr_dev *dev = c_ctx(tfm)->dev;
1122 	int bytes;
1123 
1124 	if (err)
1125 		goto unmap;
1126 	if (req->nbytes == reqctx->processed) {
1127 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1128 				      req);
1129 		err = chcr_final_cipher_iv(req, fw6_pld, req->info);
1130 		goto complete;
1131 	}
1132 
1133 	if (!reqctx->imm) {
1134 		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
1135 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1136 					  reqctx->src_ofst, reqctx->dst_ofst);
1137 		if ((bytes + reqctx->processed) >= req->nbytes)
1138 			bytes  = req->nbytes - reqctx->processed;
1139 		else
1140 			bytes = rounddown(bytes, 16);
1141 	} else {
1142 		/* CTR mode counter overflow */
1143 		bytes  = req->nbytes - reqctx->processed;
1144 	}
1145 	err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
1146 	if (err)
1147 		goto unmap;
1148 
1149 	if (unlikely(bytes == 0)) {
1150 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1151 				      req);
1152 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1153 				     req->base.flags,
1154 				     req->src,
1155 				     req->dst,
1156 				     req->nbytes,
1157 				     req->info,
1158 				     reqctx->op);
1159 		goto complete;
1160 	}
1161 
1162 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1163 	    CRYPTO_ALG_SUB_TYPE_CTR)
1164 		bytes = adjust_ctr_overflow(reqctx->iv, bytes);
1165 	wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
1166 	wrparam.req = req;
1167 	wrparam.bytes = bytes;
1168 	skb = create_cipher_wr(&wrparam);
1169 	if (IS_ERR(skb)) {
1170 		pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
1171 		err = PTR_ERR(skb);
1172 		goto unmap;
1173 	}
1174 	skb->dev = u_ctx->lldi.ports[0];
1175 	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1176 	chcr_send_wr(skb);
1177 	reqctx->last_req_len = bytes;
1178 	reqctx->processed += bytes;
1179 	return 0;
1180 unmap:
1181 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1182 complete:
1183 	chcr_dec_wrcount(dev);
1184 	req->base.complete(&req->base, err);
1185 	return err;
1186 }
1187 
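/*
 * Validate the request, DMA-map it, decide between immediate data and SGLs,
 * set up the per-mode IV (plain, CTR or RFC3686), and build the first cipher
 * WR; requests that cannot be carved into a WR (bytes == 0) are handed to the
 * software fallback instead.
 */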
1188 static int process_cipher(struct ablkcipher_request *req,
1189 				  unsigned short qid,
1190 				  struct sk_buff **skb,
1191 				  unsigned short op_type)
1192 {
1193 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1194 	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
1195 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
1196 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
1197 	struct	cipher_wr_param wrparam;
1198 	int bytes, err = -EINVAL;
1199 
1200 	reqctx->processed = 0;
1201 	if (!req->info)
1202 		goto error;
1203 	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
1204 	    (req->nbytes == 0) ||
1205 	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
1206 		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
1207 		       ablkctx->enckey_len, req->nbytes, ivsize);
1208 		goto error;
1209 	}
1210 
1211 	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1212 	if (err)
1213 		goto error;
1214 	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
1215 					    AES_MIN_KEY_SIZE +
1216 					    sizeof(struct cpl_rx_phys_dsgl) +
1217 					/* Min dsgl size */
1218 					    32))) {
1219 		/* Can be sent as Imm */
1220 		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;
1221 
1222 		dnents = sg_nents_xlen(req->dst, req->nbytes,
1223 				       CHCR_DST_SG_SIZE, 0);
1224 		phys_dsgl = get_space_for_phys_dsgl(dnents);
1225 		kctx_len = roundup(ablkctx->enckey_len, 16);
1226 		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
1227 		reqctx->imm = (transhdr_len + IV + req->nbytes) <=
1228 			SGE_MAX_WR_LEN;
1229 		bytes = IV + req->nbytes;
1230 
1231 	} else {
1232 		reqctx->imm = 0;
1233 	}
1234 
1235 	if (!reqctx->imm) {
1236 		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
1237 					  CIP_SPACE_LEFT(ablkctx->enckey_len),
1238 					  0, 0);
1239 		if ((bytes + reqctx->processed) >= req->nbytes)
1240 			bytes  = req->nbytes - reqctx->processed;
1241 		else
1242 			bytes = rounddown(bytes, 16);
1243 	} else {
1244 		bytes = req->nbytes;
1245 	}
1246 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1247 	    CRYPTO_ALG_SUB_TYPE_CTR) {
1248 		bytes = adjust_ctr_overflow(req->info, bytes);
1249 	}
1250 	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
1251 	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
1252 		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
1253 		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
1254 				CTR_RFC3686_IV_SIZE);
1255 
1256 		/* initialize counter portion of counter block */
1257 		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
1258 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1259 
1260 	} else {
1261 
1262 		memcpy(reqctx->iv, req->info, IV);
1263 	}
1264 	if (unlikely(bytes == 0)) {
1265 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
1266 				      req);
1267 		err = chcr_cipher_fallback(ablkctx->sw_cipher,
1268 					   req->base.flags,
1269 					   req->src,
1270 					   req->dst,
1271 					   req->nbytes,
1272 					   reqctx->iv,
1273 					   op_type);
1274 		goto error;
1275 	}
1276 	reqctx->op = op_type;
1277 	reqctx->srcsg = req->src;
1278 	reqctx->dstsg = req->dst;
1279 	reqctx->src_ofst = 0;
1280 	reqctx->dst_ofst = 0;
1281 	wrparam.qid = qid;
1282 	wrparam.req = req;
1283 	wrparam.bytes = bytes;
1284 	*skb = create_cipher_wr(&wrparam);
1285 	if (IS_ERR(*skb)) {
1286 		err = PTR_ERR(*skb);
1287 		goto unmap;
1288 	}
1289 	reqctx->processed = bytes;
1290 	reqctx->last_req_len = bytes;
1291 
1292 	return 0;
1293 unmap:
1294 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1295 error:
1296 	return err;
1297 }
1298 
1299 static int chcr_aes_encrypt(struct ablkcipher_request *req)
1300 {
1301 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1302 	struct chcr_dev *dev = c_ctx(tfm)->dev;
1303 	struct sk_buff *skb = NULL;
1304 	int err, isfull = 0;
1305 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1306 
1307 	err = chcr_inc_wrcount(dev);
1308 	if (err)
1309 		return -ENXIO;
1310 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1311 					    c_ctx(tfm)->tx_qidx))) {
1312 		isfull = 1;
1313 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1314 			err = -ENOSPC;
1315 			goto error;
1316 		}
1317 	}
1318 
1319 	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1320 			     &skb, CHCR_ENCRYPT_OP);
1321 	if (err || !skb)
1322 		return  err;
1323 	skb->dev = u_ctx->lldi.ports[0];
1324 	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1325 	chcr_send_wr(skb);
1326 	return isfull ? -EBUSY : -EINPROGRESS;
1327 error:
1328 	chcr_dec_wrcount(dev);
1329 	return err;
1330 }
1331 
1332 static int chcr_aes_decrypt(struct ablkcipher_request *req)
1333 {
1334 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1335 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1336 	struct chcr_dev *dev = c_ctx(tfm)->dev;
1337 	struct sk_buff *skb = NULL;
1338 	int err, isfull = 0;
1339 
1340 	err = chcr_inc_wrcount(dev);
1341 	if (err)
1342 		return -ENXIO;
1343 
1344 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1345 					    c_ctx(tfm)->tx_qidx))) {
1346 		isfull = 1;
1347 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1348 			return -ENOSPC;
1349 	}
1350 
1351 	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1352 			     &skb, CHCR_DECRYPT_OP);
1353 	if (err || !skb)
1354 		return err;
1355 	skb->dev = u_ctx->lldi.ports[0];
1356 	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1357 	chcr_send_wr(skb);
1358 	return isfull ? -EBUSY : -EINPROGRESS;
1359 }
1360 
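/*
 * Lazily bind this tfm context to a chcr device on first use and pick its
 * tx/rx queue indices; tx channels are assigned round-robin by toggling
 * dev->tx_channel_id under lock_chcr_dev.
 */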
1361 static int chcr_device_init(struct chcr_context *ctx)
1362 {
1363 	struct uld_ctx *u_ctx = NULL;
1364 	unsigned int id;
1365 	int txq_perchan, txq_idx, ntxq;
1366 	int err = 0, rxq_perchan, rxq_idx;
1367 
1368 	id = smp_processor_id();
1369 	if (!ctx->dev) {
1370 		u_ctx = assign_chcr_device();
1371 		if (!u_ctx) {
1372 			err = -ENXIO;
1373 			pr_err("chcr device assignment fails\n");
1374 			goto out;
1375 		}
1376 		ctx->dev = &u_ctx->dev;
1377 		ntxq = u_ctx->lldi.ntxq;
1378 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1379 		txq_perchan = ntxq / u_ctx->lldi.nchan;
1380 		spin_lock(&ctx->dev->lock_chcr_dev);
1381 		ctx->tx_chan_id = ctx->dev->tx_channel_id;
1382 		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
1383 		spin_unlock(&ctx->dev->lock_chcr_dev);
1384 		rxq_idx = ctx->tx_chan_id * rxq_perchan;
1385 		rxq_idx += id % rxq_perchan;
1386 		txq_idx = ctx->tx_chan_id * txq_perchan;
1387 		txq_idx += id % txq_perchan;
1388 		ctx->rx_qidx = rxq_idx;
1389 		ctx->tx_qidx = txq_idx;
1390 		/* Channel ID used by the SGE to forward the packet to the host.
1391 		 * The same value should be used by FW in the cpl_fw6_pld RSS_CH
1392 		 * field. The driver programs the PCI channel ID to be used by
1393 		 * FW at queue-allocation time with the value "pi->tx_chan".
1394 		 */
1395 		ctx->pci_chan_id = txq_idx / txq_perchan;
1396 	}
1397 out:
1398 	return err;
1399 }
1400 
1401 static int chcr_cra_init(struct crypto_tfm *tfm)
1402 {
1403 	struct crypto_alg *alg = tfm->__crt_alg;
1404 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1405 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1406 
1407 	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
1408 				CRYPTO_ALG_NEED_FALLBACK);
1409 	if (IS_ERR(ablkctx->sw_cipher)) {
1410 		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1411 		return PTR_ERR(ablkctx->sw_cipher);
1412 	}
1413 
1414 	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
1415 	return chcr_device_init(crypto_tfm_ctx(tfm));
1416 }
1417 
1418 static int chcr_rfc3686_init(struct crypto_tfm *tfm)
1419 {
1420 	struct crypto_alg *alg = tfm->__crt_alg;
1421 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1422 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1423 
1424 	/* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
1425 	 * cannot be used as the fallback in chcr_handle_cipher_resp()
1426 	 */
1427 	ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
1428 				CRYPTO_ALG_NEED_FALLBACK);
1429 	if (IS_ERR(ablkctx->sw_cipher)) {
1430 		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1431 		return PTR_ERR(ablkctx->sw_cipher);
1432 	}
1433 	tfm->crt_ablkcipher.reqsize =  sizeof(struct chcr_blkcipher_req_ctx);
1434 	return chcr_device_init(crypto_tfm_ctx(tfm));
1435 }
1436 
1437 
1438 static void chcr_cra_exit(struct crypto_tfm *tfm)
1439 {
1440 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1441 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1442 
1443 	crypto_free_sync_skcipher(ablkctx->sw_cipher);
1444 }
1445 
1446 static int get_alg_config(struct algo_param *params,
1447 			  unsigned int auth_size)
1448 {
1449 	switch (auth_size) {
1450 	case SHA1_DIGEST_SIZE:
1451 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1452 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1453 		params->result_size = SHA1_DIGEST_SIZE;
1454 		break;
1455 	case SHA224_DIGEST_SIZE:
1456 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1457 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1458 		params->result_size = SHA256_DIGEST_SIZE;
1459 		break;
1460 	case SHA256_DIGEST_SIZE:
1461 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1462 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1463 		params->result_size = SHA256_DIGEST_SIZE;
1464 		break;
1465 	case SHA384_DIGEST_SIZE:
1466 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1467 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1468 		params->result_size = SHA512_DIGEST_SIZE;
1469 		break;
1470 	case SHA512_DIGEST_SIZE:
1471 		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1472 		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1473 		params->result_size = SHA512_DIGEST_SIZE;
1474 		break;
1475 	default:
1476 		pr_err("chcr : ERROR, unsupported digest size\n");
1477 		return -EINVAL;
1478 	}
1479 	return 0;
1480 }
1481 
1482 static inline void chcr_free_shash(struct crypto_shash *base_hash)
1483 {
1484 		crypto_free_shash(base_hash);
1485 }
1486 
1487 /**
1488  *	create_hash_wr - Create hash work request
1489  *	@req: hash request
1490  */
1491 static struct sk_buff *create_hash_wr(struct ahash_request *req,
1492 				      struct hash_wr_param *param)
1493 {
1494 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1495 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1496 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
1497 	struct sk_buff *skb = NULL;
1498 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1499 	struct chcr_wr *chcr_req;
1500 	struct ulptx_sgl *ulptx;
1501 	unsigned int nents = 0, transhdr_len;
1502 	unsigned int temp = 0;
1503 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1504 		GFP_ATOMIC;
1505 	struct adapter *adap = padap(h_ctx(tfm)->dev);
1506 	int error = 0;
1507 
1508 	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
1509 	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
1510 				param->sg_len) <= SGE_MAX_WR_LEN;
1511 	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
1512 		      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
1513 	nents += param->bfr_len ? 1 : 0;
1514 	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
1515 				param->sg_len, 16) : (sgl_len(nents) * 8);
1516 	transhdr_len = roundup(transhdr_len, 16);
1517 
1518 	skb = alloc_skb(transhdr_len, flags);
1519 	if (!skb)
1520 		return ERR_PTR(-ENOMEM);
1521 	chcr_req = __skb_put_zero(skb, transhdr_len);
1522 
1523 	chcr_req->sec_cpl.op_ivinsrtofst =
1524 		FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
1525 	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1526 
1527 	chcr_req->sec_cpl.aadstart_cipherstop_hi =
1528 		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1529 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
1530 		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1531 	chcr_req->sec_cpl.seqno_numivs =
1532 		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1533 					 param->opad_needed, 0);
1534 
1535 	chcr_req->sec_cpl.ivgen_hdrlen =
1536 		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1537 
1538 	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1539 	       param->alg_prm.result_size);
1540 
1541 	if (param->opad_needed)
1542 		memcpy(chcr_req->key_ctx.key +
1543 		       ((param->alg_prm.result_size <= 32) ? 32 :
1544 			CHCR_HASH_MAX_DIGEST_SIZE),
1545 		       hmacctx->opad, param->alg_prm.result_size);
1546 
1547 	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1548 					    param->alg_prm.mk_size, 0,
1549 					    param->opad_needed,
1550 					    ((param->kctx_len +
1551 					     sizeof(chcr_req->key_ctx)) >> 4));
1552 	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1553 	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
1554 				     DUMMY_BYTES);
1555 	if (param->bfr_len != 0) {
1556 		req_ctx->hctx_wr.dma_addr =
1557 			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
1558 				       param->bfr_len, DMA_TO_DEVICE);
1559 		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1560 				       req_ctx->hctx_wr.dma_addr)) {
1561 			error = -ENOMEM;
1562 			goto err;
1563 		}
1564 		req_ctx->hctx_wr.dma_len = param->bfr_len;
1565 	} else {
1566 		req_ctx->hctx_wr.dma_addr = 0;
1567 	}
1568 	chcr_add_hash_src_ent(req, ulptx, param);
1569 	/* Request up to the max WR size */
1570 	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
1571 				(param->sg_len + param->bfr_len) : 0);
1572 	atomic_inc(&adap->chcr_stats.digest_rqst);
1573 	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
1574 		    param->hash_size, transhdr_len,
1575 		    temp,  0);
1576 	req_ctx->hctx_wr.skb = skb;
1577 	return skb;
1578 err:
1579 	kfree_skb(skb);
1580 	return  ERR_PTR(error);
1581 }
1582 
1583 static int chcr_ahash_update(struct ahash_request *req)
1584 {
1585 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1586 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1587 	struct uld_ctx *u_ctx = NULL;
1588 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1589 	struct sk_buff *skb;
1590 	u8 remainder = 0, bs;
1591 	unsigned int nbytes = req->nbytes;
1592 	struct hash_wr_param params;
1593 	int error, isfull = 0;
1594 
1595 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1596 	u_ctx = ULD_CTX(h_ctx(rtfm));
1597 
1598 	if (nbytes + req_ctx->reqlen >= bs) {
1599 		remainder = (nbytes + req_ctx->reqlen) % bs;
1600 		nbytes = nbytes + req_ctx->reqlen - remainder;
1601 	} else {
1602 		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1603 				   + req_ctx->reqlen, nbytes, 0);
1604 		req_ctx->reqlen += nbytes;
1605 		return 0;
1606 	}
1607 	error = chcr_inc_wrcount(dev);
1608 	if (error)
1609 		return -ENXIO;
1610 	/* Detach state for CHCR means lldi or padap is freed. Increasing the
1611 	 * inflight count for dev guarantees that lldi and padap remain valid.
1612 	 */
1613 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1614 					    h_ctx(rtfm)->tx_qidx))) {
1615 		isfull = 1;
1616 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1617 			error = -ENOSPC;
1618 			goto err;
1619 		}
1620 	}
1621 
1622 	chcr_init_hctx_per_wr(req_ctx);
1623 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1624 	if (error) {
1625 		error = -ENOMEM;
1626 		goto err;
1627 	}
1628 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1629 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1630 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1631 				     HASH_SPACE_LEFT(params.kctx_len), 0);
1632 	if (params.sg_len > req->nbytes)
1633 		params.sg_len = req->nbytes;
1634 	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
1635 			req_ctx->reqlen;
1636 	params.opad_needed = 0;
1637 	params.more = 1;
1638 	params.last = 0;
1639 	params.bfr_len = req_ctx->reqlen;
1640 	params.scmd1 = 0;
1641 	req_ctx->hctx_wr.srcsg = req->src;
1642 
1643 	params.hash_size = params.alg_prm.result_size;
1644 	req_ctx->data_len += params.sg_len + params.bfr_len;
1645 	skb = create_hash_wr(req, &params);
1646 	if (IS_ERR(skb)) {
1647 		error = PTR_ERR(skb);
1648 		goto unmap;
1649 	}
1650 
1651 	req_ctx->hctx_wr.processed += params.sg_len;
1652 	if (remainder) {
1653 		/* Swap buffers */
1654 		swap(req_ctx->reqbfr, req_ctx->skbfr);
1655 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1656 				   req_ctx->reqbfr, remainder, req->nbytes -
1657 				   remainder);
1658 	}
1659 	req_ctx->reqlen = remainder;
1660 	skb->dev = u_ctx->lldi.ports[0];
1661 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1662 	chcr_send_wr(skb);
1663 
1664 	return isfull ? -EBUSY : -EINPROGRESS;
1665 unmap:
1666 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1667 err:
1668 	chcr_dec_wrcount(dev);
1669 	return error;
1670 }
1671 
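/*
 * create_last_hash_block - build the final padding block used when no
 * data is buffered: 0x80 followed by zeroes, with the total message
 * length in bits in the last eight bytes (offset 56 for 64-byte
 * blocks, offset 120 for 128-byte blocks).
 */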
1672 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1673 {
1674 	memset(bfr_ptr, 0, bs);
1675 	*bfr_ptr = 0x80;
1676 	if (bs == 64)
1677 		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
1678 	else
1679 		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1680 }
1681 
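/*
 * chcr_ahash_final - send the last WR for whatever is buffered.
 * With an empty buffer a pre-built padding block goes out as a
 * non-final (more=1) WR; otherwise the buffered bytes are sent with
 * last=1 and scmd1 set to the total data length.
 */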
1682 static int chcr_ahash_final(struct ahash_request *req)
1683 {
1684 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1685 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1686 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1687 	struct hash_wr_param params;
1688 	struct sk_buff *skb;
1689 	struct uld_ctx *u_ctx = NULL;
1690 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1691 	int error = -EINVAL;
1692 
1693 	error = chcr_inc_wrcount(dev);
1694 	if (error)
1695 		return -ENXIO;
1696 
1697 	chcr_init_hctx_per_wr(req_ctx);
1698 	u_ctx = ULD_CTX(h_ctx(rtfm));
1699 	if (is_hmac(crypto_ahash_tfm(rtfm)))
1700 		params.opad_needed = 1;
1701 	else
1702 		params.opad_needed = 0;
1703 	params.sg_len = 0;
1704 	req_ctx->hctx_wr.isfinal = 1;
1705 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1706 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1707 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1708 		params.opad_needed = 1;
1709 		params.kctx_len *= 2;
1710 	} else {
1711 		params.opad_needed = 0;
1712 	}
1713 
1714 	req_ctx->hctx_wr.result = 1;
1715 	params.bfr_len = req_ctx->reqlen;
1716 	req_ctx->data_len += params.bfr_len + params.sg_len;
1717 	req_ctx->hctx_wr.srcsg = req->src;
1718 	if (req_ctx->reqlen == 0) {
1719 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1720 		params.last = 0;
1721 		params.more = 1;
1722 		params.scmd1 = 0;
1723 		params.bfr_len = bs;
1724 
1725 	} else {
1726 		params.scmd1 = req_ctx->data_len;
1727 		params.last = 1;
1728 		params.more = 0;
1729 	}
1730 	params.hash_size = crypto_ahash_digestsize(rtfm);
1731 	skb = create_hash_wr(req, &params);
1732 	if (IS_ERR(skb)) {
1733 		error = PTR_ERR(skb);
1734 		goto err;
1735 	}
1736 	req_ctx->reqlen = 0;
1737 	skb->dev = u_ctx->lldi.ports[0];
1738 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1739 	chcr_send_wr(skb);
1740 	return -EINPROGRESS;
1741 err:
1742 	chcr_dec_wrcount(dev);
1743 	return error;
1744 }
1745 
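/*
 * chcr_ahash_finup - combined update + final. If the source does not
 * fit in one WR, only a block-aligned portion is sent as an
 * intermediate request and the rest is driven from the response path
 * (chcr_ahash_continue); otherwise a single final WR is issued.
 */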
1746 static int chcr_ahash_finup(struct ahash_request *req)
1747 {
1748 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1749 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1750 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1751 	struct uld_ctx *u_ctx = NULL;
1752 	struct sk_buff *skb;
1753 	struct hash_wr_param params;
1754 	u8  bs;
1755 	int error, isfull = 0;
1756 
1757 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1758 	u_ctx = ULD_CTX(h_ctx(rtfm));
1759 	error = chcr_inc_wrcount(dev);
1760 	if (error)
1761 		return -ENXIO;
1762 
1763 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1764 					    h_ctx(rtfm)->tx_qidx))) {
1765 		isfull = 1;
1766 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1767 			error = -ENOSPC;
1768 			goto err;
1769 		}
1770 	}
1771 	chcr_init_hctx_per_wr(req_ctx);
1772 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1773 	if (error) {
1774 		error = -ENOMEM;
1775 		goto err;
1776 	}
1777 
1778 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1779 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1780 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1781 		params.kctx_len *= 2;
1782 		params.opad_needed = 1;
1783 	} else {
1784 		params.opad_needed = 0;
1785 	}
1786 
1787 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1788 				    HASH_SPACE_LEFT(params.kctx_len), 0);
1789 	if (params.sg_len < req->nbytes) {
1790 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1791 			params.kctx_len /= 2;
1792 			params.opad_needed = 0;
1793 		}
1794 		params.last = 0;
1795 		params.more = 1;
1796 		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
1797 					- req_ctx->reqlen;
1798 		params.hash_size = params.alg_prm.result_size;
1799 		params.scmd1 = 0;
1800 	} else {
1801 		params.last = 1;
1802 		params.more = 0;
1803 		params.sg_len = req->nbytes;
1804 		params.hash_size = crypto_ahash_digestsize(rtfm);
1805 		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
1806 				params.sg_len;
1807 	}
1808 	params.bfr_len = req_ctx->reqlen;
1809 	req_ctx->data_len += params.bfr_len + params.sg_len;
1810 	req_ctx->hctx_wr.result = 1;
1811 	req_ctx->hctx_wr.srcsg = req->src;
1812 	if ((req_ctx->reqlen + req->nbytes) == 0) {
1813 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1814 		params.last = 0;
1815 		params.more = 1;
1816 		params.scmd1 = 0;
1817 		params.bfr_len = bs;
1818 	}
1819 	skb = create_hash_wr(req, &params);
1820 	if (IS_ERR(skb)) {
1821 		error = PTR_ERR(skb);
1822 		goto unmap;
1823 	}
1824 	req_ctx->reqlen = 0;
1825 	req_ctx->hctx_wr.processed += params.sg_len;
1826 	skb->dev = u_ctx->lldi.ports[0];
1827 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1828 	chcr_send_wr(skb);
1829 
1830 	return isfull ? -EBUSY : -EINPROGRESS;
1831 unmap:
1832 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1833 err:
1834 	chcr_dec_wrcount(dev);
1835 	return error;
1836 }
1837 
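/*
 * chcr_ahash_digest - one-shot init + update + final. The request
 * context is re-initialized, then the same split logic as finup is
 * used: data that does not fit in a single WR is sent in block-aligned
 * chunks and completed from the response path.
 */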
1838 static int chcr_ahash_digest(struct ahash_request *req)
1839 {
1840 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1841 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1842 	struct chcr_dev *dev = h_ctx(rtfm)->dev;
1843 	struct uld_ctx *u_ctx = NULL;
1844 	struct sk_buff *skb;
1845 	struct hash_wr_param params;
1846 	u8  bs;
1847 	int error, isfull = 0;
1848 
1849 	rtfm->init(req);
1850 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1851 	error = chcr_inc_wrcount(dev);
1852 	if (error)
1853 		return -ENXIO;
1854 
1855 	u_ctx = ULD_CTX(h_ctx(rtfm));
1856 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1857 					    h_ctx(rtfm)->tx_qidx))) {
1858 		isfull = 1;
1859 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1860 			error = -ENOSPC;
1861 			goto err;
1862 		}
1863 	}
1864 
1865 	chcr_init_hctx_per_wr(req_ctx);
1866 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1867 	if (error) {
1868 		error = -ENOMEM;
1869 		goto err;
1870 	}
1871 
1872 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1873 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1874 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1875 		params.kctx_len *= 2;
1876 		params.opad_needed = 1;
1877 	} else {
1878 		params.opad_needed = 0;
1879 	}
1880 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
1881 				HASH_SPACE_LEFT(params.kctx_len), 0);
1882 	if (params.sg_len < req->nbytes) {
1883 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1884 			params.kctx_len /= 2;
1885 			params.opad_needed = 0;
1886 		}
1887 		params.last = 0;
1888 		params.more = 1;
1889 		params.scmd1 = 0;
1890 		params.sg_len = rounddown(params.sg_len, bs);
1891 		params.hash_size = params.alg_prm.result_size;
1892 	} else {
1893 		params.sg_len = req->nbytes;
1894 		params.hash_size = crypto_ahash_digestsize(rtfm);
1895 		params.last = 1;
1896 		params.more = 0;
1897 		params.scmd1 = req->nbytes + req_ctx->data_len;
1898 
1899 	}
1900 	params.bfr_len = 0;
1901 	req_ctx->hctx_wr.result = 1;
1902 	req_ctx->hctx_wr.srcsg = req->src;
1903 	req_ctx->data_len += params.bfr_len + params.sg_len;
1904 
1905 	if (req->nbytes == 0) {
1906 		create_last_hash_block(req_ctx->reqbfr, bs, 0);
1907 		params.more = 1;
1908 		params.bfr_len = bs;
1909 	}
1910 
1911 	skb = create_hash_wr(req, &params);
1912 	if (IS_ERR(skb)) {
1913 		error = PTR_ERR(skb);
1914 		goto unmap;
1915 	}
1916 	req_ctx->hctx_wr.processed += params.sg_len;
1917 	skb->dev = u_ctx->lldi.ports[0];
1918 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1919 	chcr_send_wr(skb);
1920 	return isfull ? -EBUSY : -EINPROGRESS;
1921 unmap:
1922 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1923 err:
1924 	chcr_dec_wrcount(dev);
1925 	return error;
1926 }
1927 
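/*
 * chcr_ahash_continue - issue the next WR for a request whose source
 * scatterlist did not fit into a single work request. Called from the
 * response path until all of req->nbytes has been processed.
 */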
1928 static int chcr_ahash_continue(struct ahash_request *req)
1929 {
1930 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1931 	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1932 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1933 	struct uld_ctx *u_ctx = NULL;
1934 	struct sk_buff *skb;
1935 	struct hash_wr_param params;
1936 	u8  bs;
1937 	int error;
1938 
1939 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1940 	u_ctx = ULD_CTX(h_ctx(rtfm));
1941 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1942 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
1943 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
1944 		params.kctx_len *= 2;
1945 		params.opad_needed = 1;
1946 	} else {
1947 		params.opad_needed = 0;
1948 	}
1949 	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
1950 					    HASH_SPACE_LEFT(params.kctx_len),
1951 					    hctx_wr->src_ofst);
1952 	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
1953 		params.sg_len = req->nbytes - hctx_wr->processed;
1954 	if (!hctx_wr->result ||
1955 	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
1956 		if (is_hmac(crypto_ahash_tfm(rtfm))) {
1957 			params.kctx_len /= 2;
1958 			params.opad_needed = 0;
1959 		}
1960 		params.last = 0;
1961 		params.more = 1;
1962 		params.sg_len = rounddown(params.sg_len, bs);
1963 		params.hash_size = params.alg_prm.result_size;
1964 		params.scmd1 = 0;
1965 	} else {
1966 		params.last = 1;
1967 		params.more = 0;
1968 		params.hash_size = crypto_ahash_digestsize(rtfm);
1969 		params.scmd1 = reqctx->data_len + params.sg_len;
1970 	}
1971 	params.bfr_len = 0;
1972 	reqctx->data_len += params.sg_len;
1973 	skb = create_hash_wr(req, &params);
1974 	if (IS_ERR(skb)) {
1975 		error = PTR_ERR(skb);
1976 		goto err;
1977 	}
1978 	hctx_wr->processed += params.sg_len;
1979 	skb->dev = u_ctx->lldi.ports[0];
1980 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1981 	chcr_send_wr(skb);
1982 	return 0;
1983 err:
1984 	return error;
1985 }
1986 
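/*
 * chcr_handle_ahash_resp - completion handler for hash WRs. Copies the
 * final digest to req->result or the intermediate state to
 * partial_hash (SHA-224/384 carry the full SHA-256/512 state), then
 * either continues a multi-WR request or completes it.
 */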
1987 static inline void chcr_handle_ahash_resp(struct ahash_request *req,
1988 					  unsigned char *input,
1989 					  int err)
1990 {
1991 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
1992 	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
1993 	int digestsize, updated_digestsize;
1994 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1995 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1996 	struct chcr_dev *dev = h_ctx(tfm)->dev;
1997 
1998 	if (input == NULL)
1999 		goto out;
2000 	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
2001 	updated_digestsize = digestsize;
2002 	if (digestsize == SHA224_DIGEST_SIZE)
2003 		updated_digestsize = SHA256_DIGEST_SIZE;
2004 	else if (digestsize == SHA384_DIGEST_SIZE)
2005 		updated_digestsize = SHA512_DIGEST_SIZE;
2006 
2007 	if (hctx_wr->dma_addr) {
2008 		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
2009 				 hctx_wr->dma_len, DMA_TO_DEVICE);
2010 		hctx_wr->dma_addr = 0;
2011 	}
2012 	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
2013 				 req->nbytes)) {
2014 		if (hctx_wr->result == 1) {
2015 			hctx_wr->result = 0;
2016 			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
2017 			       digestsize);
2018 		} else {
2019 			memcpy(reqctx->partial_hash,
2020 			       input + sizeof(struct cpl_fw6_pld),
2021 			       updated_digestsize);
2022 
2023 		}
2024 		goto unmap;
2025 	}
2026 	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
2027 	       updated_digestsize);
2028 
2029 	err = chcr_ahash_continue(req);
2030 	if (err)
2031 		goto unmap;
2032 	return;
2033 unmap:
2034 	if (hctx_wr->is_sg_map)
2035 		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
2036 
2037 
2038 out:
2039 	chcr_dec_wrcount(dev);
2040 	req->base.complete(&req->base, err);
2041 }
2042 
2043 /*
2044  *	chcr_handle_resp - dispatch the completion to the per-type handler
2045  *	@req: crypto request
2046  */
2047 int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
2048 			 int err)
2049 {
2050 	struct crypto_tfm *tfm = req->tfm;
2051 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2052 	struct adapter *adap = padap(ctx->dev);
2053 
2054 	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
2055 	case CRYPTO_ALG_TYPE_AEAD:
2056 		err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
2057 		break;
2058 
2059 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
2060 		chcr_handle_cipher_resp(ablkcipher_request_cast(req),
2061 					input, err);
2062 		break;
2063 	case CRYPTO_ALG_TYPE_AHASH:
2064 		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
2065 	}
2066 	atomic_inc(&adap->chcr_stats.complete);
2067 	return err;
2068 }
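
/*
 * chcr_ahash_export/import - save and restore the software-visible
 * hash state (partial hash, buffered bytes and running length) so a
 * request can be suspended and resumed later.
 */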
2069 static int chcr_ahash_export(struct ahash_request *areq, void *out)
2070 {
2071 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2072 	struct chcr_ahash_req_ctx *state = out;
2073 
2074 	state->reqlen = req_ctx->reqlen;
2075 	state->data_len = req_ctx->data_len;
2076 	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
2077 	memcpy(state->partial_hash, req_ctx->partial_hash,
2078 	       CHCR_HASH_MAX_DIGEST_SIZE);
2079 	chcr_init_hctx_per_wr(state);
2080 	return 0;
2081 }
2082 
2083 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
2084 {
2085 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2086 	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
2087 
2088 	req_ctx->reqlen = state->reqlen;
2089 	req_ctx->data_len = state->data_len;
2090 	req_ctx->reqbfr = req_ctx->bfr1;
2091 	req_ctx->skbfr = req_ctx->bfr2;
2092 	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
2093 	memcpy(req_ctx->partial_hash, state->partial_hash,
2094 	       CHCR_HASH_MAX_DIGEST_SIZE);
2095 	chcr_init_hctx_per_wr(req_ctx);
2096 	return 0;
2097 }
2098 
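/*
 * chcr_ahash_setkey - HMAC setkey. Keys longer than the block size are
 * digested first; ipad and opad are then derived by XORing the padded
 * key with IPAD_DATA/OPAD_DATA and running one compression step each,
 * and those partial hashes are what is programmed into the hardware.
 */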
2099 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2100 			     unsigned int keylen)
2101 {
2102 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
2103 	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2104 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2105 	unsigned int i, err = 0, updated_digestsize;
2106 
2107 	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
2108 
2109 	/* Use the key to calculate the ipad and opad. The ipad will be sent
2110 	 * with the first request's data and the opad with the final hash
2111 	 * result; they are kept in hmacctx->ipad and hmacctx->opad.
2112 	 */
2113 	shash->tfm = hmacctx->base_hash;
2114 	if (keylen > bs) {
2115 		err = crypto_shash_digest(shash, key, keylen,
2116 					  hmacctx->ipad);
2117 		if (err)
2118 			goto out;
2119 		keylen = digestsize;
2120 	} else {
2121 		memcpy(hmacctx->ipad, key, keylen);
2122 	}
2123 	memset(hmacctx->ipad + keylen, 0, bs - keylen);
2124 	memcpy(hmacctx->opad, hmacctx->ipad, bs);
2125 
2126 	for (i = 0; i < bs / sizeof(int); i++) {
2127 		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
2128 		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
2129 	}
2130 
2131 	updated_digestsize = digestsize;
2132 	if (digestsize == SHA224_DIGEST_SIZE)
2133 		updated_digestsize = SHA256_DIGEST_SIZE;
2134 	else if (digestsize == SHA384_DIGEST_SIZE)
2135 		updated_digestsize = SHA512_DIGEST_SIZE;
2136 	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
2137 					hmacctx->ipad, digestsize);
2138 	if (err)
2139 		goto out;
2140 	chcr_change_order(hmacctx->ipad, updated_digestsize);
2141 
2142 	err = chcr_compute_partial_hash(shash, hmacctx->opad,
2143 					hmacctx->opad, digestsize);
2144 	if (err)
2145 		goto out;
2146 	chcr_change_order(hmacctx->opad, updated_digestsize);
2147 out:
2148 	return err;
2149 }
2150 
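/*
 * chcr_aes_xts_setkey - program an AES-XTS (double-length) key: keep
 * the raw key, derive the reverse-round key used for decryption and
 * key the software fallback cipher as well.
 */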
2151 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
2152 			       unsigned int key_len)
2153 {
2154 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
2155 	unsigned short context_size = 0;
2156 	int err;
2157 
2158 	err = chcr_cipher_fallback_setkey(cipher, key, key_len);
2159 	if (err)
2160 		goto badkey_err;
2161 
2162 	memcpy(ablkctx->key, key, key_len);
2163 	ablkctx->enckey_len = key_len;
2164 	get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
2165 	context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
2166 	ablkctx->key_ctx_hdr =
2167 		FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
2168 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
2169 				 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
2170 				 CHCR_KEYCTX_NO_KEY, 1,
2171 				 0, context_size);
2172 	ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
2173 	return 0;
2174 badkey_err:
2175 	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
2176 	ablkctx->enckey_len = 0;
2177 
2178 	return err;
2179 }
2180 
2181 static int chcr_sha_init(struct ahash_request *areq)
2182 {
2183 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2184 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2185 	int digestsize =  crypto_ahash_digestsize(tfm);
2186 
2187 	req_ctx->data_len = 0;
2188 	req_ctx->reqlen = 0;
2189 	req_ctx->reqbfr = req_ctx->bfr1;
2190 	req_ctx->skbfr = req_ctx->bfr2;
2191 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
2192 
2193 	return 0;
2194 }
2195 
2196 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
2197 {
2198 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2199 				 sizeof(struct chcr_ahash_req_ctx));
2200 	return chcr_device_init(crypto_tfm_ctx(tfm));
2201 }
2202 
2203 static int chcr_hmac_init(struct ahash_request *areq)
2204 {
2205 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2206 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
2207 	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
2208 	unsigned int digestsize = crypto_ahash_digestsize(rtfm);
2209 	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
2210 
2211 	chcr_sha_init(areq);
2212 	req_ctx->data_len = bs;
2213 	if (is_hmac(crypto_ahash_tfm(rtfm))) {
2214 		if (digestsize == SHA224_DIGEST_SIZE)
2215 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2216 			       SHA256_DIGEST_SIZE);
2217 		else if (digestsize == SHA384_DIGEST_SIZE)
2218 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2219 			       SHA512_DIGEST_SIZE);
2220 		else
2221 			memcpy(req_ctx->partial_hash, hmacctx->ipad,
2222 			       digestsize);
2223 	}
2224 	return 0;
2225 }
2226 
2227 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
2228 {
2229 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2230 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2231 	unsigned int digestsize =
2232 		crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2233 
2234 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2235 				 sizeof(struct chcr_ahash_req_ctx));
2236 	hmacctx->base_hash = chcr_alloc_shash(digestsize);
2237 	if (IS_ERR(hmacctx->base_hash))
2238 		return PTR_ERR(hmacctx->base_hash);
2239 	return chcr_device_init(crypto_tfm_ctx(tfm));
2240 }
2241 
2242 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2243 {
2244 	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2245 	struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2246 
2247 	if (hmacctx->base_hash) {
2248 		chcr_free_shash(hmacctx->base_hash);
2249 		hmacctx->base_hash = NULL;
2250 	}
2251 }
2252 
2253 inline void chcr_aead_common_exit(struct aead_request *req)
2254 {
2255 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2256 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2257 	struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2258 
2259 	chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
2260 }
2261 
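/*
 * chcr_aead_common_init - common AEAD request setup: check that a key
 * has been programmed and that decrypt requests carry at least the
 * tag, point scratch_pad past the IV when a CCM B0 block is needed,
 * and DMA-map the request buffers.
 */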
2262 static int chcr_aead_common_init(struct aead_request *req)
2263 {
2264 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2265 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2266 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2267 	unsigned int authsize = crypto_aead_authsize(tfm);
2268 	int error = -EINVAL;
2269 
2270 	/* validate key size */
2271 	if (aeadctx->enckey_len == 0)
2272 		goto err;
2273 	if (reqctx->op && req->cryptlen < authsize)
2274 		goto err;
2275 	if (reqctx->b0_len)
2276 		reqctx->scratch_pad = reqctx->iv + IV;
2277 	else
2278 		reqctx->scratch_pad = NULL;
2279 
2280 	error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2281 				  reqctx->op);
2282 	if (error) {
2283 		error = -ENOMEM;
2284 		goto err;
2285 	}
2286 
2287 	return 0;
2288 err:
2289 	return error;
2290 }
2291 
2292 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2293 				   int aadmax, int wrlen,
2294 				   unsigned short op_type)
2295 {
2296 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2297 
2298 	if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2299 	    dst_nents > MAX_DSGL_ENT ||
2300 	    (req->assoclen > aadmax) ||
2301 	    (wrlen > SGE_MAX_WR_LEN))
2302 		return 1;
2303 	return 0;
2304 }
2305 
2306 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2307 {
2308 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2309 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2310 	struct aead_request *subreq = aead_request_ctx(req);
2311 
2312 	aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2313 	aead_request_set_callback(subreq, req->base.flags,
2314 				  req->base.complete, req->base.data);
2315 	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2316 				 req->iv);
2317 	aead_request_set_ad(subreq, req->assoclen);
2318 	return op_type ? crypto_aead_decrypt(subreq) :
2319 		crypto_aead_encrypt(subreq);
2320 }
2321 
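/*
 * create_authenc_wr - build the work request for the authenc (cipher
 * plus HMAC) and null-cipher modes. The payload is laid out as
 * AAD | IV | data, the cipher key and h_iopad are copied into the key
 * context, and small requests go inline (imm) while larger ones use a
 * ULPTX SGL.
 */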
2322 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2323 					 unsigned short qid,
2324 					 int size)
2325 {
2326 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2327 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2328 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2329 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2330 	struct sk_buff *skb = NULL;
2331 	struct chcr_wr *chcr_req;
2332 	struct cpl_rx_phys_dsgl *phys_cpl;
2333 	struct ulptx_sgl *ulptx;
2334 	unsigned int transhdr_len;
2335 	unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2336 	unsigned int   kctx_len = 0, dnents, snents;
2337 	unsigned int  authsize = crypto_aead_authsize(tfm);
2338 	int error = -EINVAL;
2339 	u8 *ivptr;
2340 	int null = 0;
2341 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2342 		GFP_ATOMIC;
2343 	struct adapter *adap = padap(a_ctx(tfm)->dev);
2344 
2345 	if (req->cryptlen == 0)
2346 		return NULL;
2347 
2348 	reqctx->b0_len = 0;
2349 	error = chcr_aead_common_init(req);
2350 	if (error)
2351 		return ERR_PTR(error);
2352 
2353 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2354 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2355 		null = 1;
2356 	}
2357 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2358 		(reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2359 	dnents += MIN_AUTH_SG; // For IV
2360 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2361 			       CHCR_SRC_SG_SIZE, 0);
2362 	dst_size = get_space_for_phys_dsgl(dnents);
2363 	kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2364 		- sizeof(chcr_req->key_ctx);
2365 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2366 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2367 			SGE_MAX_WR_LEN;
2368 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2369 			: (sgl_len(snents) * 8);
2370 	transhdr_len += temp;
2371 	transhdr_len = roundup(transhdr_len, 16);
2372 
2373 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2374 				    transhdr_len, reqctx->op)) {
2375 		atomic_inc(&adap->chcr_stats.fallback);
2376 		chcr_aead_common_exit(req);
2377 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2378 	}
2379 	skb = alloc_skb(transhdr_len, flags);
2380 	if (!skb) {
2381 		error = -ENOMEM;
2382 		goto err;
2383 	}
2384 
2385 	chcr_req = __skb_put_zero(skb, transhdr_len);
2386 
2387 	temp  = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2388 
2389 	/*
2390 	 * Input order is AAD, IV and Payload, where the IV is included as
2391 	 * part of the authdata. All other fields are filled according to
2392 	 * the hardware spec.
2393 	 */
2394 	chcr_req->sec_cpl.op_ivinsrtofst =
2395 		FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
2396 	chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2397 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2398 					null ? 0 : 1 + IV,
2399 					null ? 0 : IV + req->assoclen,
2400 					req->assoclen + IV + 1,
2401 					(temp & 0x1F0) >> 4);
2402 	chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2403 					temp & 0xF,
2404 					null ? 0 : req->assoclen + IV + 1,
2405 					temp, temp);
2406 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2407 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2408 		temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2409 	else
2410 		temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2411 	chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2412 					(reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2413 					temp,
2414 					actx->auth_mode, aeadctx->hmac_ctrl,
2415 					IV >> 1);
2416 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2417 					 0, 0, dst_size);
2418 
2419 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2420 	if (reqctx->op == CHCR_ENCRYPT_OP ||
2421 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2422 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2423 		memcpy(chcr_req->key_ctx.key, aeadctx->key,
2424 		       aeadctx->enckey_len);
2425 	else
2426 		memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2427 		       aeadctx->enckey_len);
2428 
2429 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2430 	       actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2431 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2432 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2433 	ulptx = (struct ulptx_sgl *)(ivptr + IV);
2434 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2435 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2436 		memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2437 		memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2438 				CTR_RFC3686_IV_SIZE);
2439 		*(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2440 			CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2441 	} else {
2442 		memcpy(ivptr, req->iv, IV);
2443 	}
2444 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
2445 	chcr_add_aead_src_ent(req, ulptx);
2446 	atomic_inc(&adap->chcr_stats.cipher_rqst);
2447 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2448 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2449 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2450 		   transhdr_len, temp, 0);
2451 	reqctx->skb = skb;
2452 
2453 	return skb;
2454 err:
2455 	chcr_aead_common_exit(req);
2456 
2457 	return ERR_PTR(error);
2458 }
2459 
2460 int chcr_aead_dma_map(struct device *dev,
2461 		      struct aead_request *req,
2462 		      unsigned short op_type)
2463 {
2464 	int error;
2465 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2466 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2467 	unsigned int authsize = crypto_aead_authsize(tfm);
2468 	int dst_size;
2469 
2470 	dst_size = req->assoclen + req->cryptlen + (op_type ?
2471 				-authsize : authsize);
2472 	if (!req->cryptlen || !dst_size)
2473 		return 0;
2474 	reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2475 					DMA_BIDIRECTIONAL);
2476 	if (dma_mapping_error(dev, reqctx->iv_dma))
2477 		return -ENOMEM;
2478 	if (reqctx->b0_len)
2479 		reqctx->b0_dma = reqctx->iv_dma + IV;
2480 	else
2481 		reqctx->b0_dma = 0;
2482 	if (req->src == req->dst) {
2483 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2484 				   DMA_BIDIRECTIONAL);
2485 		if (!error)
2486 			goto err;
2487 	} else {
2488 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2489 				   DMA_TO_DEVICE);
2490 		if (!error)
2491 			goto err;
2492 		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2493 				   DMA_FROM_DEVICE);
2494 		if (!error) {
2495 			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2496 				   DMA_TO_DEVICE);
2497 			goto err;
2498 		}
2499 	}
2500 
2501 	return 0;
2502 err:
2503 	dma_unmap_single(dev, reqctx->iv_dma, IV + reqctx->b0_len, DMA_BIDIRECTIONAL);
2504 	return -ENOMEM;
2505 }
2506 
2507 void chcr_aead_dma_unmap(struct device *dev,
2508 			 struct aead_request *req,
2509 			 unsigned short op_type)
2510 {
2511 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2512 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2513 	unsigned int authsize = crypto_aead_authsize(tfm);
2514 	int dst_size;
2515 
2516 	dst_size = req->assoclen + req->cryptlen + (op_type ?
2517 					-authsize : authsize);
2518 	if (!req->cryptlen || !dst_size)
2519 		return;
2520 
2521 	dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2522 					DMA_BIDIRECTIONAL);
2523 	if (req->src == req->dst) {
2524 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2525 				   DMA_BIDIRECTIONAL);
2526 	} else {
2527 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2528 				   DMA_TO_DEVICE);
2529 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2530 				   DMA_FROM_DEVICE);
2531 	}
2532 }
2533 
2534 void chcr_add_aead_src_ent(struct aead_request *req,
2535 			   struct ulptx_sgl *ulptx)
2536 {
2537 	struct ulptx_walk ulp_walk;
2538 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2539 
2540 	if (reqctx->imm) {
2541 		u8 *buf = (u8 *)ulptx;
2542 
2543 		if (reqctx->b0_len) {
2544 			memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2545 			buf += reqctx->b0_len;
2546 		}
2547 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2548 				   buf, req->cryptlen + req->assoclen, 0);
2549 	} else {
2550 		ulptx_walk_init(&ulp_walk, ulptx);
2551 		if (reqctx->b0_len)
2552 			ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2553 					    reqctx->b0_dma);
2554 		ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2555 				  req->assoclen,  0);
2556 		ulptx_walk_end(&ulp_walk);
2557 	}
2558 }
2559 
2560 void chcr_add_aead_dst_ent(struct aead_request *req,
2561 			   struct cpl_rx_phys_dsgl *phys_cpl,
2562 			   unsigned short qid)
2563 {
2564 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2565 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2566 	struct dsgl_walk dsgl_walk;
2567 	unsigned int authsize = crypto_aead_authsize(tfm);
2568 	struct chcr_context *ctx = a_ctx(tfm);
2569 	u32 temp;
2570 
2571 	dsgl_walk_init(&dsgl_walk, phys_cpl);
2572 	dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2573 	temp = req->assoclen + req->cryptlen +
2574 		(reqctx->op ? -authsize : authsize);
2575 	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2576 	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2577 }
2578 
2579 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
2580 			     void *ulptx,
2581 			     struct  cipher_wr_param *wrparam)
2582 {
2583 	struct ulptx_walk ulp_walk;
2584 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2585 	u8 *buf = ulptx;
2586 
2587 	memcpy(buf, reqctx->iv, IV);
2588 	buf += IV;
2589 	if (reqctx->imm) {
2590 		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2591 				   buf, wrparam->bytes, reqctx->processed);
2592 	} else {
2593 		ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2594 		ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2595 				  reqctx->src_ofst);
2596 		reqctx->srcsg = ulp_walk.last_sg;
2597 		reqctx->src_ofst = ulp_walk.last_sg_len;
2598 		ulptx_walk_end(&ulp_walk);
2599 	}
2600 }
2601 
2602 void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2603 			     struct cpl_rx_phys_dsgl *phys_cpl,
2604 			     struct  cipher_wr_param *wrparam,
2605 			     unsigned short qid)
2606 {
2607 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2608 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
2609 	struct chcr_context *ctx = c_ctx(tfm);
2610 	struct dsgl_walk dsgl_walk;
2611 
2612 	dsgl_walk_init(&dsgl_walk, phys_cpl);
2613 	dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2614 			 reqctx->dst_ofst);
2615 	reqctx->dstsg = dsgl_walk.last_sg;
2616 	reqctx->dst_ofst = dsgl_walk.last_sg_len;
2617 
2618 	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2619 }
2620 
2621 void chcr_add_hash_src_ent(struct ahash_request *req,
2622 			   struct ulptx_sgl *ulptx,
2623 			   struct hash_wr_param *param)
2624 {
2625 	struct ulptx_walk ulp_walk;
2626 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2627 
2628 	if (reqctx->hctx_wr.imm) {
2629 		u8 *buf = (u8 *)ulptx;
2630 
2631 		if (param->bfr_len) {
2632 			memcpy(buf, reqctx->reqbfr, param->bfr_len);
2633 			buf += param->bfr_len;
2634 		}
2635 
2636 		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2637 				   sg_nents(reqctx->hctx_wr.srcsg), buf,
2638 				   param->sg_len, 0);
2639 	} else {
2640 		ulptx_walk_init(&ulp_walk, ulptx);
2641 		if (param->bfr_len)
2642 			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2643 					    reqctx->hctx_wr.dma_addr);
2644 		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2645 				  param->sg_len, reqctx->hctx_wr.src_ofst);
2646 		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2647 		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2648 		ulptx_walk_end(&ulp_walk);
2649 	}
2650 }
2651 
2652 int chcr_hash_dma_map(struct device *dev,
2653 		      struct ahash_request *req)
2654 {
2655 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2656 	int error = 0;
2657 
2658 	if (!req->nbytes)
2659 		return 0;
2660 	error = dma_map_sg(dev, req->src, sg_nents(req->src),
2661 			   DMA_TO_DEVICE);
2662 	if (!error)
2663 		return -ENOMEM;
2664 	req_ctx->hctx_wr.is_sg_map = 1;
2665 	return 0;
2666 }
2667 
2668 void chcr_hash_dma_unmap(struct device *dev,
2669 			 struct ahash_request *req)
2670 {
2671 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2672 
2673 	if (!req->nbytes)
2674 		return;
2675 
2676 	dma_unmap_sg(dev, req->src, sg_nents(req->src),
2677 			   DMA_TO_DEVICE);
2678 	req_ctx->hctx_wr.is_sg_map = 0;
2679 
2680 }
2681 
2682 int chcr_cipher_dma_map(struct device *dev,
2683 			struct ablkcipher_request *req)
2684 {
2685 	int error;
2686 
2687 	if (req->src == req->dst) {
2688 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2689 				   DMA_BIDIRECTIONAL);
2690 		if (!error)
2691 			goto err;
2692 	} else {
2693 		error = dma_map_sg(dev, req->src, sg_nents(req->src),
2694 				   DMA_TO_DEVICE);
2695 		if (!error)
2696 			goto err;
2697 		error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2698 				   DMA_FROM_DEVICE);
2699 		if (!error) {
2700 			dma_unmap_sg(dev, req->src, sg_nents(req->src),
2701 				   DMA_TO_DEVICE);
2702 			goto err;
2703 		}
2704 	}
2705 
2706 	return 0;
2707 err:
2708 	return -ENOMEM;
2709 }
2710 
2711 void chcr_cipher_dma_unmap(struct device *dev,
2712 			   struct ablkcipher_request *req)
2713 {
2714 	if (req->src == req->dst) {
2715 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2716 				   DMA_BIDIRECTIONAL);
2717 	} else {
2718 		dma_unmap_sg(dev, req->src, sg_nents(req->src),
2719 				   DMA_TO_DEVICE);
2720 		dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2721 				   DMA_FROM_DEVICE);
2722 	}
2723 }
2724 
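/*
 * set_msg_len - encode the CCM message length big-endian into the last
 * @csize bytes of the B0 block (RFC 3610 layout), returning -EOVERFLOW
 * if the length does not fit in the chosen field width.
 */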
2725 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2726 {
2727 	__be32 data;
2728 
2729 	memset(block, 0, csize);
2730 	block += csize;
2731 
2732 	if (csize >= 4)
2733 		csize = 4;
2734 	else if (msglen > (unsigned int)(1 << (8 * csize)))
2735 		return -EOVERFLOW;
2736 
2737 	data = cpu_to_be32(msglen);
2738 	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2739 
2740 	return 0;
2741 }
2742 
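/*
 * generate_b0 - build the CCM B0 block in the scratch pad: copy the
 * formatted IV, fold the tag length into bits 3-5 of the flags byte,
 * set the Adata bit when AAD is present and append the message length
 * (excluding the tag on decrypt).
 */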
2743 static int generate_b0(struct aead_request *req, u8 *ivptr,
2744 			unsigned short op_type)
2745 {
2746 	unsigned int l, lp, m;
2747 	int rc;
2748 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
2749 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2750 	u8 *b0 = reqctx->scratch_pad;
2751 
2752 	m = crypto_aead_authsize(aead);
2753 
2754 	memcpy(b0, ivptr, 16);
2755 
2756 	lp = b0[0];
2757 	l = lp + 1;
2758 
2759 	/* set m, bits 3-5 */
2760 	*b0 |= (8 * ((m - 2) / 2));
2761 
2762 	/* set adata, bit 6, if associated data is used */
2763 	if (req->assoclen)
2764 		*b0 |= 64;
2765 	rc = set_msg_len(b0 + 16 - l,
2766 			 (op_type == CHCR_DECRYPT_OP) ?
2767 			 req->cryptlen - m : req->cryptlen, l);
2768 
2769 	return rc;
2770 }
2771 
2772 static inline int crypto_ccm_check_iv(const u8 *iv)
2773 {
2774 	/* 2 <= L <= 8, so 1 <= L' <= 7. */
2775 	if (iv[0] < 1 || iv[0] > 7)
2776 		return -EINVAL;
2777 
2778 	return 0;
2779 }
2780 
2781 static int ccm_format_packet(struct aead_request *req,
2782 			     u8 *ivptr,
2783 			     unsigned int sub_type,
2784 			     unsigned short op_type,
2785 			     unsigned int assoclen)
2786 {
2787 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2788 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2789 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2790 	int rc = 0;
2791 
2792 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2793 		ivptr[0] = 3;
2794 		memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2795 		memcpy(ivptr + 4, req->iv, 8);
2796 		memset(ivptr + 12, 0, 4);
2797 	} else {
2798 		memcpy(ivptr, req->iv, 16);
2799 	}
2800 	if (assoclen)
2801 		*((unsigned short *)(reqctx->scratch_pad + 16)) =
2802 				htons(assoclen);
2803 
2804 	rc = generate_b0(req, ivptr, op_type);
2805 	/* zero the ctr value */
2806 	memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2807 	return rc;
2808 }
2809 
2810 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2811 				  unsigned int dst_size,
2812 				  struct aead_request *req,
2813 				  unsigned short op_type)
2814 {
2815 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2816 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2817 	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2818 	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2819 	unsigned int c_id = a_ctx(tfm)->tx_chan_id;
2820 	unsigned int ccm_xtra;
2821 	unsigned char tag_offset = 0, auth_offset = 0;
2822 	unsigned int assoclen;
2823 
2824 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2825 		assoclen = req->assoclen - 8;
2826 	else
2827 		assoclen = req->assoclen;
2828 	ccm_xtra = CCM_B0_SIZE +
2829 		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2830 
2831 	auth_offset = req->cryptlen ?
2832 		(req->assoclen + IV + 1 + ccm_xtra) : 0;
2833 	if (op_type == CHCR_DECRYPT_OP) {
2834 		if (crypto_aead_authsize(tfm) != req->cryptlen)
2835 			tag_offset = crypto_aead_authsize(tfm);
2836 		else
2837 			auth_offset = 0;
2838 	}
2839 
2840 
2841 	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2842 					 2, 1);
2843 	sec_cpl->pldlen =
2844 		htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2845 	/* For CCM there will always be a B0 block, so AAD start is always 1 */
2846 	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2847 				1 + IV,	IV + assoclen + ccm_xtra,
2848 				req->assoclen + IV + 1 + ccm_xtra, 0);
2849 
2850 	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2851 					auth_offset, tag_offset,
2852 					(op_type == CHCR_ENCRYPT_OP) ? 0 :
2853 					crypto_aead_authsize(tfm));
2854 	sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2855 					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2856 					cipher_mode, mac_mode,
2857 					aeadctx->hmac_ctrl, IV >> 1);
2858 
2859 	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2860 					0, dst_size);
2861 }
2862 
2863 static int aead_ccm_validate_input(unsigned short op_type,
2864 				   struct aead_request *req,
2865 				   struct chcr_aead_ctx *aeadctx,
2866 				   unsigned int sub_type)
2867 {
2868 	if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2869 		if (crypto_ccm_check_iv(req->iv)) {
2870 			pr_err("CCM: IV check fails\n");
2871 			return -EINVAL;
2872 		}
2873 	} else {
2874 		if (req->assoclen != 16 && req->assoclen != 20) {
2875 			pr_err("RFC4309: Invalid AAD length %d\n",
2876 			       req->assoclen);
2877 			return -EINVAL;
2878 		}
2879 	}
2880 	return 0;
2881 }
2882 
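/*
 * create_aead_ccm_wr - build the work request for CCM and RFC4309.
 * In addition to the usual AAD | IV | data layout, a B0 block (plus an
 * AAD length field when assoclen is non-zero) is carried via the
 * scratch pad, which is why b0_len is added to the size calculations.
 */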
2883 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2884 					  unsigned short qid,
2885 					  int size)
2886 {
2887 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2888 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2889 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2890 	struct sk_buff *skb = NULL;
2891 	struct chcr_wr *chcr_req;
2892 	struct cpl_rx_phys_dsgl *phys_cpl;
2893 	struct ulptx_sgl *ulptx;
2894 	unsigned int transhdr_len;
2895 	unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
2896 	unsigned int sub_type, assoclen = req->assoclen;
2897 	unsigned int authsize = crypto_aead_authsize(tfm);
2898 	int error = -EINVAL;
2899 	u8 *ivptr;
2900 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2901 		GFP_ATOMIC;
2902 	struct adapter *adap = padap(a_ctx(tfm)->dev);
2903 
2904 	sub_type = get_aead_subtype(tfm);
2905 	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2906 		assoclen -= 8;
2907 	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2908 	error = chcr_aead_common_init(req);
2909 	if (error)
2910 		return ERR_PTR(error);
2911 
2912 	error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
2913 	if (error)
2914 		goto err;
2915 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
2916 			+ (reqctx->op ? -authsize : authsize),
2917 			CHCR_DST_SG_SIZE, 0);
2918 	dnents += MIN_CCM_SG; // For IV and B0
2919 	dst_size = get_space_for_phys_dsgl(dnents);
2920 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2921 			       CHCR_SRC_SG_SIZE, 0);
2922 	snents += MIN_CCM_SG; //For B0
2923 	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
2924 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2925 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
2926 		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
2927 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
2928 				     reqctx->b0_len, 16) :
2929 		(sgl_len(snents) *  8);
2930 	transhdr_len += temp;
2931 	transhdr_len = roundup(transhdr_len, 16);
2932 
2933 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2934 				reqctx->b0_len, transhdr_len, reqctx->op)) {
2935 		atomic_inc(&adap->chcr_stats.fallback);
2936 		chcr_aead_common_exit(req);
2937 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2938 	}
2939 	skb = alloc_skb(transhdr_len,  flags);
2940 
2941 	if (!skb) {
2942 		error = -ENOMEM;
2943 		goto err;
2944 	}
2945 
2946 	chcr_req = __skb_put_zero(skb, transhdr_len);
2947 
2948 	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
2949 
2950 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2951 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2952 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2953 			aeadctx->key, aeadctx->enckey_len);
2954 
2955 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2956 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2957 	ulptx = (struct ulptx_sgl *)(ivptr + IV);
2958 	error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
2959 	if (error)
2960 		goto dstmap_fail;
2961 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
2962 	chcr_add_aead_src_ent(req, ulptx);
2963 
2964 	atomic_inc(&adap->chcr_stats.aead_rqst);
2965 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2966 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
2967 		reqctx->b0_len) : 0);
2968 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
2969 		    transhdr_len, temp, 0);
2970 	reqctx->skb = skb;
2971 
2972 	return skb;
2973 dstmap_fail:
2974 	kfree_skb(skb);
2975 err:
2976 	chcr_aead_common_exit(req);
2977 	return ERR_PTR(error);
2978 }
2979 
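/*
 * create_gcm_wr - build the work request for GCM and RFC4106. The
 * 16-byte IV is formed as SALT | IV | 0x00000001 (RFC4106) or as the
 * caller's 12-byte IV plus the initial counter (plain GCM), and the
 * GHASH subkey is appended to the AES key in the key context.
 */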
2980 static struct sk_buff *create_gcm_wr(struct aead_request *req,
2981 				     unsigned short qid,
2982 				     int size)
2983 {
2984 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2985 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2986 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2987 	struct sk_buff *skb = NULL;
2988 	struct chcr_wr *chcr_req;
2989 	struct cpl_rx_phys_dsgl *phys_cpl;
2990 	struct ulptx_sgl *ulptx;
2991 	unsigned int transhdr_len, dnents = 0, snents;
2992 	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
2993 	unsigned int authsize = crypto_aead_authsize(tfm);
2994 	int error = -EINVAL;
2995 	u8 *ivptr;
2996 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2997 		GFP_ATOMIC;
2998 	struct adapter *adap = padap(a_ctx(tfm)->dev);
2999 
3000 	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3001 		assoclen = req->assoclen - 8;
3002 
3003 	reqctx->b0_len = 0;
3004 	error = chcr_aead_common_init(req);
3005 	if (error)
3006 		return ERR_PTR(error);
3007 	dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3008 				(reqctx->op ? -authsize : authsize),
3009 				CHCR_DST_SG_SIZE, 0);
3010 	snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3011 			       CHCR_SRC_SG_SIZE, 0);
3012 	dnents += MIN_GCM_SG; // For IV
3013 	dst_size = get_space_for_phys_dsgl(dnents);
3014 	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3015 	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3016 	reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3017 			SGE_MAX_WR_LEN;
3018 	temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3019 		(sgl_len(snents) * 8);
3020 	transhdr_len += temp;
3021 	transhdr_len = roundup(transhdr_len, 16);
3022 	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3023 			    transhdr_len, reqctx->op)) {
3024 
3025 		atomic_inc(&adap->chcr_stats.fallback);
3026 		chcr_aead_common_exit(req);
3027 		return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3028 	}
3029 	skb = alloc_skb(transhdr_len, flags);
3030 	if (!skb) {
3031 		error = -ENOMEM;
3032 		goto err;
3033 	}
3034 
3035 	chcr_req = __skb_put_zero(skb, transhdr_len);
3036 
3037 	//Offset of tag from end
3038 	temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3039 	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3040 					a_ctx(tfm)->tx_chan_id, 2, 1);
3041 	chcr_req->sec_cpl.pldlen =
3042 		htonl(req->assoclen + IV + req->cryptlen);
3043 	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3044 					assoclen ? 1 + IV : 0,
3045 					assoclen ? IV + assoclen : 0,
3046 					req->assoclen + IV + 1, 0);
3047 	chcr_req->sec_cpl.cipherstop_lo_authinsert =
3048 			FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3049 						temp, temp);
3050 	chcr_req->sec_cpl.seqno_numivs =
3051 			FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3052 					CHCR_ENCRYPT_OP) ? 1 : 0,
3053 					CHCR_SCMD_CIPHER_MODE_AES_GCM,
3054 					CHCR_SCMD_AUTH_MODE_GHASH,
3055 					aeadctx->hmac_ctrl, IV >> 1);
3056 	chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3057 					0, 0, dst_size);
3058 	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3059 	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3060 	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3061 	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3062 
3063 	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3064 	ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3065 	/* Prepare a 16 byte IV: SALT | IV | 0x00000001 */
3067 	if (get_aead_subtype(tfm) ==
3068 	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3069 		memcpy(ivptr, aeadctx->salt, 4);
3070 		memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3071 	} else {
3072 		memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3073 	}
3074 	*((unsigned int *)(ivptr + 12)) = htonl(0x01);
3075 
3076 	ulptx = (struct ulptx_sgl *)(ivptr + 16);
3077 
3078 	chcr_add_aead_dst_ent(req, phys_cpl, qid);
3079 	chcr_add_aead_src_ent(req, ulptx);
3080 	atomic_inc(&adap->chcr_stats.aead_rqst);
3081 	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3082 		kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3083 	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3084 		    transhdr_len, temp, reqctx->verify);
3085 	reqctx->skb = skb;
3086 	return skb;
3087 
3088 err:
3089 	chcr_aead_common_exit(req);
3090 	return ERR_PTR(error);
3091 }
3092 
3093 
3094 
3095 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3096 {
3097 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3098 	struct aead_alg *alg = crypto_aead_alg(tfm);
3099 
3100 	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3101 					       CRYPTO_ALG_NEED_FALLBACK |
3102 					       CRYPTO_ALG_ASYNC);
3103 	if  (IS_ERR(aeadctx->sw_cipher))
3104 		return PTR_ERR(aeadctx->sw_cipher);
3105 	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3106 				 sizeof(struct aead_request) +
3107 				 crypto_aead_reqsize(aeadctx->sw_cipher)));
3108 	return chcr_device_init(a_ctx(tfm));
3109 }
3110 
3111 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3112 {
3113 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3114 
3115 	crypto_free_aead(aeadctx->sw_cipher);
3116 }
3117 
3118 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3119 					unsigned int authsize)
3120 {
3121 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3122 
3123 	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3124 	aeadctx->mayverify = VERIFY_HW;
3125 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3126 }
3127 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3128 				    unsigned int authsize)
3129 {
3130 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3131 	u32 maxauth = crypto_aead_maxauthsize(tfm);
3132 
3133 	/* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
3134 	 * does not hold for SHA1, so the authsize == 12 check must come
3135 	 * before the authsize == (maxauth >> 1) check.
3136 	 */
3137 	if (authsize == ICV_4) {
3138 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3139 		aeadctx->mayverify = VERIFY_HW;
3140 	} else if (authsize == ICV_6) {
3141 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3142 		aeadctx->mayverify = VERIFY_HW;
3143 	} else if (authsize == ICV_10) {
3144 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3145 		aeadctx->mayverify = VERIFY_HW;
3146 	} else if (authsize == ICV_12) {
3147 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3148 		aeadctx->mayverify = VERIFY_HW;
3149 	} else if (authsize == ICV_14) {
3150 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3151 		aeadctx->mayverify = VERIFY_HW;
3152 	} else if (authsize == (maxauth >> 1)) {
3153 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3154 		aeadctx->mayverify = VERIFY_HW;
3155 	} else if (authsize == maxauth) {
3156 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3157 		aeadctx->mayverify = VERIFY_HW;
3158 	} else {
3159 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3160 		aeadctx->mayverify = VERIFY_SW;
3161 	}
3162 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3163 }
3164 
3165 
3166 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3167 {
3168 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3169 
3170 	switch (authsize) {
3171 	case ICV_4:
3172 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3173 		aeadctx->mayverify = VERIFY_HW;
3174 		break;
3175 	case ICV_8:
3176 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3177 		aeadctx->mayverify = VERIFY_HW;
3178 		break;
3179 	case ICV_12:
3180 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3181 		aeadctx->mayverify = VERIFY_HW;
3182 		break;
3183 	case ICV_14:
3184 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3185 		aeadctx->mayverify = VERIFY_HW;
3186 		break;
3187 	case ICV_16:
3188 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3189 		aeadctx->mayverify = VERIFY_HW;
3190 		break;
3191 	case ICV_13:
3192 	case ICV_15:
3193 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3194 		aeadctx->mayverify = VERIFY_SW;
3195 		break;
3196 	default:
3197 
3198 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3199 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3200 		return -EINVAL;
3201 	}
3202 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3203 }
3204 
3205 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3206 					  unsigned int authsize)
3207 {
3208 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3209 
3210 	switch (authsize) {
3211 	case ICV_8:
3212 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3213 		aeadctx->mayverify = VERIFY_HW;
3214 		break;
3215 	case ICV_12:
3216 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3217 		aeadctx->mayverify = VERIFY_HW;
3218 		break;
3219 	case ICV_16:
3220 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3221 		aeadctx->mayverify = VERIFY_HW;
3222 		break;
3223 	default:
3224 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3225 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3226 		return -EINVAL;
3227 	}
3228 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3229 }
3230 
3231 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3232 				unsigned int authsize)
3233 {
3234 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3235 
3236 	switch (authsize) {
3237 	case ICV_4:
3238 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3239 		aeadctx->mayverify = VERIFY_HW;
3240 		break;
3241 	case ICV_6:
3242 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3243 		aeadctx->mayverify = VERIFY_HW;
3244 		break;
3245 	case ICV_8:
3246 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3247 		aeadctx->mayverify = VERIFY_HW;
3248 		break;
3249 	case ICV_10:
3250 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3251 		aeadctx->mayverify = VERIFY_HW;
3252 		break;
3253 	case ICV_12:
3254 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3255 		aeadctx->mayverify = VERIFY_HW;
3256 		break;
3257 	case ICV_14:
3258 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3259 		aeadctx->mayverify = VERIFY_HW;
3260 		break;
3261 	case ICV_16:
3262 		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3263 		aeadctx->mayverify = VERIFY_HW;
3264 		break;
3265 	default:
3266 		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3267 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3268 		return -EINVAL;
3269 	}
3270 	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3271 }
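
/*
 * Note: CCM (NIST SP 800-38C) only defines even tag lengths between 4 and 16
 * bytes, which is why the switch above enumerates exactly those values and
 * rejects everything else.
 */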
3272 
3273 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3274 				const u8 *key,
3275 				unsigned int keylen)
3276 {
3277 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3278 	unsigned char ck_size, mk_size;
3279 	int key_ctx_size = 0;
3280 
3281 	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3282 	if (keylen == AES_KEYSIZE_128) {
3283 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3284 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3285 	} else if (keylen == AES_KEYSIZE_192) {
3286 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3287 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3288 	} else if (keylen == AES_KEYSIZE_256) {
3289 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3290 		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3291 	} else {
3292 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
3293 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3294 		aeadctx->enckey_len = 0;
3295 		return	-EINVAL;
3296 	}
3297 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3298 						key_ctx_size >> 4);
3299 	memcpy(aeadctx->key, key, keylen);
3300 	aeadctx->enckey_len = keylen;
3301 
3302 	return 0;
3303 }
3304 
3305 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3306 				const u8 *key,
3307 				unsigned int keylen)
3308 {
3309 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3310 	int error;
3311 
3312 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3313 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3314 			      CRYPTO_TFM_REQ_MASK);
3315 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3316 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3317 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3318 			      CRYPTO_TFM_RES_MASK);
3319 	if (error)
3320 		return error;
3321 	return chcr_ccm_common_setkey(aead, key, keylen);
3322 }
3323 
3324 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3325 				    unsigned int keylen)
3326 {
3327 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3328 	int error;
3329 
3330 	if (keylen < 3) {
3331 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
3332 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3333 		aeadctx->enckey_len = 0;
3334 		return	-EINVAL;
3335 	}
3336 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3337 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3338 			      CRYPTO_TFM_REQ_MASK);
3339 	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3340 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3341 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3342 			      CRYPTO_TFM_RES_MASK);
3343 	if (error)
3344 		return error;
3345 	keylen -= 3;
3346 	memcpy(aeadctx->salt, key + keylen, 3);
3347 	return chcr_ccm_common_setkey(aead, key, keylen);
3348 }
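
/*
 * Illustrative sketch (not part of the driver): for rfc4309(ccm(aes)) the key
 * blob handed to setkey is the AES key followed by the 3-byte salt that is
 * split off above, so an AES-128 setup passes 19 bytes and then issues
 * requests with the 8-byte IV declared in the template below. "tfm" is
 * assumed to be an rfc4309(ccm(aes)) handle and "err" a local int.
 *
 *	u8 key[AES_KEYSIZE_128 + 3];
 *
 *	get_random_bytes(key, sizeof(key));
 *	err = crypto_aead_setkey(tfm, key, sizeof(key));
 */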
3349 
3350 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3351 			   unsigned int keylen)
3352 {
3353 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3354 	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3355 	unsigned int ck_size;
3356 	int ret = 0, key_ctx_size = 0;
3357 	struct crypto_aes_ctx aes;
3358 
3359 	aeadctx->enckey_len = 0;
3360 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3361 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3362 			      & CRYPTO_TFM_REQ_MASK);
3363 	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3364 	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3365 	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3366 			      CRYPTO_TFM_RES_MASK);
3367 	if (ret)
3368 		goto out;
3369 
3370 	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3371 	    keylen > 3) {
3372 		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3373 		memcpy(aeadctx->salt, key + keylen, 4);
3374 	}
3375 	if (keylen == AES_KEYSIZE_128) {
3376 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3377 	} else if (keylen == AES_KEYSIZE_192) {
3378 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3379 	} else if (keylen == AES_KEYSIZE_256) {
3380 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3381 	} else {
3382 		crypto_tfm_set_flags((struct crypto_tfm *)aead,
3383 				     CRYPTO_TFM_RES_BAD_KEY_LEN);
3384 		pr_err("GCM: Invalid key length %d\n", keylen);
3385 		ret = -EINVAL;
3386 		goto out;
3387 	}
3388 
3389 	memcpy(aeadctx->key, key, keylen);
3390 	aeadctx->enckey_len = keylen;
3391 	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3392 		AEAD_H_SIZE;
3393 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3394 						CHCR_KEYCTX_MAC_KEY_SIZE_128,
3395 						0, 0,
3396 						key_ctx_size >> 4);
3397 	/* Calculate the GHASH subkey H = CIPH(K, 0 repeated 16 times);
3398 	 * it is stored in the key context.
3399 	 */
3400 	ret = aes_expandkey(&aes, key, keylen);
3401 	if (ret) {
3402 		aeadctx->enckey_len = 0;
3403 		goto out;
3404 	}
3405 	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3406 	aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3407 	memzero_explicit(&aes, sizeof(aes));
3408 
3409 out:
3410 	return ret;
3411 }
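
/*
 * Illustrative sketch (not part of the driver): plain gcm(aes) takes a bare
 * AES key, while rfc4106(gcm(aes)) appends the 4-byte salt that the code
 * above copies into aeadctx->salt; the GHASH subkey H = AES_K(0^128) computed
 * above ends up in the key context for the hardware. The locals below are
 * hypothetical.
 *
 *	u8 key[AES_KEYSIZE_128 + 4];
 *	struct crypto_aead *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	get_random_bytes(key, sizeof(key));
 *	err = crypto_aead_setkey(tfm, key, sizeof(key));
 *	if (!err)
 *		err = crypto_aead_setauthsize(tfm, 16);
 */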
3412 
3413 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3414 				   unsigned int keylen)
3415 {
3416 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3417 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3418 	/* keys holds both the authentication and the cipher key */
3419 	struct crypto_authenc_keys keys;
3420 	unsigned int bs, subtype;
3421 	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3422 	int err = 0, i, key_ctx_len = 0;
3423 	unsigned char ck_size = 0;
3424 	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3425 	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3426 	struct algo_param param;
3427 	int align;
3428 	u8 *o_ptr = NULL;
3429 
3430 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3431 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3432 			      & CRYPTO_TFM_REQ_MASK);
3433 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3434 	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3435 	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3436 			      & CRYPTO_TFM_RES_MASK);
3437 	if (err)
3438 		goto out;
3439 
3440 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3441 		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3442 		goto out;
3443 	}
3444 
3445 	if (get_alg_config(&param, max_authsize)) {
3446 		pr_err("chcr : Unsupported digest size\n");
3447 		goto out;
3448 	}
3449 	subtype = get_aead_subtype(authenc);
3450 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3451 		subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3452 		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3453 			goto out;
3454 		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3455 		- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3456 		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3457 	}
3458 	if (keys.enckeylen == AES_KEYSIZE_128) {
3459 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3460 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3461 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3462 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3463 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3464 	} else {
3465 		pr_err("chcr : Unsupported cipher key\n");
3466 		goto out;
3467 	}
3468 
3469 	/* Copy only the encryption key. The auth key is used here to generate
3470 	 * h(ipad) and h(opad), so it is not needed again; authkeylen is at
3471 	 * most the hash digest size.
3472 	 */
3473 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3474 	aeadctx->enckey_len = keys.enckeylen;
3475 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3476 		subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3477 
3478 		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3479 			    aeadctx->enckey_len << 3);
3480 	}
3481 	base_hash  = chcr_alloc_shash(max_authsize);
3482 	if (IS_ERR(base_hash)) {
3483 		pr_err("chcr : Base driver cannot be loaded\n");
3484 		aeadctx->enckey_len = 0;
3485 		memzero_explicit(&keys, sizeof(keys));
3486 		return -EINVAL;
3487 	}
3488 	{
3489 		SHASH_DESC_ON_STACK(shash, base_hash);
3490 
3491 		shash->tfm = base_hash;
3492 		bs = crypto_shash_blocksize(base_hash);
3493 		align = KEYCTX_ALIGN_PAD(max_authsize);
3494 		o_ptr =  actx->h_iopad + param.result_size + align;
3495 
3496 		if (keys.authkeylen > bs) {
3497 			err = crypto_shash_digest(shash, keys.authkey,
3498 						  keys.authkeylen,
3499 						  o_ptr);
3500 			if (err) {
3501 				pr_err("chcr : Digest of the auth key failed\n");
3502 				goto out;
3503 			}
3504 			keys.authkeylen = max_authsize;
3505 		} else
3506 			memcpy(o_ptr, keys.authkey, keys.authkeylen);
3507 
3508 		/* Compute the ipad-digest*/
3509 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3510 		memcpy(pad, o_ptr, keys.authkeylen);
3511 		for (i = 0; i < bs >> 2; i++)
3512 			*((unsigned int *)pad + i) ^= IPAD_DATA;
3513 
3514 		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3515 					      max_authsize))
3516 			goto out;
3517 		/* Compute the opad-digest */
3518 		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3519 		memcpy(pad, o_ptr, keys.authkeylen);
3520 		for (i = 0; i < bs >> 2; i++)
3521 			*((unsigned int *)pad + i) ^= OPAD_DATA;
3522 
3523 		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3524 			goto out;
3525 
3526 		/* convert the ipad and opad digest to network order */
3527 		chcr_change_order(actx->h_iopad, param.result_size);
3528 		chcr_change_order(o_ptr, param.result_size);
3529 		key_ctx_len = sizeof(struct _key_ctx) +
3530 			roundup(keys.enckeylen, 16) +
3531 			(param.result_size + align) * 2;
3532 		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3533 						0, 1, key_ctx_len >> 4);
3534 		actx->auth_mode = param.auth_mode;
3535 		chcr_free_shash(base_hash);
3536 
3537 		memzero_explicit(&keys, sizeof(keys));
3538 		return 0;
3539 	}
3540 out:
3541 	aeadctx->enckey_len = 0;
3542 	memzero_explicit(&keys, sizeof(keys));
3543 	if (!IS_ERR(base_hash))
3544 		chcr_free_shash(base_hash);
3545 	return -EINVAL;
3546 }
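
/*
 * Background on the precomputation above (standard HMAC, RFC 2104): the auth
 * key is padded to the hash block size, XORed with repeated 0x36 and 0x5c
 * bytes (IPAD_DATA/OPAD_DATA are the 32-bit repetitions of those bytes), and
 * a single compression-function pass is run over each padded block. The
 * partial digests H(K ^ ipad) and H(K ^ opad) are stored in the key context
 * so the hardware only has to continue the hash over the message for every
 * request:
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 */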
3547 
3548 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3549 					const u8 *key, unsigned int keylen)
3550 {
3551 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3552 	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3553 	struct crypto_authenc_keys keys;
3554 	int err;
3555 	/* keys holds both the authentication and the cipher key */
3556 	unsigned int subtype;
3557 	int key_ctx_len = 0;
3558 	unsigned char ck_size = 0;
3559 
3560 	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3561 	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3562 			      & CRYPTO_TFM_REQ_MASK);
3563 	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3564 	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3565 	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3566 			      & CRYPTO_TFM_RES_MASK);
3567 	if (err)
3568 		goto out;
3569 
3570 	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3571 		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3572 		goto out;
3573 	}
3574 	subtype = get_aead_subtype(authenc);
3575 	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3576 	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3577 		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3578 			goto out;
3579 		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3580 			- CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3581 		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3582 	}
3583 	if (keys.enckeylen == AES_KEYSIZE_128) {
3584 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3585 	} else if (keys.enckeylen == AES_KEYSIZE_192) {
3586 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3587 	} else if (keys.enckeylen == AES_KEYSIZE_256) {
3588 		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3589 	} else {
3590 		pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
3591 		goto out;
3592 	}
3593 	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3594 	aeadctx->enckey_len = keys.enckeylen;
3595 	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3596 	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3597 		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3598 				aeadctx->enckey_len << 3);
3599 	}
3600 	key_ctx_len =  sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3601 
3602 	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3603 						0, key_ctx_len >> 4);
3604 	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3605 	memzero_explicit(&keys, sizeof(keys));
3606 	return 0;
3607 out:
3608 	aeadctx->enckey_len = 0;
3609 	memzero_explicit(&keys, sizeof(keys));
3610 	return -EINVAL;
3611 }
3612 
3613 static int chcr_aead_op(struct aead_request *req,
3614 			int size,
3615 			create_wr_t create_wr_fn)
3616 {
3617 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3618 	struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
3619 	struct uld_ctx *u_ctx;
3620 	struct sk_buff *skb;
3621 	int isfull = 0;
3622 	struct chcr_dev *cdev;
3623 
3624 	cdev = a_ctx(tfm)->dev;
3625 	if (!cdev) {
3626 		pr_err("chcr : %s : No crypto device.\n", __func__);
3627 		return -ENXIO;
3628 	}
3629 
3630 	if (chcr_inc_wrcount(cdev)) {
3631 	/* Detach state for CHCR means lldi or padap is freed.
3632 	 * We cannot increment the fallback counter here.
3633 	 */
3634 		return chcr_aead_fallback(req, reqctx->op);
3635 	}
3636 
3637 	u_ctx = ULD_CTX(a_ctx(tfm));
3638 	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3639 				   a_ctx(tfm)->tx_qidx)) {
3640 		isfull = 1;
3641 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
3642 			chcr_dec_wrcount(cdev);
3643 			return -ENOSPC;
3644 		}
3645 	}
3646 
3647 	/* Form a WR from req */
3648 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
3649 
3650 	if (IS_ERR_OR_NULL(skb)) {
3651 		chcr_dec_wrcount(cdev);
3652 		return PTR_ERR_OR_ZERO(skb);
3653 	}
3654 
3655 	skb->dev = u_ctx->lldi.ports[0];
3656 	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
3657 	chcr_send_wr(skb);
3658 	return isfull ? -EBUSY : -EINPROGRESS;
3659 }
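
/*
 * Illustrative sketch (not part of the driver): chcr_aead_op() completes
 * asynchronously and returns -EINPROGRESS, or -EBUSY when the queue was full
 * but CRYPTO_TFM_REQ_MAY_BACKLOG was set, so a synchronous caller typically
 * wraps the request with the crypto_wait_req() helpers:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  crypto_req_done, &wait);
 *	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
 */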
3660 
3661 static int chcr_aead_encrypt(struct aead_request *req)
3662 {
3663 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3664 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3665 
3666 	reqctx->verify = VERIFY_HW;
3667 	reqctx->op = CHCR_ENCRYPT_OP;
3668 
3669 	switch (get_aead_subtype(tfm)) {
3670 	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3671 	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3672 	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3673 	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3674 		return chcr_aead_op(req, 0, create_authenc_wr);
3675 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3676 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3677 		return chcr_aead_op(req, 0, create_aead_ccm_wr);
3678 	default:
3679 		return chcr_aead_op(req, 0, create_gcm_wr);
3680 	}
3681 }
3682 
3683 static int chcr_aead_decrypt(struct aead_request *req)
3684 {
3685 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3686 	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3687 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3688 	int size;
3689 
3690 	if (aeadctx->mayverify == VERIFY_SW) {
3691 		size = crypto_aead_maxauthsize(tfm);
3692 		reqctx->verify = VERIFY_SW;
3693 	} else {
3694 		size = 0;
3695 		reqctx->verify = VERIFY_HW;
3696 	}
3697 	reqctx->op = CHCR_DECRYPT_OP;
3698 	switch (get_aead_subtype(tfm)) {
3699 	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3700 	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3701 	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3702 	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3703 		return chcr_aead_op(req, size, create_authenc_wr);
3704 	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3705 	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3706 		return chcr_aead_op(req, size, create_aead_ccm_wr);
3707 	default:
3708 		return chcr_aead_op(req, size, create_gcm_wr);
3709 	}
3710 }
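
/*
 * Illustrative sketch (not part of the driver): end-to-end use of one of the
 * AEADs registered below through the generic kernel crypto API; "gcm(aes)"
 * resolves to gcm-aes-chcr when this driver provides the highest-priority
 * implementation. Buffer setup is elided: src/dst must cover assoclen bytes
 * of associated data followed by the payload (plus authsize bytes of tag
 * space in dst on encryption), and key/iv/assoclen/cryptlen/err are
 * hypothetical locals.
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_aead_setauthsize(tfm, 16);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  crypto_req_done, &wait);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src, dst, cryptlen, iv);
 *	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
 *	aead_request_free(req);
 *	crypto_free_aead(tfm);
 */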
3711 
3712 static struct chcr_alg_template driver_algs[] = {
3713 	/* AES-CBC */
3714 	{
3715 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3716 		.is_registered = 0,
3717 		.alg.crypto = {
3718 			.cra_name		= "cbc(aes)",
3719 			.cra_driver_name	= "cbc-aes-chcr",
3720 			.cra_blocksize		= AES_BLOCK_SIZE,
3721 			.cra_init		= chcr_cra_init,
3722 			.cra_exit		= chcr_cra_exit,
3723 			.cra_u.ablkcipher	= {
3724 				.min_keysize	= AES_MIN_KEY_SIZE,
3725 				.max_keysize	= AES_MAX_KEY_SIZE,
3726 				.ivsize		= AES_BLOCK_SIZE,
3727 				.setkey			= chcr_aes_cbc_setkey,
3728 				.encrypt		= chcr_aes_encrypt,
3729 				.decrypt		= chcr_aes_decrypt,
3730 			}
3731 		}
3732 	},
3733 	{
3734 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3735 		.is_registered = 0,
3736 		.alg.crypto =   {
3737 			.cra_name		= "xts(aes)",
3738 			.cra_driver_name	= "xts-aes-chcr",
3739 			.cra_blocksize		= AES_BLOCK_SIZE,
3740 			.cra_init		= chcr_cra_init,
3741 			.cra_exit		= NULL,
3742 			.cra_u.ablkcipher = {
3743 					.min_keysize	= 2 * AES_MIN_KEY_SIZE,
3744 					.max_keysize	= 2 * AES_MAX_KEY_SIZE,
3745 					.ivsize		= AES_BLOCK_SIZE,
3746 					.setkey		= chcr_aes_xts_setkey,
3747 					.encrypt	= chcr_aes_encrypt,
3748 					.decrypt	= chcr_aes_decrypt,
3749 				}
3750 			}
3751 	},
3752 	{
3753 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3754 		.is_registered = 0,
3755 		.alg.crypto = {
3756 			.cra_name		= "ctr(aes)",
3757 			.cra_driver_name	= "ctr-aes-chcr",
3758 			.cra_blocksize		= 1,
3759 			.cra_init		= chcr_cra_init,
3760 			.cra_exit		= chcr_cra_exit,
3761 			.cra_u.ablkcipher	= {
3762 				.min_keysize	= AES_MIN_KEY_SIZE,
3763 				.max_keysize	= AES_MAX_KEY_SIZE,
3764 				.ivsize		= AES_BLOCK_SIZE,
3765 				.setkey		= chcr_aes_ctr_setkey,
3766 				.encrypt	= chcr_aes_encrypt,
3767 				.decrypt	= chcr_aes_decrypt,
3768 			}
3769 		}
3770 	},
3771 	{
3772 		.type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3773 			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3774 		.is_registered = 0,
3775 		.alg.crypto = {
3776 			.cra_name		= "rfc3686(ctr(aes))",
3777 			.cra_driver_name	= "rfc3686-ctr-aes-chcr",
3778 			.cra_blocksize		= 1,
3779 			.cra_init		= chcr_rfc3686_init,
3780 			.cra_exit		= chcr_cra_exit,
3781 			.cra_u.ablkcipher	= {
3782 				.min_keysize	= AES_MIN_KEY_SIZE +
3783 					CTR_RFC3686_NONCE_SIZE,
3784 				.max_keysize	= AES_MAX_KEY_SIZE +
3785 					CTR_RFC3686_NONCE_SIZE,
3786 				.ivsize		= CTR_RFC3686_IV_SIZE,
3787 				.setkey		= chcr_aes_rfc3686_setkey,
3788 				.encrypt	= chcr_aes_encrypt,
3789 				.decrypt	= chcr_aes_decrypt,
3790 			}
3791 		}
3792 	},
3793 	/* SHA */
3794 	{
3795 		.type = CRYPTO_ALG_TYPE_AHASH,
3796 		.is_registered = 0,
3797 		.alg.hash = {
3798 			.halg.digestsize = SHA1_DIGEST_SIZE,
3799 			.halg.base = {
3800 				.cra_name = "sha1",
3801 				.cra_driver_name = "sha1-chcr",
3802 				.cra_blocksize = SHA1_BLOCK_SIZE,
3803 			}
3804 		}
3805 	},
3806 	{
3807 		.type = CRYPTO_ALG_TYPE_AHASH,
3808 		.is_registered = 0,
3809 		.alg.hash = {
3810 			.halg.digestsize = SHA256_DIGEST_SIZE,
3811 			.halg.base = {
3812 				.cra_name = "sha256",
3813 				.cra_driver_name = "sha256-chcr",
3814 				.cra_blocksize = SHA256_BLOCK_SIZE,
3815 			}
3816 		}
3817 	},
3818 	{
3819 		.type = CRYPTO_ALG_TYPE_AHASH,
3820 		.is_registered = 0,
3821 		.alg.hash = {
3822 			.halg.digestsize = SHA224_DIGEST_SIZE,
3823 			.halg.base = {
3824 				.cra_name = "sha224",
3825 				.cra_driver_name = "sha224-chcr",
3826 				.cra_blocksize = SHA224_BLOCK_SIZE,
3827 			}
3828 		}
3829 	},
3830 	{
3831 		.type = CRYPTO_ALG_TYPE_AHASH,
3832 		.is_registered = 0,
3833 		.alg.hash = {
3834 			.halg.digestsize = SHA384_DIGEST_SIZE,
3835 			.halg.base = {
3836 				.cra_name = "sha384",
3837 				.cra_driver_name = "sha384-chcr",
3838 				.cra_blocksize = SHA384_BLOCK_SIZE,
3839 			}
3840 		}
3841 	},
3842 	{
3843 		.type = CRYPTO_ALG_TYPE_AHASH,
3844 		.is_registered = 0,
3845 		.alg.hash = {
3846 			.halg.digestsize = SHA512_DIGEST_SIZE,
3847 			.halg.base = {
3848 				.cra_name = "sha512",
3849 				.cra_driver_name = "sha512-chcr",
3850 				.cra_blocksize = SHA512_BLOCK_SIZE,
3851 			}
3852 		}
3853 	},
3854 	/* HMAC */
3855 	{
3856 		.type = CRYPTO_ALG_TYPE_HMAC,
3857 		.is_registered = 0,
3858 		.alg.hash = {
3859 			.halg.digestsize = SHA1_DIGEST_SIZE,
3860 			.halg.base = {
3861 				.cra_name = "hmac(sha1)",
3862 				.cra_driver_name = "hmac-sha1-chcr",
3863 				.cra_blocksize = SHA1_BLOCK_SIZE,
3864 			}
3865 		}
3866 	},
3867 	{
3868 		.type = CRYPTO_ALG_TYPE_HMAC,
3869 		.is_registered = 0,
3870 		.alg.hash = {
3871 			.halg.digestsize = SHA224_DIGEST_SIZE,
3872 			.halg.base = {
3873 				.cra_name = "hmac(sha224)",
3874 				.cra_driver_name = "hmac-sha224-chcr",
3875 				.cra_blocksize = SHA224_BLOCK_SIZE,
3876 			}
3877 		}
3878 	},
3879 	{
3880 		.type = CRYPTO_ALG_TYPE_HMAC,
3881 		.is_registered = 0,
3882 		.alg.hash = {
3883 			.halg.digestsize = SHA256_DIGEST_SIZE,
3884 			.halg.base = {
3885 				.cra_name = "hmac(sha256)",
3886 				.cra_driver_name = "hmac-sha256-chcr",
3887 				.cra_blocksize = SHA256_BLOCK_SIZE,
3888 			}
3889 		}
3890 	},
3891 	{
3892 		.type = CRYPTO_ALG_TYPE_HMAC,
3893 		.is_registered = 0,
3894 		.alg.hash = {
3895 			.halg.digestsize = SHA384_DIGEST_SIZE,
3896 			.halg.base = {
3897 				.cra_name = "hmac(sha384)",
3898 				.cra_driver_name = "hmac-sha384-chcr",
3899 				.cra_blocksize = SHA384_BLOCK_SIZE,
3900 			}
3901 		}
3902 	},
3903 	{
3904 		.type = CRYPTO_ALG_TYPE_HMAC,
3905 		.is_registered = 0,
3906 		.alg.hash = {
3907 			.halg.digestsize = SHA512_DIGEST_SIZE,
3908 			.halg.base = {
3909 				.cra_name = "hmac(sha512)",
3910 				.cra_driver_name = "hmac-sha512-chcr",
3911 				.cra_blocksize = SHA512_BLOCK_SIZE,
3912 			}
3913 		}
3914 	},
3915 	/* Add AEAD Algorithms */
3916 	{
3917 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3918 		.is_registered = 0,
3919 		.alg.aead = {
3920 			.base = {
3921 				.cra_name = "gcm(aes)",
3922 				.cra_driver_name = "gcm-aes-chcr",
3923 				.cra_blocksize	= 1,
3924 				.cra_priority = CHCR_AEAD_PRIORITY,
3925 				.cra_ctxsize =	sizeof(struct chcr_context) +
3926 						sizeof(struct chcr_aead_ctx) +
3927 						sizeof(struct chcr_gcm_ctx),
3928 			},
3929 			.ivsize = GCM_AES_IV_SIZE,
3930 			.maxauthsize = GHASH_DIGEST_SIZE,
3931 			.setkey = chcr_gcm_setkey,
3932 			.setauthsize = chcr_gcm_setauthsize,
3933 		}
3934 	},
3935 	{
3936 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
3937 		.is_registered = 0,
3938 		.alg.aead = {
3939 			.base = {
3940 				.cra_name = "rfc4106(gcm(aes))",
3941 				.cra_driver_name = "rfc4106-gcm-aes-chcr",
3942 				.cra_blocksize	 = 1,
3943 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
3944 				.cra_ctxsize =	sizeof(struct chcr_context) +
3945 						sizeof(struct chcr_aead_ctx) +
3946 						sizeof(struct chcr_gcm_ctx),
3947 
3948 			},
3949 			.ivsize = GCM_RFC4106_IV_SIZE,
3950 			.maxauthsize	= GHASH_DIGEST_SIZE,
3951 			.setkey = chcr_gcm_setkey,
3952 			.setauthsize	= chcr_4106_4309_setauthsize,
3953 		}
3954 	},
3955 	{
3956 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
3957 		.is_registered = 0,
3958 		.alg.aead = {
3959 			.base = {
3960 				.cra_name = "ccm(aes)",
3961 				.cra_driver_name = "ccm-aes-chcr",
3962 				.cra_blocksize	 = 1,
3963 				.cra_priority = CHCR_AEAD_PRIORITY,
3964 				.cra_ctxsize =	sizeof(struct chcr_context) +
3965 						sizeof(struct chcr_aead_ctx),
3966 
3967 			},
3968 			.ivsize = AES_BLOCK_SIZE,
3969 			.maxauthsize	= GHASH_DIGEST_SIZE,
3970 			.setkey = chcr_aead_ccm_setkey,
3971 			.setauthsize	= chcr_ccm_setauthsize,
3972 		}
3973 	},
3974 	{
3975 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
3976 		.is_registered = 0,
3977 		.alg.aead = {
3978 			.base = {
3979 				.cra_name = "rfc4309(ccm(aes))",
3980 				.cra_driver_name = "rfc4309-ccm-aes-chcr",
3981 				.cra_blocksize	 = 1,
3982 				.cra_priority = CHCR_AEAD_PRIORITY + 1,
3983 				.cra_ctxsize =	sizeof(struct chcr_context) +
3984 						sizeof(struct chcr_aead_ctx),
3985 
3986 			},
3987 			.ivsize = 8,
3988 			.maxauthsize	= GHASH_DIGEST_SIZE,
3989 			.setkey = chcr_aead_rfc4309_setkey,
3990 			.setauthsize = chcr_4106_4309_setauthsize,
3991 		}
3992 	},
3993 	{
3994 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3995 		.is_registered = 0,
3996 		.alg.aead = {
3997 			.base = {
3998 				.cra_name = "authenc(hmac(sha1),cbc(aes))",
3999 				.cra_driver_name =
4000 					"authenc-hmac-sha1-cbc-aes-chcr",
4001 				.cra_blocksize	 = AES_BLOCK_SIZE,
4002 				.cra_priority = CHCR_AEAD_PRIORITY,
4003 				.cra_ctxsize =	sizeof(struct chcr_context) +
4004 						sizeof(struct chcr_aead_ctx) +
4005 						sizeof(struct chcr_authenc_ctx),
4006 
4007 			},
4008 			.ivsize = AES_BLOCK_SIZE,
4009 			.maxauthsize = SHA1_DIGEST_SIZE,
4010 			.setkey = chcr_authenc_setkey,
4011 			.setauthsize = chcr_authenc_setauthsize,
4012 		}
4013 	},
4014 	{
4015 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4016 		.is_registered = 0,
4017 		.alg.aead = {
4018 			.base = {
4019 
4020 				.cra_name = "authenc(hmac(sha256),cbc(aes))",
4021 				.cra_driver_name =
4022 					"authenc-hmac-sha256-cbc-aes-chcr",
4023 				.cra_blocksize	 = AES_BLOCK_SIZE,
4024 				.cra_priority = CHCR_AEAD_PRIORITY,
4025 				.cra_ctxsize =	sizeof(struct chcr_context) +
4026 						sizeof(struct chcr_aead_ctx) +
4027 						sizeof(struct chcr_authenc_ctx),
4028 
4029 			},
4030 			.ivsize = AES_BLOCK_SIZE,
4031 			.maxauthsize	= SHA256_DIGEST_SIZE,
4032 			.setkey = chcr_authenc_setkey,
4033 			.setauthsize = chcr_authenc_setauthsize,
4034 		}
4035 	},
4036 	{
4037 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4038 		.is_registered = 0,
4039 		.alg.aead = {
4040 			.base = {
4041 				.cra_name = "authenc(hmac(sha224),cbc(aes))",
4042 				.cra_driver_name =
4043 					"authenc-hmac-sha224-cbc-aes-chcr",
4044 				.cra_blocksize	 = AES_BLOCK_SIZE,
4045 				.cra_priority = CHCR_AEAD_PRIORITY,
4046 				.cra_ctxsize =	sizeof(struct chcr_context) +
4047 						sizeof(struct chcr_aead_ctx) +
4048 						sizeof(struct chcr_authenc_ctx),
4049 			},
4050 			.ivsize = AES_BLOCK_SIZE,
4051 			.maxauthsize = SHA224_DIGEST_SIZE,
4052 			.setkey = chcr_authenc_setkey,
4053 			.setauthsize = chcr_authenc_setauthsize,
4054 		}
4055 	},
4056 	{
4057 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4058 		.is_registered = 0,
4059 		.alg.aead = {
4060 			.base = {
4061 				.cra_name = "authenc(hmac(sha384),cbc(aes))",
4062 				.cra_driver_name =
4063 					"authenc-hmac-sha384-cbc-aes-chcr",
4064 				.cra_blocksize	 = AES_BLOCK_SIZE,
4065 				.cra_priority = CHCR_AEAD_PRIORITY,
4066 				.cra_ctxsize =	sizeof(struct chcr_context) +
4067 						sizeof(struct chcr_aead_ctx) +
4068 						sizeof(struct chcr_authenc_ctx),
4069 
4070 			},
4071 			.ivsize = AES_BLOCK_SIZE,
4072 			.maxauthsize = SHA384_DIGEST_SIZE,
4073 			.setkey = chcr_authenc_setkey,
4074 			.setauthsize = chcr_authenc_setauthsize,
4075 		}
4076 	},
4077 	{
4078 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4079 		.is_registered = 0,
4080 		.alg.aead = {
4081 			.base = {
4082 				.cra_name = "authenc(hmac(sha512),cbc(aes))",
4083 				.cra_driver_name =
4084 					"authenc-hmac-sha512-cbc-aes-chcr",
4085 				.cra_blocksize	 = AES_BLOCK_SIZE,
4086 				.cra_priority = CHCR_AEAD_PRIORITY,
4087 				.cra_ctxsize =	sizeof(struct chcr_context) +
4088 						sizeof(struct chcr_aead_ctx) +
4089 						sizeof(struct chcr_authenc_ctx),
4090 
4091 			},
4092 			.ivsize = AES_BLOCK_SIZE,
4093 			.maxauthsize = SHA512_DIGEST_SIZE,
4094 			.setkey = chcr_authenc_setkey,
4095 			.setauthsize = chcr_authenc_setauthsize,
4096 		}
4097 	},
4098 	{
4099 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4100 		.is_registered = 0,
4101 		.alg.aead = {
4102 			.base = {
4103 				.cra_name = "authenc(digest_null,cbc(aes))",
4104 				.cra_driver_name =
4105 					"authenc-digest_null-cbc-aes-chcr",
4106 				.cra_blocksize	 = AES_BLOCK_SIZE,
4107 				.cra_priority = CHCR_AEAD_PRIORITY,
4108 				.cra_ctxsize =	sizeof(struct chcr_context) +
4109 						sizeof(struct chcr_aead_ctx) +
4110 						sizeof(struct chcr_authenc_ctx),
4111 
4112 			},
4113 			.ivsize  = AES_BLOCK_SIZE,
4114 			.maxauthsize = 0,
4115 			.setkey  = chcr_aead_digest_null_setkey,
4116 			.setauthsize = chcr_authenc_null_setauthsize,
4117 		}
4118 	},
4119 	{
4120 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4121 		.is_registered = 0,
4122 		.alg.aead = {
4123 			.base = {
4124 				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4125 				.cra_driver_name =
4126 				"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4127 				.cra_blocksize	 = 1,
4128 				.cra_priority = CHCR_AEAD_PRIORITY,
4129 				.cra_ctxsize =	sizeof(struct chcr_context) +
4130 						sizeof(struct chcr_aead_ctx) +
4131 						sizeof(struct chcr_authenc_ctx),
4132 
4133 			},
4134 			.ivsize = CTR_RFC3686_IV_SIZE,
4135 			.maxauthsize = SHA1_DIGEST_SIZE,
4136 			.setkey = chcr_authenc_setkey,
4137 			.setauthsize = chcr_authenc_setauthsize,
4138 		}
4139 	},
4140 	{
4141 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4142 		.is_registered = 0,
4143 		.alg.aead = {
4144 			.base = {
4145 
4146 				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4147 				.cra_driver_name =
4148 				"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4149 				.cra_blocksize	 = 1,
4150 				.cra_priority = CHCR_AEAD_PRIORITY,
4151 				.cra_ctxsize =	sizeof(struct chcr_context) +
4152 						sizeof(struct chcr_aead_ctx) +
4153 						sizeof(struct chcr_authenc_ctx),
4154 
4155 			},
4156 			.ivsize = CTR_RFC3686_IV_SIZE,
4157 			.maxauthsize	= SHA256_DIGEST_SIZE,
4158 			.setkey = chcr_authenc_setkey,
4159 			.setauthsize = chcr_authenc_setauthsize,
4160 		}
4161 	},
4162 	{
4163 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4164 		.is_registered = 0,
4165 		.alg.aead = {
4166 			.base = {
4167 				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4168 				.cra_driver_name =
4169 				"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4170 				.cra_blocksize	 = 1,
4171 				.cra_priority = CHCR_AEAD_PRIORITY,
4172 				.cra_ctxsize =	sizeof(struct chcr_context) +
4173 						sizeof(struct chcr_aead_ctx) +
4174 						sizeof(struct chcr_authenc_ctx),
4175 			},
4176 			.ivsize = CTR_RFC3686_IV_SIZE,
4177 			.maxauthsize = SHA224_DIGEST_SIZE,
4178 			.setkey = chcr_authenc_setkey,
4179 			.setauthsize = chcr_authenc_setauthsize,
4180 		}
4181 	},
4182 	{
4183 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4184 		.is_registered = 0,
4185 		.alg.aead = {
4186 			.base = {
4187 				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4188 				.cra_driver_name =
4189 				"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4190 				.cra_blocksize	 = 1,
4191 				.cra_priority = CHCR_AEAD_PRIORITY,
4192 				.cra_ctxsize =	sizeof(struct chcr_context) +
4193 						sizeof(struct chcr_aead_ctx) +
4194 						sizeof(struct chcr_authenc_ctx),
4195 
4196 			},
4197 			.ivsize = CTR_RFC3686_IV_SIZE,
4198 			.maxauthsize = SHA384_DIGEST_SIZE,
4199 			.setkey = chcr_authenc_setkey,
4200 			.setauthsize = chcr_authenc_setauthsize,
4201 		}
4202 	},
4203 	{
4204 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4205 		.is_registered = 0,
4206 		.alg.aead = {
4207 			.base = {
4208 				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4209 				.cra_driver_name =
4210 				"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4211 				.cra_blocksize	 = 1,
4212 				.cra_priority = CHCR_AEAD_PRIORITY,
4213 				.cra_ctxsize =	sizeof(struct chcr_context) +
4214 						sizeof(struct chcr_aead_ctx) +
4215 						sizeof(struct chcr_authenc_ctx),
4216 
4217 			},
4218 			.ivsize = CTR_RFC3686_IV_SIZE,
4219 			.maxauthsize = SHA512_DIGEST_SIZE,
4220 			.setkey = chcr_authenc_setkey,
4221 			.setauthsize = chcr_authenc_setauthsize,
4222 		}
4223 	},
4224 	{
4225 		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4226 		.is_registered = 0,
4227 		.alg.aead = {
4228 			.base = {
4229 				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4230 				.cra_driver_name =
4231 				"authenc-digest_null-rfc3686-ctr-aes-chcr",
4232 				.cra_blocksize	 = 1,
4233 				.cra_priority = CHCR_AEAD_PRIORITY,
4234 				.cra_ctxsize =	sizeof(struct chcr_context) +
4235 						sizeof(struct chcr_aead_ctx) +
4236 						sizeof(struct chcr_authenc_ctx),
4237 
4238 			},
4239 			.ivsize  = CTR_RFC3686_IV_SIZE,
4240 			.maxauthsize = 0,
4241 			.setkey  = chcr_aead_digest_null_setkey,
4242 			.setauthsize = chcr_authenc_null_setauthsize,
4243 		}
4244 	},
4245 };
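
/*
 * Each entry above is only a template: chcr_register_alg() below fills in the
 * common callbacks, flags and context sizes before handing the algorithm to
 * the crypto core. Illustrative note (not part of the driver): a caller can
 * also bind to the hardware implementation explicitly by its cra_driver_name
 * instead of the generic cra_name, e.g.:
 *
 *	tfm = crypto_alloc_aead("gcm-aes-chcr", 0, 0);
 */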
4246 
4247 /*
4248  *	chcr_unregister_alg - Deregister crypto algorithms from the
4249  *	kernel crypto framework.
4250  */
4251 static int chcr_unregister_alg(void)
4252 {
4253 	int i;
4254 
4255 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4256 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4257 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
4258 			if (driver_algs[i].is_registered)
4259 				crypto_unregister_alg(
4260 						&driver_algs[i].alg.crypto);
4261 			break;
4262 		case CRYPTO_ALG_TYPE_AEAD:
4263 			if (driver_algs[i].is_registered)
4264 				crypto_unregister_aead(
4265 						&driver_algs[i].alg.aead);
4266 			break;
4267 		case CRYPTO_ALG_TYPE_AHASH:
4268 			if (driver_algs[i].is_registered)
4269 				crypto_unregister_ahash(
4270 						&driver_algs[i].alg.hash);
4271 			break;
4272 		}
4273 		driver_algs[i].is_registered = 0;
4274 	}
4275 	return 0;
4276 }
4277 
4278 #define SZ_AHASH_CTX sizeof(struct chcr_context)
4279 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4280 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4281 
4282 /*
4283  *	chcr_register_alg - Register crypto algorithms with the kernel framework.
4284  */
4285 static int chcr_register_alg(void)
4286 {
4287 	struct crypto_alg ai;
4288 	struct ahash_alg *a_hash;
4289 	int err = 0, i;
4290 	char *name = NULL;
4291 
4292 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4293 		if (driver_algs[i].is_registered)
4294 			continue;
4295 		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4296 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
4297 			driver_algs[i].alg.crypto.cra_priority =
4298 				CHCR_CRA_PRIORITY;
4299 			driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
4300 			driver_algs[i].alg.crypto.cra_flags =
4301 				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4302 				CRYPTO_ALG_NEED_FALLBACK;
4303 			driver_algs[i].alg.crypto.cra_ctxsize =
4304 				sizeof(struct chcr_context) +
4305 				sizeof(struct ablk_ctx);
4306 			driver_algs[i].alg.crypto.cra_alignmask = 0;
4307 			driver_algs[i].alg.crypto.cra_type =
4308 				&crypto_ablkcipher_type;
4309 			err = crypto_register_alg(&driver_algs[i].alg.crypto);
4310 			name = driver_algs[i].alg.crypto.cra_driver_name;
4311 			break;
4312 		case CRYPTO_ALG_TYPE_AEAD:
4313 			driver_algs[i].alg.aead.base.cra_flags =
4314 				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
4315 			driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4316 			driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4317 			driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4318 			driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4319 			driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4320 			err = crypto_register_aead(&driver_algs[i].alg.aead);
4321 			name = driver_algs[i].alg.aead.base.cra_driver_name;
4322 			break;
4323 		case CRYPTO_ALG_TYPE_AHASH:
4324 			a_hash = &driver_algs[i].alg.hash;
4325 			a_hash->update = chcr_ahash_update;
4326 			a_hash->final = chcr_ahash_final;
4327 			a_hash->finup = chcr_ahash_finup;
4328 			a_hash->digest = chcr_ahash_digest;
4329 			a_hash->export = chcr_ahash_export;
4330 			a_hash->import = chcr_ahash_import;
4331 			a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4332 			a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4333 			a_hash->halg.base.cra_module = THIS_MODULE;
4334 			a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
4335 			a_hash->halg.base.cra_alignmask = 0;
4336 			a_hash->halg.base.cra_exit = NULL;
4337 
4338 			if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4339 				a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4340 				a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4341 				a_hash->init = chcr_hmac_init;
4342 				a_hash->setkey = chcr_ahash_setkey;
4343 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4344 			} else {
4345 				a_hash->init = chcr_sha_init;
4346 				a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4347 				a_hash->halg.base.cra_init = chcr_sha_cra_init;
4348 			}
4349 			err = crypto_register_ahash(&driver_algs[i].alg.hash);
4350 			ai = driver_algs[i].alg.hash.halg.base;
4351 			name = ai.cra_driver_name;
4352 			break;
4353 		}
4354 		if (err) {
4355 			pr_err("chcr : %s : Algorithm registration failed\n",
4356 			       name);
4357 			goto register_err;
4358 		} else {
4359 			driver_algs[i].is_registered = 1;
4360 		}
4361 	}
4362 	return 0;
4363 
4364 register_err:
4365 	chcr_unregister_alg();
4366 	return err;
4367 }
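
/*
 * Once registration succeeds, every "-chcr" driver name above appears in
 * /proc/crypto together with its priority and selftest status, which is the
 * quickest way to confirm that the hardware implementations are active.
 */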
4368 
4369 /*
4370  *	start_crypto - Register the crypto algorithms.
4371  *	This should be called once, when the first device comes up. After
4372  *	this the kernel will start calling driver APIs for crypto operations.
4373  */
4374 int start_crypto(void)
4375 {
4376 	return chcr_register_alg();
4377 }
4378 
4379 /*
4380  *	stop_crypto - Deregister all the crypto algorithms from the kernel.
4381  *	This should be called once, when the last device goes down. After
4382  *	this the kernel will not call the driver APIs for crypto operations.
4383  */
4384 int stop_crypto(void)
4385 {
4386 	chcr_unregister_alg();
4387 	return 0;
4388 }
4389