1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /*
3 * Copyright 2015-2016 Freescale Semiconductor Inc.
4 * Copyright 2017-2019 NXP
5 */
6
7 #include "compat.h"
8 #include "regs.h"
9 #include "caamalg_qi2.h"
10 #include "dpseci_cmd.h"
11 #include "desc_constr.h"
12 #include "error.h"
13 #include "sg_sw_sec4.h"
14 #include "sg_sw_qm2.h"
15 #include "key_gen.h"
16 #include "caamalg_desc.h"
17 #include "caamhash_desc.h"
18 #include "dpseci-debugfs.h"
19 #include <linux/dma-mapping.h>
20 #include <linux/fsl/mc.h>
21 #include <linux/kernel.h>
22 #include <soc/fsl/dpaa2-io.h>
23 #include <soc/fsl/dpaa2-fd.h>
24 #include <crypto/xts.h>
25 #include <asm/unaligned.h>
26
27 #define CAAM_CRA_PRIORITY 2000
28
29 /* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size and max split key size */
30 #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
31 SHA512_DIGEST_SIZE * 2)
32
33 /*
34 * This is a cache of buffers, from which the users of the CAAM QI driver
35 * can allocate short buffers. It's speedier than doing kmalloc on the hotpath.
36 * NOTE: A more elegant solution would be to have some headroom in the frames
37 * being processed. This could be added by the dpaa2-eth driver, but it would
38 * pose a problem for userspace applications, which cannot know about this
39 * limitation. So for now, this will work.
40 * NOTE: The memcache is SMP-safe. No need to handle spinlocks here.
41 */
42 static struct kmem_cache *qi_cache;
43
44 struct caam_alg_entry {
45 struct device *dev;
46 int class1_alg_type;
47 int class2_alg_type;
48 bool rfc3686;
49 bool geniv;
50 bool nodkp;
51 };
52
53 struct caam_aead_alg {
54 struct aead_alg aead;
55 struct caam_alg_entry caam;
56 bool registered;
57 };
58
59 struct caam_skcipher_alg {
60 struct skcipher_alg skcipher;
61 struct caam_alg_entry caam;
62 bool registered;
63 };
64
65 /**
66 * struct caam_ctx - per-session context
67 * @flc: Flow Contexts array
68 * @key: [authentication key], encryption key
69 * @flc_dma: I/O virtual addresses of the Flow Contexts
70 * @key_dma: I/O virtual address of the key
71 * @dir: DMA direction for mapping key and Flow Contexts
72 * @dev: dpseci device
73 * @adata: authentication algorithm details
74 * @cdata: encryption algorithm details
75 * @authsize: authentication tag (a.k.a. ICV / MAC) size
76 * @xts_key_fallback: true if fallback tfm needs to be used due
77 * to unsupported xts key lengths
78 * @fallback: xts fallback tfm
79 */
80 struct caam_ctx {
81 struct caam_flc flc[NUM_OP];
82 u8 key[CAAM_MAX_KEY_SIZE];
83 dma_addr_t flc_dma[NUM_OP];
84 dma_addr_t key_dma;
85 enum dma_data_direction dir;
86 struct device *dev;
87 struct alginfo adata;
88 struct alginfo cdata;
89 unsigned int authsize;
90 bool xts_key_fallback;
91 struct crypto_skcipher *fallback;
92 };
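/*
 * Note: the flow contexts and the key share a single DMA mapping that covers
 * the beginning of struct caam_ctx (up to flc_dma), set up once in
 * caam_cra_init(). Roughly:
 *
 *	flc_dma[i] = dma_addr + i * sizeof(flc[0])
 *	key_dma    = dma_addr + NUM_OP * sizeof(flc[0])
 *
 * so updating a shared descriptor or a key only requires a
 * dma_sync_single_for_device() call, not a new mapping.
 */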
93
94 static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
95 dma_addr_t iova_addr)
96 {
97 phys_addr_t phys_addr;
98
99 phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
100 iova_addr;
101
102 return phys_to_virt(phys_addr);
103 }
104
105 /*
106 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
107 *
108 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
109 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
110 * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
111 * hosting 16 SG entries.
112 *
113 * @flags - flags that would be used for the equivalent kmalloc(..) call
114 *
115 * Returns a pointer to a retrieved buffer on success or NULL on failure.
116 */
117 static inline void *qi_cache_zalloc(gfp_t flags)
118 {
119 return kmem_cache_zalloc(qi_cache, flags);
120 }
121
122 /*
123 * qi_cache_free - Frees buffers allocated from CAAM-QI cache
124 *
125 * @obj - buffer previously allocated by qi_cache_zalloc
126 *
127 * No checking is done; the call is a passthrough to
128 * kmem_cache_free(...)
129 */
130 static inline void qi_cache_free(void *obj)
131 {
132 kmem_cache_free(qi_cache, obj);
133 }
134
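/*
 * The per-request context of every skcipher, aead and ahash request handled
 * by this driver is expected to start with a struct caam_request, so the
 * generic async request can be mapped back to it regardless of the
 * algorithm type.
 */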
135 static struct caam_request *to_caam_req(struct crypto_async_request *areq)
136 {
137 switch (crypto_tfm_alg_type(areq->tfm)) {
138 case CRYPTO_ALG_TYPE_SKCIPHER:
139 return skcipher_request_ctx_dma(skcipher_request_cast(areq));
140 case CRYPTO_ALG_TYPE_AEAD:
141 return aead_request_ctx_dma(
142 container_of(areq, struct aead_request, base));
143 case CRYPTO_ALG_TYPE_AHASH:
144 return ahash_request_ctx_dma(ahash_request_cast(areq));
145 default:
146 return ERR_PTR(-EINVAL);
147 }
148 }
149
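/*
 * Undo the DMA mappings of a request: the source/destination scatterlists
 * (a single bidirectional mapping when src == dst), plus the IV and the
 * QMan S/G table when they were mapped.
 */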
150 static void caam_unmap(struct device *dev, struct scatterlist *src,
151 struct scatterlist *dst, int src_nents,
152 int dst_nents, dma_addr_t iv_dma, int ivsize,
153 enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
154 int qm_sg_bytes)
155 {
156 if (dst != src) {
157 if (src_nents)
158 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
159 if (dst_nents)
160 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
161 } else {
162 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
163 }
164
165 if (iv_dma)
166 dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
167
168 if (qm_sg_bytes)
169 dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
170 }
171
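/*
 * (Re)generate the ENCRYPT and DECRYPT shared descriptors in the flow
 * contexts and sync them to the device. Called from both setkey and
 * setauthsize, so it silently returns until the key and authsize are known.
 */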
172 static int aead_set_sh_desc(struct crypto_aead *aead)
173 {
174 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
175 typeof(*alg), aead);
176 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
177 unsigned int ivsize = crypto_aead_ivsize(aead);
178 struct device *dev = ctx->dev;
179 struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
180 struct caam_flc *flc;
181 u32 *desc;
182 u32 ctx1_iv_off = 0;
183 u32 *nonce = NULL;
184 unsigned int data_len[2];
185 u32 inl_mask;
186 const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
187 OP_ALG_AAI_CTR_MOD128);
188 const bool is_rfc3686 = alg->caam.rfc3686;
189
190 if (!ctx->cdata.keylen || !ctx->authsize)
191 return 0;
192
193 /*
194 * AES-CTR needs to load IV in CONTEXT1 reg
195 * at an offset of 128 bits (16 bytes)
196 * CONTEXT1[255:128] = IV
197 */
198 if (ctr_mode)
199 ctx1_iv_off = 16;
200
201 /*
202 * RFC3686 specific:
203 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
204 */
205 if (is_rfc3686) {
206 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
207 nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
208 ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
209 }
210
211 /*
212 * In case |user key| > |derived key|, using DKP<imm,imm> would result
213 * in invalid opcodes (last bytes of user key) in the resulting
214 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
215 * addresses are needed.
216 */
217 ctx->adata.key_virt = ctx->key;
218 ctx->adata.key_dma = ctx->key_dma;
219
220 ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
221 ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
222
223 data_len[0] = ctx->adata.keylen_pad;
224 data_len[1] = ctx->cdata.keylen;
225
226 /* aead_encrypt shared descriptor */
227 if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
228 DESC_QI_AEAD_ENC_LEN) +
229 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
230 DESC_JOB_IO_LEN, data_len, &inl_mask,
231 ARRAY_SIZE(data_len)) < 0)
232 return -EINVAL;
233
234 ctx->adata.key_inline = !!(inl_mask & 1);
235 ctx->cdata.key_inline = !!(inl_mask & 2);
236
237 flc = &ctx->flc[ENCRYPT];
238 desc = flc->sh_desc;
239
240 if (alg->caam.geniv)
241 cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
242 ivsize, ctx->authsize, is_rfc3686,
243 nonce, ctx1_iv_off, true,
244 priv->sec_attr.era);
245 else
246 cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
247 ivsize, ctx->authsize, is_rfc3686, nonce,
248 ctx1_iv_off, true, priv->sec_attr.era);
249
250 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
251 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
252 sizeof(flc->flc) + desc_bytes(desc),
253 ctx->dir);
254
255 /* aead_decrypt shared descriptor */
256 if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
257 (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
258 DESC_JOB_IO_LEN, data_len, &inl_mask,
259 ARRAY_SIZE(data_len)) < 0)
260 return -EINVAL;
261
262 ctx->adata.key_inline = !!(inl_mask & 1);
263 ctx->cdata.key_inline = !!(inl_mask & 2);
264
265 flc = &ctx->flc[DECRYPT];
266 desc = flc->sh_desc;
267 cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
268 ivsize, ctx->authsize, alg->caam.geniv,
269 is_rfc3686, nonce, ctx1_iv_off, true,
270 priv->sec_attr.era);
271 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
272 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
273 sizeof(flc->flc) + desc_bytes(desc),
274 ctx->dir);
275
276 return 0;
277 }
278
279 static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
280 {
281 struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
282
283 ctx->authsize = authsize;
284 aead_set_sh_desc(authenc);
285
286 return 0;
287 }
288
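/*
 * For authenc algorithms ctx->key holds, roughly:
 *
 *	[ authentication key (region sized adata.keylen_pad) ][ encryption key ]
 *
 * For RFC3686 templates the encryption key carries the CTR nonce in its
 * last CTR_RFC3686_NONCE_SIZE bytes, which aead_set_sh_desc() picks up
 * from there.
 */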
289 static int aead_setkey(struct crypto_aead *aead, const u8 *key,
290 unsigned int keylen)
291 {
292 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
293 struct device *dev = ctx->dev;
294 struct crypto_authenc_keys keys;
295
296 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
297 goto badkey;
298
299 dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
300 keys.authkeylen + keys.enckeylen, keys.enckeylen,
301 keys.authkeylen);
302 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
303 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
304
305 ctx->adata.keylen = keys.authkeylen;
306 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
307 OP_ALG_ALGSEL_MASK);
308
309 if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
310 goto badkey;
311
312 memcpy(ctx->key, keys.authkey, keys.authkeylen);
313 memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
314 dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
315 keys.enckeylen, ctx->dir);
316 print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
317 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
318 ctx->adata.keylen_pad + keys.enckeylen, 1);
319
320 ctx->cdata.keylen = keys.enckeylen;
321
322 memzero_explicit(&keys, sizeof(keys));
323 return aead_set_sh_desc(aead);
324 badkey:
325 memzero_explicit(&keys, sizeof(keys));
326 return -EINVAL;
327 }
328
329 static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
330 unsigned int keylen)
331 {
332 struct crypto_authenc_keys keys;
333 int err;
334
335 err = crypto_authenc_extractkeys(&keys, key, keylen);
336 if (unlikely(err))
337 goto out;
338
339 err = -EINVAL;
340 if (keys.enckeylen != DES3_EDE_KEY_SIZE)
341 goto out;
342
343 err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
344 aead_setkey(aead, key, keylen);
345
346 out:
347 memzero_explicit(&keys, sizeof(keys));
348 return err;
349 }
350
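/*
 * The extended descriptor, the QMan S/G table and (when needed) a DMA-able
 * copy of the IV all live in a single qi_cache buffer, roughly:
 *
 *	[ struct aead_edesc | dpaa2_sg_entry[qm_sg_nents] | IV ]
 *
 * The FD frame list is built with fd_flt[0] as the output entry and
 * fd_flt[1] as the input entry.
 */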
351 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
352 bool encrypt)
353 {
354 struct crypto_aead *aead = crypto_aead_reqtfm(req);
355 struct caam_request *req_ctx = aead_request_ctx_dma(req);
356 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
357 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
358 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
359 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
360 typeof(*alg), aead);
361 struct device *dev = ctx->dev;
362 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
363 GFP_KERNEL : GFP_ATOMIC;
364 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
365 int src_len, dst_len = 0;
366 struct aead_edesc *edesc;
367 dma_addr_t qm_sg_dma, iv_dma = 0;
368 int ivsize = 0;
369 unsigned int authsize = ctx->authsize;
370 int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
371 int in_len, out_len;
372 struct dpaa2_sg_entry *sg_table;
373
374 /* allocate space for base edesc, link tables and IV */
375 edesc = qi_cache_zalloc(flags);
376 if (unlikely(!edesc)) {
377 dev_err(dev, "could not allocate extended descriptor\n");
378 return ERR_PTR(-ENOMEM);
379 }
380
381 if (unlikely(req->dst != req->src)) {
382 src_len = req->assoclen + req->cryptlen;
383 dst_len = src_len + (encrypt ? authsize : (-authsize));
384
385 src_nents = sg_nents_for_len(req->src, src_len);
386 if (unlikely(src_nents < 0)) {
387 dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
388 src_len);
389 qi_cache_free(edesc);
390 return ERR_PTR(src_nents);
391 }
392
393 dst_nents = sg_nents_for_len(req->dst, dst_len);
394 if (unlikely(dst_nents < 0)) {
395 dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
396 dst_len);
397 qi_cache_free(edesc);
398 return ERR_PTR(dst_nents);
399 }
400
401 if (src_nents) {
402 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
403 DMA_TO_DEVICE);
404 if (unlikely(!mapped_src_nents)) {
405 dev_err(dev, "unable to map source\n");
406 qi_cache_free(edesc);
407 return ERR_PTR(-ENOMEM);
408 }
409 } else {
410 mapped_src_nents = 0;
411 }
412
413 if (dst_nents) {
414 mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
415 DMA_FROM_DEVICE);
416 if (unlikely(!mapped_dst_nents)) {
417 dev_err(dev, "unable to map destination\n");
418 dma_unmap_sg(dev, req->src, src_nents,
419 DMA_TO_DEVICE);
420 qi_cache_free(edesc);
421 return ERR_PTR(-ENOMEM);
422 }
423 } else {
424 mapped_dst_nents = 0;
425 }
426 } else {
427 src_len = req->assoclen + req->cryptlen +
428 (encrypt ? authsize : 0);
429
430 src_nents = sg_nents_for_len(req->src, src_len);
431 if (unlikely(src_nents < 0)) {
432 dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
433 src_len);
434 qi_cache_free(edesc);
435 return ERR_PTR(src_nents);
436 }
437
438 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
439 DMA_BIDIRECTIONAL);
440 if (unlikely(!mapped_src_nents)) {
441 dev_err(dev, "unable to map source\n");
442 qi_cache_free(edesc);
443 return ERR_PTR(-ENOMEM);
444 }
445 }
446
447 if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
448 ivsize = crypto_aead_ivsize(aead);
449
450 /*
451 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
452 * Input is not contiguous.
453 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
454 * the end of the table by allocating more S/G entries. Logic:
455 * if (src != dst && output S/G)
456 * pad output S/G, if needed
457 * else if (src == dst && S/G)
458 * overlapping S/Gs; pad one of them
459 * else if (input S/G) ...
460 * pad input S/G, if needed
461 */
462 qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
463 if (mapped_dst_nents > 1)
464 qm_sg_nents += pad_sg_nents(mapped_dst_nents);
465 else if ((req->src == req->dst) && (mapped_src_nents > 1))
466 qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
467 1 + !!ivsize +
468 pad_sg_nents(mapped_src_nents));
469 else
470 qm_sg_nents = pad_sg_nents(qm_sg_nents);
471
472 sg_table = &edesc->sgt[0];
473 qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
474 if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
475 CAAM_QI_MEMCACHE_SIZE)) {
476 dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
477 qm_sg_nents, ivsize);
478 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
479 0, DMA_NONE, 0, 0);
480 qi_cache_free(edesc);
481 return ERR_PTR(-ENOMEM);
482 }
483
484 if (ivsize) {
485 u8 *iv = (u8 *)(sg_table + qm_sg_nents);
486
487 /* Make sure IV is located in a DMAable area */
488 memcpy(iv, req->iv, ivsize);
489
490 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
491 if (dma_mapping_error(dev, iv_dma)) {
492 dev_err(dev, "unable to map IV\n");
493 caam_unmap(dev, req->src, req->dst, src_nents,
494 dst_nents, 0, 0, DMA_NONE, 0, 0);
495 qi_cache_free(edesc);
496 return ERR_PTR(-ENOMEM);
497 }
498 }
499
500 edesc->src_nents = src_nents;
501 edesc->dst_nents = dst_nents;
502 edesc->iv_dma = iv_dma;
503
504 if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
505 OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
506 /*
507 * The associated data comes already with the IV but we need
508 * to skip it when we authenticate or encrypt...
509 */
510 edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
511 else
512 edesc->assoclen = cpu_to_caam32(req->assoclen);
513 edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
514 DMA_TO_DEVICE);
515 if (dma_mapping_error(dev, edesc->assoclen_dma)) {
516 dev_err(dev, "unable to map assoclen\n");
517 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
518 iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
519 qi_cache_free(edesc);
520 return ERR_PTR(-ENOMEM);
521 }
522
523 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
524 qm_sg_index++;
525 if (ivsize) {
526 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
527 qm_sg_index++;
528 }
529 sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
530 qm_sg_index += mapped_src_nents;
531
532 if (mapped_dst_nents > 1)
533 sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
534
535 qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
536 if (dma_mapping_error(dev, qm_sg_dma)) {
537 dev_err(dev, "unable to map S/G table\n");
538 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
539 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
540 iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
541 qi_cache_free(edesc);
542 return ERR_PTR(-ENOMEM);
543 }
544
545 edesc->qm_sg_dma = qm_sg_dma;
546 edesc->qm_sg_bytes = qm_sg_bytes;
547
548 out_len = req->assoclen + req->cryptlen +
549 (encrypt ? ctx->authsize : (-ctx->authsize));
550 in_len = 4 + ivsize + req->assoclen + req->cryptlen;
551
552 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
553 dpaa2_fl_set_final(in_fle, true);
554 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
555 dpaa2_fl_set_addr(in_fle, qm_sg_dma);
556 dpaa2_fl_set_len(in_fle, in_len);
557
558 if (req->dst == req->src) {
559 if (mapped_src_nents == 1) {
560 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
561 dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
562 } else {
563 dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
564 dpaa2_fl_set_addr(out_fle, qm_sg_dma +
565 (1 + !!ivsize) * sizeof(*sg_table));
566 }
567 } else if (!mapped_dst_nents) {
568 /*
569 * The crypto engine requires the output entry to be present when
570 * the "frame list" FD format is used.
571 * Since the engine does not support FMT=2'b11 (unused entry type),
572 * leaving out_fle zeroized is the best option.
573 */
574 goto skip_out_fle;
575 } else if (mapped_dst_nents == 1) {
576 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
577 dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
578 } else {
579 dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
580 dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
581 sizeof(*sg_table));
582 }
583
584 dpaa2_fl_set_len(out_fle, out_len);
585
586 skip_out_fle:
587 return edesc;
588 }
589
590 static int chachapoly_set_sh_desc(struct crypto_aead *aead)
591 {
592 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
593 unsigned int ivsize = crypto_aead_ivsize(aead);
594 struct device *dev = ctx->dev;
595 struct caam_flc *flc;
596 u32 *desc;
597
598 if (!ctx->cdata.keylen || !ctx->authsize)
599 return 0;
600
601 flc = &ctx->flc[ENCRYPT];
602 desc = flc->sh_desc;
603 cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
604 ctx->authsize, true, true);
605 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
606 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
607 sizeof(flc->flc) + desc_bytes(desc),
608 ctx->dir);
609
610 flc = &ctx->flc[DECRYPT];
611 desc = flc->sh_desc;
612 cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
613 ctx->authsize, false, true);
614 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
615 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
616 sizeof(flc->flc) + desc_bytes(desc),
617 ctx->dir);
618
619 return 0;
620 }
621
622 static int chachapoly_setauthsize(struct crypto_aead *aead,
623 unsigned int authsize)
624 {
625 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
626
627 if (authsize != POLY1305_DIGEST_SIZE)
628 return -EINVAL;
629
630 ctx->authsize = authsize;
631 return chachapoly_set_sh_desc(aead);
632 }
633
634 static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
635 unsigned int keylen)
636 {
637 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
638 unsigned int ivsize = crypto_aead_ivsize(aead);
639 unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
640
641 if (keylen != CHACHA_KEY_SIZE + saltlen)
642 return -EINVAL;
643
644 ctx->cdata.key_virt = key;
645 ctx->cdata.keylen = keylen - saltlen;
646
647 return chachapoly_set_sh_desc(aead);
648 }
649
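/*
 * For the GCM family the key is inlined in the shared descriptor only when
 * the job descriptor and the shared descriptor still fit in the 64-word
 * descriptor buffer; otherwise the descriptor references it via key_dma.
 */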
650 static int gcm_set_sh_desc(struct crypto_aead *aead)
651 {
652 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
653 struct device *dev = ctx->dev;
654 unsigned int ivsize = crypto_aead_ivsize(aead);
655 struct caam_flc *flc;
656 u32 *desc;
657 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
658 ctx->cdata.keylen;
659
660 if (!ctx->cdata.keylen || !ctx->authsize)
661 return 0;
662
663 /*
664 * AES GCM encrypt shared descriptor
665 * Job Descriptor and Shared Descriptor
666 * must fit into the 64-word Descriptor h/w Buffer
667 */
668 if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
669 ctx->cdata.key_inline = true;
670 ctx->cdata.key_virt = ctx->key;
671 } else {
672 ctx->cdata.key_inline = false;
673 ctx->cdata.key_dma = ctx->key_dma;
674 }
675
676 flc = &ctx->flc[ENCRYPT];
677 desc = flc->sh_desc;
678 cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
679 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
680 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
681 sizeof(flc->flc) + desc_bytes(desc),
682 ctx->dir);
683
684 /*
685 * Job Descriptor and Shared Descriptors
686 * must all fit into the 64-word Descriptor h/w Buffer
687 */
688 if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
689 ctx->cdata.key_inline = true;
690 ctx->cdata.key_virt = ctx->key;
691 } else {
692 ctx->cdata.key_inline = false;
693 ctx->cdata.key_dma = ctx->key_dma;
694 }
695
696 flc = &ctx->flc[DECRYPT];
697 desc = flc->sh_desc;
698 cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
699 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
700 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
701 sizeof(flc->flc) + desc_bytes(desc),
702 ctx->dir);
703
704 return 0;
705 }
706
707 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
708 {
709 struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
710 int err;
711
712 err = crypto_gcm_check_authsize(authsize);
713 if (err)
714 return err;
715
716 ctx->authsize = authsize;
717 gcm_set_sh_desc(authenc);
718
719 return 0;
720 }
721
722 static int gcm_setkey(struct crypto_aead *aead,
723 const u8 *key, unsigned int keylen)
724 {
725 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
726 struct device *dev = ctx->dev;
727 int ret;
728
729 ret = aes_check_keylen(keylen);
730 if (ret)
731 return ret;
732 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
733 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
734
735 memcpy(ctx->key, key, keylen);
736 dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
737 ctx->cdata.keylen = keylen;
738
739 return gcm_set_sh_desc(aead);
740 }
741
742 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
743 {
744 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
745 struct device *dev = ctx->dev;
746 unsigned int ivsize = crypto_aead_ivsize(aead);
747 struct caam_flc *flc;
748 u32 *desc;
749 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
750 ctx->cdata.keylen;
751
752 if (!ctx->cdata.keylen || !ctx->authsize)
753 return 0;
754
755 ctx->cdata.key_virt = ctx->key;
756
757 /*
758 * RFC4106 encrypt shared descriptor
759 * Job Descriptor and Shared Descriptor
760 * must fit into the 64-word Descriptor h/w Buffer
761 */
762 if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
763 ctx->cdata.key_inline = true;
764 } else {
765 ctx->cdata.key_inline = false;
766 ctx->cdata.key_dma = ctx->key_dma;
767 }
768
769 flc = &ctx->flc[ENCRYPT];
770 desc = flc->sh_desc;
771 cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
772 true);
773 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
774 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
775 sizeof(flc->flc) + desc_bytes(desc),
776 ctx->dir);
777
778 /*
779 * Job Descriptor and Shared Descriptors
780 * must all fit into the 64-word Descriptor h/w Buffer
781 */
782 if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
783 ctx->cdata.key_inline = true;
784 } else {
785 ctx->cdata.key_inline = false;
786 ctx->cdata.key_dma = ctx->key_dma;
787 }
788
789 flc = &ctx->flc[DECRYPT];
790 desc = flc->sh_desc;
791 cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
792 true);
793 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
794 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
795 sizeof(flc->flc) + desc_bytes(desc),
796 ctx->dir);
797
798 return 0;
799 }
800
801 static int rfc4106_setauthsize(struct crypto_aead *authenc,
802 unsigned int authsize)
803 {
804 struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
805 int err;
806
807 err = crypto_rfc4106_check_authsize(authsize);
808 if (err)
809 return err;
810
811 ctx->authsize = authsize;
812 rfc4106_set_sh_desc(authenc);
813
814 return 0;
815 }
816
817 static int rfc4106_setkey(struct crypto_aead *aead,
818 const u8 *key, unsigned int keylen)
819 {
820 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
821 struct device *dev = ctx->dev;
822 int ret;
823
824 ret = aes_check_keylen(keylen - 4);
825 if (ret)
826 return ret;
827
828 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
829 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
830
831 memcpy(ctx->key, key, keylen);
832 /*
833 * The last four bytes of the key material are used as the salt value
834 * in the nonce. Update the AES key length.
835 */
836 ctx->cdata.keylen = keylen - 4;
837 dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
838 ctx->dir);
839
840 return rfc4106_set_sh_desc(aead);
841 }
842
843 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
844 {
845 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
846 struct device *dev = ctx->dev;
847 unsigned int ivsize = crypto_aead_ivsize(aead);
848 struct caam_flc *flc;
849 u32 *desc;
850 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
851 ctx->cdata.keylen;
852
853 if (!ctx->cdata.keylen || !ctx->authsize)
854 return 0;
855
856 ctx->cdata.key_virt = ctx->key;
857
858 /*
859 * RFC4543 encrypt shared descriptor
860 * Job Descriptor and Shared Descriptor
861 * must fit into the 64-word Descriptor h/w Buffer
862 */
863 if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
864 ctx->cdata.key_inline = true;
865 } else {
866 ctx->cdata.key_inline = false;
867 ctx->cdata.key_dma = ctx->key_dma;
868 }
869
870 flc = &ctx->flc[ENCRYPT];
871 desc = flc->sh_desc;
872 cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
873 true);
874 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
875 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
876 sizeof(flc->flc) + desc_bytes(desc),
877 ctx->dir);
878
879 /*
880 * Job Descriptor and Shared Descriptors
881 * must all fit into the 64-word Descriptor h/w Buffer
882 */
883 if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
884 ctx->cdata.key_inline = true;
885 } else {
886 ctx->cdata.key_inline = false;
887 ctx->cdata.key_dma = ctx->key_dma;
888 }
889
890 flc = &ctx->flc[DECRYPT];
891 desc = flc->sh_desc;
892 cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
893 true);
894 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
895 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
896 sizeof(flc->flc) + desc_bytes(desc),
897 ctx->dir);
898
899 return 0;
900 }
901
902 static int rfc4543_setauthsize(struct crypto_aead *authenc,
903 unsigned int authsize)
904 {
905 struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
906
907 if (authsize != 16)
908 return -EINVAL;
909
910 ctx->authsize = authsize;
911 rfc4543_set_sh_desc(authenc);
912
913 return 0;
914 }
915
916 static int rfc4543_setkey(struct crypto_aead *aead,
917 const u8 *key, unsigned int keylen)
918 {
919 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
920 struct device *dev = ctx->dev;
921 int ret;
922
923 ret = aes_check_keylen(keylen - 4);
924 if (ret)
925 return ret;
926
927 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
928 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
929
930 memcpy(ctx->key, key, keylen);
931 /*
932 * The last four bytes of the key material are used as the salt value
933 * in the nonce. Update the AES key length.
934 */
935 ctx->cdata.keylen = keylen - 4;
936 dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
937 ctx->dir);
938
939 return rfc4543_set_sh_desc(aead);
940 }
941
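/*
 * Plain skciphers always inline the key in the shared descriptors.
 * ctx1_iv_off selects where the IV is loaded inside CONTEXT1: 0 for most
 * modes, 16 for AES-CTR and 16 + nonce size for RFC3686 (see the callers
 * below).
 */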
942 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
943 unsigned int keylen, const u32 ctx1_iv_off)
944 {
945 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
946 struct caam_skcipher_alg *alg =
947 container_of(crypto_skcipher_alg(skcipher),
948 struct caam_skcipher_alg, skcipher);
949 struct device *dev = ctx->dev;
950 struct caam_flc *flc;
951 unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
952 u32 *desc;
953 const bool is_rfc3686 = alg->caam.rfc3686;
954
955 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
956 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
957
958 ctx->cdata.keylen = keylen;
959 ctx->cdata.key_virt = key;
960 ctx->cdata.key_inline = true;
961
962 /* skcipher_encrypt shared descriptor */
963 flc = &ctx->flc[ENCRYPT];
964 desc = flc->sh_desc;
965 cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
966 ctx1_iv_off);
967 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
968 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
969 sizeof(flc->flc) + desc_bytes(desc),
970 ctx->dir);
971
972 /* skcipher_decrypt shared descriptor */
973 flc = &ctx->flc[DECRYPT];
974 desc = flc->sh_desc;
975 cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
976 ctx1_iv_off);
977 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
978 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
979 sizeof(flc->flc) + desc_bytes(desc),
980 ctx->dir);
981
982 return 0;
983 }
984
985 static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
986 const u8 *key, unsigned int keylen)
987 {
988 int err;
989
990 err = aes_check_keylen(keylen);
991 if (err)
992 return err;
993
994 return skcipher_setkey(skcipher, key, keylen, 0);
995 }
996
997 static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
998 const u8 *key, unsigned int keylen)
999 {
1000 u32 ctx1_iv_off;
1001 int err;
1002
1003 /*
1004 * RFC3686 specific:
1005 * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
1006 * | *key = {KEY, NONCE}
1007 */
1008 ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
1009 keylen -= CTR_RFC3686_NONCE_SIZE;
1010
1011 err = aes_check_keylen(keylen);
1012 if (err)
1013 return err;
1014
1015 return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1016 }
1017
1018 static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
1019 const u8 *key, unsigned int keylen)
1020 {
1021 u32 ctx1_iv_off;
1022 int err;
1023
1024 /*
1025 * AES-CTR needs to load IV in CONTEXT1 reg
1026 * at an offset of 128 bits (16 bytes)
1027 * CONTEXT1[255:128] = IV
1028 */
1029 ctx1_iv_off = 16;
1030
1031 err = aes_check_keylen(keylen);
1032 if (err)
1033 return err;
1034
1035 return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1036 }
1037
1038 static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
1039 const u8 *key, unsigned int keylen)
1040 {
1041 if (keylen != CHACHA_KEY_SIZE)
1042 return -EINVAL;
1043
1044 return skcipher_setkey(skcipher, key, keylen, 0);
1045 }
1046
1047 static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
1048 const u8 *key, unsigned int keylen)
1049 {
1050 return verify_skcipher_des_key(skcipher, key) ?:
1051 skcipher_setkey(skcipher, key, keylen, 0);
1052 }
1053
1054 static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
1055 const u8 *key, unsigned int keylen)
1056 {
1057 return verify_skcipher_des3_key(skcipher, key) ?:
1058 skcipher_setkey(skcipher, key, keylen, 0);
1059 }
1060
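/*
 * XTS may need the software fallback: either the key length is one the
 * engine does not handle natively (only 2 x 128-bit and 2 x 256-bit keys
 * are), or the SEC era is 8 or earlier, in which case requests with a
 * non-zero upper tweak half are redirected to the fallback at
 * encrypt/decrypt time (see xts_skcipher_ivsize()).
 */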
1061 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
1062 unsigned int keylen)
1063 {
1064 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1065 struct device *dev = ctx->dev;
1066 struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
1067 struct caam_flc *flc;
1068 u32 *desc;
1069 int err;
1070
1071 err = xts_verify_key(skcipher, key, keylen);
1072 if (err) {
1073 dev_dbg(dev, "key size mismatch\n");
1074 return err;
1075 }
1076
1077 if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
1078 ctx->xts_key_fallback = true;
1079
1080 if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
1081 err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
1082 if (err)
1083 return err;
1084 }
1085
1086 ctx->cdata.keylen = keylen;
1087 ctx->cdata.key_virt = key;
1088 ctx->cdata.key_inline = true;
1089
1090 /* xts_skcipher_encrypt shared descriptor */
1091 flc = &ctx->flc[ENCRYPT];
1092 desc = flc->sh_desc;
1093 cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
1094 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1095 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
1096 sizeof(flc->flc) + desc_bytes(desc),
1097 ctx->dir);
1098
1099 /* xts_skcipher_decrypt shared descriptor */
1100 flc = &ctx->flc[DECRYPT];
1101 desc = flc->sh_desc;
1102 cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
1103 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1104 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
1105 sizeof(flc->flc) + desc_bytes(desc),
1106 ctx->dir);
1107
1108 return 0;
1109 }
1110
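/*
 * As for AEAD, the extended descriptor, the QMan S/G table and a DMA-able
 * copy of the IV share one qi_cache buffer. The IV entry appears both at
 * the start of the input S/G table and at the end of the output one, so the
 * engine writes the updated IV back and the completion callback can copy it
 * into req->iv.
 */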
1111 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
1112 {
1113 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1114 struct caam_request *req_ctx = skcipher_request_ctx_dma(req);
1115 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
1116 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
1117 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1118 struct device *dev = ctx->dev;
1119 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1120 GFP_KERNEL : GFP_ATOMIC;
1121 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1122 struct skcipher_edesc *edesc;
1123 dma_addr_t iv_dma;
1124 u8 *iv;
1125 int ivsize = crypto_skcipher_ivsize(skcipher);
1126 int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1127 struct dpaa2_sg_entry *sg_table;
1128
1129 src_nents = sg_nents_for_len(req->src, req->cryptlen);
1130 if (unlikely(src_nents < 0)) {
1131 dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
1132 req->cryptlen);
1133 return ERR_PTR(src_nents);
1134 }
1135
1136 if (unlikely(req->dst != req->src)) {
1137 dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1138 if (unlikely(dst_nents < 0)) {
1139 dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
1140 req->cryptlen);
1141 return ERR_PTR(dst_nents);
1142 }
1143
1144 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1145 DMA_TO_DEVICE);
1146 if (unlikely(!mapped_src_nents)) {
1147 dev_err(dev, "unable to map source\n");
1148 return ERR_PTR(-ENOMEM);
1149 }
1150
1151 mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
1152 DMA_FROM_DEVICE);
1153 if (unlikely(!mapped_dst_nents)) {
1154 dev_err(dev, "unable to map destination\n");
1155 dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
1156 return ERR_PTR(-ENOMEM);
1157 }
1158 } else {
1159 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1160 DMA_BIDIRECTIONAL);
1161 if (unlikely(!mapped_src_nents)) {
1162 dev_err(dev, "unable to map source\n");
1163 return ERR_PTR(-ENOMEM);
1164 }
1165 }
1166
1167 qm_sg_ents = 1 + mapped_src_nents;
1168 dst_sg_idx = qm_sg_ents;
1169
1170 /*
1171 * Input, output HW S/G tables: [IV, src][dst, IV]
1172 * IV entries point to the same buffer
1173 * If src == dst, S/G entries are reused (S/G tables overlap)
1174 *
1175 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
1176 * the end of the table by allocating more S/G entries.
1177 */
1178 if (req->src != req->dst)
1179 qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
1180 else
1181 qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
1182
1183 qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
1184 if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1185 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1186 dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
1187 qm_sg_ents, ivsize);
1188 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1189 0, DMA_NONE, 0, 0);
1190 return ERR_PTR(-ENOMEM);
1191 }
1192
1193 /* allocate space for base edesc, link tables and IV */
1194 edesc = qi_cache_zalloc(flags);
1195 if (unlikely(!edesc)) {
1196 dev_err(dev, "could not allocate extended descriptor\n");
1197 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1198 0, DMA_NONE, 0, 0);
1199 return ERR_PTR(-ENOMEM);
1200 }
1201
1202 /* Make sure IV is located in a DMAable area */
1203 sg_table = &edesc->sgt[0];
1204 iv = (u8 *)(sg_table + qm_sg_ents);
1205 memcpy(iv, req->iv, ivsize);
1206
1207 iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
1208 if (dma_mapping_error(dev, iv_dma)) {
1209 dev_err(dev, "unable to map IV\n");
1210 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1211 0, DMA_NONE, 0, 0);
1212 qi_cache_free(edesc);
1213 return ERR_PTR(-ENOMEM);
1214 }
1215
1216 edesc->src_nents = src_nents;
1217 edesc->dst_nents = dst_nents;
1218 edesc->iv_dma = iv_dma;
1219 edesc->qm_sg_bytes = qm_sg_bytes;
1220
1221 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1222 sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
1223
1224 if (req->src != req->dst)
1225 sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
1226
1227 dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
1228 ivsize, 0);
1229
1230 edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1231 DMA_TO_DEVICE);
1232 if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1233 dev_err(dev, "unable to map S/G table\n");
1234 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
1235 iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
1236 qi_cache_free(edesc);
1237 return ERR_PTR(-ENOMEM);
1238 }
1239
1240 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
1241 dpaa2_fl_set_final(in_fle, true);
1242 dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
1243 dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
1244
1245 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
1246 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1247
1248 dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
1249
1250 if (req->src == req->dst)
1251 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
1252 sizeof(*sg_table));
1253 else
1254 dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
1255 sizeof(*sg_table));
1256
1257 return edesc;
1258 }
1259
1260 static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1261 struct aead_request *req)
1262 {
1263 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1264 int ivsize = crypto_aead_ivsize(aead);
1265
1266 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1267 edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
1268 edesc->qm_sg_bytes);
1269 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1270 }
1271
1272 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1273 struct skcipher_request *req)
1274 {
1275 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1276 int ivsize = crypto_skcipher_ivsize(skcipher);
1277
1278 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1279 edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
1280 edesc->qm_sg_bytes);
1281 }
1282
1283 static void aead_encrypt_done(void *cbk_ctx, u32 status)
1284 {
1285 struct crypto_async_request *areq = cbk_ctx;
1286 struct aead_request *req = container_of(areq, struct aead_request,
1287 base);
1288 struct caam_request *req_ctx = to_caam_req(areq);
1289 struct aead_edesc *edesc = req_ctx->edesc;
1290 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1291 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1292 int ecode = 0;
1293
1294 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1295
1296 if (unlikely(status))
1297 ecode = caam_qi2_strstatus(ctx->dev, status);
1298
1299 aead_unmap(ctx->dev, edesc, req);
1300 qi_cache_free(edesc);
1301 aead_request_complete(req, ecode);
1302 }
1303
1304 static void aead_decrypt_done(void *cbk_ctx, u32 status)
1305 {
1306 struct crypto_async_request *areq = cbk_ctx;
1307 struct aead_request *req = container_of(areq, struct aead_request,
1308 base);
1309 struct caam_request *req_ctx = to_caam_req(areq);
1310 struct aead_edesc *edesc = req_ctx->edesc;
1311 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1312 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1313 int ecode = 0;
1314
1315 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1316
1317 if (unlikely(status))
1318 ecode = caam_qi2_strstatus(ctx->dev, status);
1319
1320 aead_unmap(ctx->dev, edesc, req);
1321 qi_cache_free(edesc);
1322 aead_request_complete(req, ecode);
1323 }
1324
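/*
 * Encrypt/decrypt entry points: build the extended descriptor, point the
 * caam_request at the matching flow context and completion callback, then
 * enqueue to the DPSECI object. On -EINPROGRESS (or a backlogged -EBUSY)
 * cleanup is left to the completion callback; otherwise it is done here.
 */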
1325 static int aead_encrypt(struct aead_request *req)
1326 {
1327 struct aead_edesc *edesc;
1328 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1329 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1330 struct caam_request *caam_req = aead_request_ctx_dma(req);
1331 int ret;
1332
1333 /* allocate extended descriptor */
1334 edesc = aead_edesc_alloc(req, true);
1335 if (IS_ERR(edesc))
1336 return PTR_ERR(edesc);
1337
1338 caam_req->flc = &ctx->flc[ENCRYPT];
1339 caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1340 caam_req->cbk = aead_encrypt_done;
1341 caam_req->ctx = &req->base;
1342 caam_req->edesc = edesc;
1343 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1344 if (ret != -EINPROGRESS &&
1345 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1346 aead_unmap(ctx->dev, edesc, req);
1347 qi_cache_free(edesc);
1348 }
1349
1350 return ret;
1351 }
1352
1353 static int aead_decrypt(struct aead_request *req)
1354 {
1355 struct aead_edesc *edesc;
1356 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1357 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1358 struct caam_request *caam_req = aead_request_ctx_dma(req);
1359 int ret;
1360
1361 /* allocate extended descriptor */
1362 edesc = aead_edesc_alloc(req, false);
1363 if (IS_ERR(edesc))
1364 return PTR_ERR(edesc);
1365
1366 caam_req->flc = &ctx->flc[DECRYPT];
1367 caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1368 caam_req->cbk = aead_decrypt_done;
1369 caam_req->ctx = &req->base;
1370 caam_req->edesc = edesc;
1371 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1372 if (ret != -EINPROGRESS &&
1373 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1374 aead_unmap(ctx->dev, edesc, req);
1375 qi_cache_free(edesc);
1376 }
1377
1378 return ret;
1379 }
1380
1381 static int ipsec_gcm_encrypt(struct aead_request *req)
1382 {
1383 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
1384 }
1385
1386 static int ipsec_gcm_decrypt(struct aead_request *req)
1387 {
1388 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
1389 }
1390
1391 static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1392 {
1393 struct crypto_async_request *areq = cbk_ctx;
1394 struct skcipher_request *req = skcipher_request_cast(areq);
1395 struct caam_request *req_ctx = to_caam_req(areq);
1396 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1397 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1398 struct skcipher_edesc *edesc = req_ctx->edesc;
1399 int ecode = 0;
1400 int ivsize = crypto_skcipher_ivsize(skcipher);
1401
1402 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1403
1404 if (unlikely(status))
1405 ecode = caam_qi2_strstatus(ctx->dev, status);
1406
1407 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1408 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1409 edesc->src_nents > 1 ? 100 : ivsize, 1);
1410 caam_dump_sg("dst @" __stringify(__LINE__)": ",
1411 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1412 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1413
1414 skcipher_unmap(ctx->dev, edesc, req);
1415
1416 /*
1417 * The crypto API expects us to set the IV (req->iv) to the last
1418 * ciphertext block (CBC mode) or last counter (CTR mode).
1419 * This is used e.g. by the CTS mode.
1420 */
1421 if (!ecode)
1422 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1423 ivsize);
1424
1425 qi_cache_free(edesc);
1426 skcipher_request_complete(req, ecode);
1427 }
1428
1429 static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1430 {
1431 struct crypto_async_request *areq = cbk_ctx;
1432 struct skcipher_request *req = skcipher_request_cast(areq);
1433 struct caam_request *req_ctx = to_caam_req(areq);
1434 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1435 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1436 struct skcipher_edesc *edesc = req_ctx->edesc;
1437 int ecode = 0;
1438 int ivsize = crypto_skcipher_ivsize(skcipher);
1439
1440 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1441
1442 if (unlikely(status))
1443 ecode = caam_qi2_strstatus(ctx->dev, status);
1444
1445 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1446 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1447 edesc->src_nents > 1 ? 100 : ivsize, 1);
1448 caam_dump_sg("dst @" __stringify(__LINE__)": ",
1449 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1450 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1451
1452 skcipher_unmap(ctx->dev, edesc, req);
1453
1454 /*
1455 * The crypto API expects us to set the IV (req->iv) to the last
1456 * ciphertext block (CBC mode) or last counter (CTR mode).
1457 * This is used e.g. by the CTS mode.
1458 */
1459 if (!ecode)
1460 memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
1461 ivsize);
1462
1463 qi_cache_free(edesc);
1464 skcipher_request_complete(req, ecode);
1465 }
1466
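/*
 * Return true if the upper 64 bits of the 16-byte XTS tweak (req->iv) are
 * non-zero; used to decide whether the request must go to the fallback tfm
 * on SEC era <= 8.
 */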
1467 static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
1468 {
1469 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1470 unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
1471
1472 return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
1473 }
1474
1475 static int skcipher_encrypt(struct skcipher_request *req)
1476 {
1477 struct skcipher_edesc *edesc;
1478 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1479 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1480 struct caam_request *caam_req = skcipher_request_ctx_dma(req);
1481 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
1482 int ret;
1483
1484 /*
1485 * XTS is expected to return an error even for input length = 0, hence the
1486 * early return only when there is no fallback. Inputs shorter than the
1487 * block size are caught later, by the fallback or by HW offloading.
1488 */
1489 if (!req->cryptlen && !ctx->fallback)
1490 return 0;
1491
1492 if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
1493 ctx->xts_key_fallback)) {
1494 skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
1495 skcipher_request_set_callback(&caam_req->fallback_req,
1496 req->base.flags,
1497 req->base.complete,
1498 req->base.data);
1499 skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
1500 req->dst, req->cryptlen, req->iv);
1501
1502 return crypto_skcipher_encrypt(&caam_req->fallback_req);
1503 }
1504
1505 /* allocate extended descriptor */
1506 edesc = skcipher_edesc_alloc(req);
1507 if (IS_ERR(edesc))
1508 return PTR_ERR(edesc);
1509
1510 caam_req->flc = &ctx->flc[ENCRYPT];
1511 caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1512 caam_req->cbk = skcipher_encrypt_done;
1513 caam_req->ctx = &req->base;
1514 caam_req->edesc = edesc;
1515 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1516 if (ret != -EINPROGRESS &&
1517 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1518 skcipher_unmap(ctx->dev, edesc, req);
1519 qi_cache_free(edesc);
1520 }
1521
1522 return ret;
1523 }
1524
1525 static int skcipher_decrypt(struct skcipher_request *req)
1526 {
1527 struct skcipher_edesc *edesc;
1528 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1529 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1530 struct caam_request *caam_req = skcipher_request_ctx_dma(req);
1531 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
1532 int ret;
1533
1534 /*
1535 * XTS is expected to return an error even for input length = 0, hence the
1536 * early return only when there is no fallback. Inputs shorter than the
1537 * block size are caught later, by the fallback or by HW offloading.
1538 */
1539 if (!req->cryptlen && !ctx->fallback)
1540 return 0;
1541
1542 if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
1543 ctx->xts_key_fallback)) {
1544 skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
1545 skcipher_request_set_callback(&caam_req->fallback_req,
1546 req->base.flags,
1547 req->base.complete,
1548 req->base.data);
1549 skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
1550 req->dst, req->cryptlen, req->iv);
1551
1552 return crypto_skcipher_decrypt(&caam_req->fallback_req);
1553 }
1554
1555 /* allocate extended descriptor */
1556 edesc = skcipher_edesc_alloc(req);
1557 if (IS_ERR(edesc))
1558 return PTR_ERR(edesc);
1559
1560 caam_req->flc = &ctx->flc[DECRYPT];
1561 caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1562 caam_req->cbk = skcipher_decrypt_done;
1563 caam_req->ctx = &req->base;
1564 caam_req->edesc = edesc;
1565 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1566 if (ret != -EINPROGRESS &&
1567 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1568 skcipher_unmap(ctx->dev, edesc, req);
1569 qi_cache_free(edesc);
1570 }
1571
1572 return ret;
1573 }
1574
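/*
 * Map the flow contexts and the key with one DMA mapping (see the note
 * following struct caam_ctx). DKP-capable algorithms need it to be
 * bidirectional, since the device writes the derived split key back into
 * ctx->key.
 */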
1575 static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
1576 bool uses_dkp)
1577 {
1578 dma_addr_t dma_addr;
1579 int i;
1580
1581 /* copy descriptor header template value */
1582 ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
1583 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
1584
1585 ctx->dev = caam->dev;
1586 ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1587
1588 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
1589 offsetof(struct caam_ctx, flc_dma),
1590 ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1591 if (dma_mapping_error(ctx->dev, dma_addr)) {
1592 dev_err(ctx->dev, "unable to map key, shared descriptors\n");
1593 return -ENOMEM;
1594 }
1595
1596 for (i = 0; i < NUM_OP; i++)
1597 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
1598 ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
1599
1600 return 0;
1601 }
1602
1603 static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1604 {
1605 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1606 struct caam_skcipher_alg *caam_alg =
1607 container_of(alg, typeof(*caam_alg), skcipher);
1608 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
1609 u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
1610 int ret = 0;
1611
1612 if (alg_aai == OP_ALG_AAI_XTS) {
1613 const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
1614 struct crypto_skcipher *fallback;
1615
1616 fallback = crypto_alloc_skcipher(tfm_name, 0,
1617 CRYPTO_ALG_NEED_FALLBACK);
1618 if (IS_ERR(fallback)) {
1619 dev_err(caam_alg->caam.dev,
1620 "Failed to allocate %s fallback: %ld\n",
1621 tfm_name, PTR_ERR(fallback));
1622 return PTR_ERR(fallback);
1623 }
1624
1625 ctx->fallback = fallback;
1626 crypto_skcipher_set_reqsize_dma(
1627 tfm, sizeof(struct caam_request) +
1628 crypto_skcipher_reqsize(fallback));
1629 } else {
1630 crypto_skcipher_set_reqsize_dma(tfm,
1631 sizeof(struct caam_request));
1632 }
1633
1634 ret = caam_cra_init(ctx, &caam_alg->caam, false);
1635 if (ret && ctx->fallback)
1636 crypto_free_skcipher(ctx->fallback);
1637
1638 return ret;
1639 }
1640
1641 static int caam_cra_init_aead(struct crypto_aead *tfm)
1642 {
1643 struct aead_alg *alg = crypto_aead_alg(tfm);
1644 struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
1645 aead);
1646
1647 crypto_aead_set_reqsize_dma(tfm, sizeof(struct caam_request));
1648 return caam_cra_init(crypto_aead_ctx_dma(tfm), &caam_alg->caam,
1649 !caam_alg->caam.nodkp);
1650 }
1651
1652 static void caam_exit_common(struct caam_ctx *ctx)
1653 {
1654 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
1655 offsetof(struct caam_ctx, flc_dma), ctx->dir,
1656 DMA_ATTR_SKIP_CPU_SYNC);
1657 }
1658
1659 static void caam_cra_exit(struct crypto_skcipher *tfm)
1660 {
1661 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
1662
1663 if (ctx->fallback)
1664 crypto_free_skcipher(ctx->fallback);
1665 caam_exit_common(ctx);
1666 }
1667
1668 static void caam_cra_exit_aead(struct crypto_aead *tfm)
1669 {
1670 caam_exit_common(crypto_aead_ctx_dma(tfm));
1671 }
1672
1673 static struct caam_skcipher_alg driver_algs[] = {
1674 {
1675 .skcipher = {
1676 .base = {
1677 .cra_name = "cbc(aes)",
1678 .cra_driver_name = "cbc-aes-caam-qi2",
1679 .cra_blocksize = AES_BLOCK_SIZE,
1680 },
1681 .setkey = aes_skcipher_setkey,
1682 .encrypt = skcipher_encrypt,
1683 .decrypt = skcipher_decrypt,
1684 .min_keysize = AES_MIN_KEY_SIZE,
1685 .max_keysize = AES_MAX_KEY_SIZE,
1686 .ivsize = AES_BLOCK_SIZE,
1687 },
1688 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1689 },
1690 {
1691 .skcipher = {
1692 .base = {
1693 .cra_name = "cbc(des3_ede)",
1694 .cra_driver_name = "cbc-3des-caam-qi2",
1695 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1696 },
1697 .setkey = des3_skcipher_setkey,
1698 .encrypt = skcipher_encrypt,
1699 .decrypt = skcipher_decrypt,
1700 .min_keysize = DES3_EDE_KEY_SIZE,
1701 .max_keysize = DES3_EDE_KEY_SIZE,
1702 .ivsize = DES3_EDE_BLOCK_SIZE,
1703 },
1704 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1705 },
1706 {
1707 .skcipher = {
1708 .base = {
1709 .cra_name = "cbc(des)",
1710 .cra_driver_name = "cbc-des-caam-qi2",
1711 .cra_blocksize = DES_BLOCK_SIZE,
1712 },
1713 .setkey = des_skcipher_setkey,
1714 .encrypt = skcipher_encrypt,
1715 .decrypt = skcipher_decrypt,
1716 .min_keysize = DES_KEY_SIZE,
1717 .max_keysize = DES_KEY_SIZE,
1718 .ivsize = DES_BLOCK_SIZE,
1719 },
1720 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1721 },
1722 {
1723 .skcipher = {
1724 .base = {
1725 .cra_name = "ctr(aes)",
1726 .cra_driver_name = "ctr-aes-caam-qi2",
1727 .cra_blocksize = 1,
1728 },
1729 .setkey = ctr_skcipher_setkey,
1730 .encrypt = skcipher_encrypt,
1731 .decrypt = skcipher_decrypt,
1732 .min_keysize = AES_MIN_KEY_SIZE,
1733 .max_keysize = AES_MAX_KEY_SIZE,
1734 .ivsize = AES_BLOCK_SIZE,
1735 .chunksize = AES_BLOCK_SIZE,
1736 },
1737 .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1738 OP_ALG_AAI_CTR_MOD128,
1739 },
1740 {
1741 .skcipher = {
1742 .base = {
1743 .cra_name = "rfc3686(ctr(aes))",
1744 .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1745 .cra_blocksize = 1,
1746 },
1747 .setkey = rfc3686_skcipher_setkey,
1748 .encrypt = skcipher_encrypt,
1749 .decrypt = skcipher_decrypt,
1750 .min_keysize = AES_MIN_KEY_SIZE +
1751 CTR_RFC3686_NONCE_SIZE,
1752 .max_keysize = AES_MAX_KEY_SIZE +
1753 CTR_RFC3686_NONCE_SIZE,
1754 .ivsize = CTR_RFC3686_IV_SIZE,
1755 .chunksize = AES_BLOCK_SIZE,
1756 },
1757 .caam = {
1758 .class1_alg_type = OP_ALG_ALGSEL_AES |
1759 OP_ALG_AAI_CTR_MOD128,
1760 .rfc3686 = true,
1761 },
1762 },
1763 {
1764 .skcipher = {
1765 .base = {
1766 .cra_name = "xts(aes)",
1767 .cra_driver_name = "xts-aes-caam-qi2",
1768 .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
1769 .cra_blocksize = AES_BLOCK_SIZE,
1770 },
1771 .setkey = xts_skcipher_setkey,
1772 .encrypt = skcipher_encrypt,
1773 .decrypt = skcipher_decrypt,
1774 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1775 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1776 .ivsize = AES_BLOCK_SIZE,
1777 },
1778 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1779 },
1780 {
1781 .skcipher = {
1782 .base = {
1783 .cra_name = "chacha20",
1784 .cra_driver_name = "chacha20-caam-qi2",
1785 .cra_blocksize = 1,
1786 },
1787 .setkey = chacha20_skcipher_setkey,
1788 .encrypt = skcipher_encrypt,
1789 .decrypt = skcipher_decrypt,
1790 .min_keysize = CHACHA_KEY_SIZE,
1791 .max_keysize = CHACHA_KEY_SIZE,
1792 .ivsize = CHACHA_IV_SIZE,
1793 },
1794 .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1795 },
1796 };
1797
1798 static struct caam_aead_alg driver_aeads[] = {
1799 {
1800 .aead = {
1801 .base = {
1802 .cra_name = "rfc4106(gcm(aes))",
1803 .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1804 .cra_blocksize = 1,
1805 },
1806 .setkey = rfc4106_setkey,
1807 .setauthsize = rfc4106_setauthsize,
1808 .encrypt = ipsec_gcm_encrypt,
1809 .decrypt = ipsec_gcm_decrypt,
1810 .ivsize = 8,
1811 .maxauthsize = AES_BLOCK_SIZE,
1812 },
1813 .caam = {
1814 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1815 .nodkp = true,
1816 },
1817 },
1818 {
1819 .aead = {
1820 .base = {
1821 .cra_name = "rfc4543(gcm(aes))",
1822 .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1823 .cra_blocksize = 1,
1824 },
1825 .setkey = rfc4543_setkey,
1826 .setauthsize = rfc4543_setauthsize,
1827 .encrypt = ipsec_gcm_encrypt,
1828 .decrypt = ipsec_gcm_decrypt,
1829 .ivsize = 8,
1830 .maxauthsize = AES_BLOCK_SIZE,
1831 },
1832 .caam = {
1833 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1834 .nodkp = true,
1835 },
1836 },
1837 /* Galois Counter Mode */
1838 {
1839 .aead = {
1840 .base = {
1841 .cra_name = "gcm(aes)",
1842 .cra_driver_name = "gcm-aes-caam-qi2",
1843 .cra_blocksize = 1,
1844 },
1845 .setkey = gcm_setkey,
1846 .setauthsize = gcm_setauthsize,
1847 .encrypt = aead_encrypt,
1848 .decrypt = aead_decrypt,
1849 .ivsize = 12,
1850 .maxauthsize = AES_BLOCK_SIZE,
1851 },
1852 .caam = {
1853 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1854 .nodkp = true,
1855 }
1856 },
1857 /* single-pass ipsec_esp descriptor */
1858 {
1859 .aead = {
1860 .base = {
1861 .cra_name = "authenc(hmac(md5),cbc(aes))",
1862 .cra_driver_name = "authenc-hmac-md5-"
1863 "cbc-aes-caam-qi2",
1864 .cra_blocksize = AES_BLOCK_SIZE,
1865 },
1866 .setkey = aead_setkey,
1867 .setauthsize = aead_setauthsize,
1868 .encrypt = aead_encrypt,
1869 .decrypt = aead_decrypt,
1870 .ivsize = AES_BLOCK_SIZE,
1871 .maxauthsize = MD5_DIGEST_SIZE,
1872 },
1873 .caam = {
1874 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1875 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1876 OP_ALG_AAI_HMAC_PRECOMP,
1877 }
1878 },
1879 {
1880 .aead = {
1881 .base = {
1882 .cra_name = "echainiv(authenc(hmac(md5),"
1883 "cbc(aes)))",
1884 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1885 "cbc-aes-caam-qi2",
1886 .cra_blocksize = AES_BLOCK_SIZE,
1887 },
1888 .setkey = aead_setkey,
1889 .setauthsize = aead_setauthsize,
1890 .encrypt = aead_encrypt,
1891 .decrypt = aead_decrypt,
1892 .ivsize = AES_BLOCK_SIZE,
1893 .maxauthsize = MD5_DIGEST_SIZE,
1894 },
1895 .caam = {
1896 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1897 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1898 OP_ALG_AAI_HMAC_PRECOMP,
1899 .geniv = true,
1900 }
1901 },
1902 {
1903 .aead = {
1904 .base = {
1905 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1906 .cra_driver_name = "authenc-hmac-sha1-"
1907 "cbc-aes-caam-qi2",
1908 .cra_blocksize = AES_BLOCK_SIZE,
1909 },
1910 .setkey = aead_setkey,
1911 .setauthsize = aead_setauthsize,
1912 .encrypt = aead_encrypt,
1913 .decrypt = aead_decrypt,
1914 .ivsize = AES_BLOCK_SIZE,
1915 .maxauthsize = SHA1_DIGEST_SIZE,
1916 },
1917 .caam = {
1918 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1919 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1920 OP_ALG_AAI_HMAC_PRECOMP,
1921 }
1922 },
1923 {
1924 .aead = {
1925 .base = {
1926 .cra_name = "echainiv(authenc(hmac(sha1),"
1927 "cbc(aes)))",
1928 .cra_driver_name = "echainiv-authenc-"
1929 "hmac-sha1-cbc-aes-caam-qi2",
1930 .cra_blocksize = AES_BLOCK_SIZE,
1931 },
1932 .setkey = aead_setkey,
1933 .setauthsize = aead_setauthsize,
1934 .encrypt = aead_encrypt,
1935 .decrypt = aead_decrypt,
1936 .ivsize = AES_BLOCK_SIZE,
1937 .maxauthsize = SHA1_DIGEST_SIZE,
1938 },
1939 .caam = {
1940 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1941 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1942 OP_ALG_AAI_HMAC_PRECOMP,
1943 .geniv = true,
1944 },
1945 },
1946 {
1947 .aead = {
1948 .base = {
1949 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1950 .cra_driver_name = "authenc-hmac-sha224-"
1951 "cbc-aes-caam-qi2",
1952 .cra_blocksize = AES_BLOCK_SIZE,
1953 },
1954 .setkey = aead_setkey,
1955 .setauthsize = aead_setauthsize,
1956 .encrypt = aead_encrypt,
1957 .decrypt = aead_decrypt,
1958 .ivsize = AES_BLOCK_SIZE,
1959 .maxauthsize = SHA224_DIGEST_SIZE,
1960 },
1961 .caam = {
1962 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1963 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1964 OP_ALG_AAI_HMAC_PRECOMP,
1965 }
1966 },
1967 {
1968 .aead = {
1969 .base = {
1970 .cra_name = "echainiv(authenc(hmac(sha224),"
1971 "cbc(aes)))",
1972 .cra_driver_name = "echainiv-authenc-"
1973 "hmac-sha224-cbc-aes-caam-qi2",
1974 .cra_blocksize = AES_BLOCK_SIZE,
1975 },
1976 .setkey = aead_setkey,
1977 .setauthsize = aead_setauthsize,
1978 .encrypt = aead_encrypt,
1979 .decrypt = aead_decrypt,
1980 .ivsize = AES_BLOCK_SIZE,
1981 .maxauthsize = SHA224_DIGEST_SIZE,
1982 },
1983 .caam = {
1984 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1985 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1986 OP_ALG_AAI_HMAC_PRECOMP,
1987 .geniv = true,
1988 }
1989 },
1990 {
1991 .aead = {
1992 .base = {
1993 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1994 .cra_driver_name = "authenc-hmac-sha256-"
1995 "cbc-aes-caam-qi2",
1996 .cra_blocksize = AES_BLOCK_SIZE,
1997 },
1998 .setkey = aead_setkey,
1999 .setauthsize = aead_setauthsize,
2000 .encrypt = aead_encrypt,
2001 .decrypt = aead_decrypt,
2002 .ivsize = AES_BLOCK_SIZE,
2003 .maxauthsize = SHA256_DIGEST_SIZE,
2004 },
2005 .caam = {
2006 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2007 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2008 OP_ALG_AAI_HMAC_PRECOMP,
2009 }
2010 },
2011 {
2012 .aead = {
2013 .base = {
2014 .cra_name = "echainiv(authenc(hmac(sha256),"
2015 "cbc(aes)))",
2016 .cra_driver_name = "echainiv-authenc-"
2017 "hmac-sha256-cbc-aes-"
2018 "caam-qi2",
2019 .cra_blocksize = AES_BLOCK_SIZE,
2020 },
2021 .setkey = aead_setkey,
2022 .setauthsize = aead_setauthsize,
2023 .encrypt = aead_encrypt,
2024 .decrypt = aead_decrypt,
2025 .ivsize = AES_BLOCK_SIZE,
2026 .maxauthsize = SHA256_DIGEST_SIZE,
2027 },
2028 .caam = {
2029 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2030 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2031 OP_ALG_AAI_HMAC_PRECOMP,
2032 .geniv = true,
2033 }
2034 },
2035 {
2036 .aead = {
2037 .base = {
2038 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2039 .cra_driver_name = "authenc-hmac-sha384-"
2040 "cbc-aes-caam-qi2",
2041 .cra_blocksize = AES_BLOCK_SIZE,
2042 },
2043 .setkey = aead_setkey,
2044 .setauthsize = aead_setauthsize,
2045 .encrypt = aead_encrypt,
2046 .decrypt = aead_decrypt,
2047 .ivsize = AES_BLOCK_SIZE,
2048 .maxauthsize = SHA384_DIGEST_SIZE,
2049 },
2050 .caam = {
2051 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2052 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2053 OP_ALG_AAI_HMAC_PRECOMP,
2054 }
2055 },
2056 {
2057 .aead = {
2058 .base = {
2059 .cra_name = "echainiv(authenc(hmac(sha384),"
2060 "cbc(aes)))",
2061 .cra_driver_name = "echainiv-authenc-"
2062 "hmac-sha384-cbc-aes-"
2063 "caam-qi2",
2064 .cra_blocksize = AES_BLOCK_SIZE,
2065 },
2066 .setkey = aead_setkey,
2067 .setauthsize = aead_setauthsize,
2068 .encrypt = aead_encrypt,
2069 .decrypt = aead_decrypt,
2070 .ivsize = AES_BLOCK_SIZE,
2071 .maxauthsize = SHA384_DIGEST_SIZE,
2072 },
2073 .caam = {
2074 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2075 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2076 OP_ALG_AAI_HMAC_PRECOMP,
2077 .geniv = true,
2078 }
2079 },
2080 {
2081 .aead = {
2082 .base = {
2083 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2084 .cra_driver_name = "authenc-hmac-sha512-"
2085 "cbc-aes-caam-qi2",
2086 .cra_blocksize = AES_BLOCK_SIZE,
2087 },
2088 .setkey = aead_setkey,
2089 .setauthsize = aead_setauthsize,
2090 .encrypt = aead_encrypt,
2091 .decrypt = aead_decrypt,
2092 .ivsize = AES_BLOCK_SIZE,
2093 .maxauthsize = SHA512_DIGEST_SIZE,
2094 },
2095 .caam = {
2096 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2097 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2098 OP_ALG_AAI_HMAC_PRECOMP,
2099 }
2100 },
2101 {
2102 .aead = {
2103 .base = {
2104 .cra_name = "echainiv(authenc(hmac(sha512),"
2105 "cbc(aes)))",
2106 .cra_driver_name = "echainiv-authenc-"
2107 "hmac-sha512-cbc-aes-"
2108 "caam-qi2",
2109 .cra_blocksize = AES_BLOCK_SIZE,
2110 },
2111 .setkey = aead_setkey,
2112 .setauthsize = aead_setauthsize,
2113 .encrypt = aead_encrypt,
2114 .decrypt = aead_decrypt,
2115 .ivsize = AES_BLOCK_SIZE,
2116 .maxauthsize = SHA512_DIGEST_SIZE,
2117 },
2118 .caam = {
2119 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2120 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2121 OP_ALG_AAI_HMAC_PRECOMP,
2122 .geniv = true,
2123 }
2124 },
2125 {
2126 .aead = {
2127 .base = {
2128 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2129 .cra_driver_name = "authenc-hmac-md5-"
2130 "cbc-des3_ede-caam-qi2",
2131 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2132 },
2133 .setkey = des3_aead_setkey,
2134 .setauthsize = aead_setauthsize,
2135 .encrypt = aead_encrypt,
2136 .decrypt = aead_decrypt,
2137 .ivsize = DES3_EDE_BLOCK_SIZE,
2138 .maxauthsize = MD5_DIGEST_SIZE,
2139 },
2140 .caam = {
2141 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2142 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2143 OP_ALG_AAI_HMAC_PRECOMP,
2144 }
2145 },
2146 {
2147 .aead = {
2148 .base = {
2149 .cra_name = "echainiv(authenc(hmac(md5),"
2150 "cbc(des3_ede)))",
2151 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2152 "cbc-des3_ede-caam-qi2",
2153 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2154 },
2155 .setkey = des3_aead_setkey,
2156 .setauthsize = aead_setauthsize,
2157 .encrypt = aead_encrypt,
2158 .decrypt = aead_decrypt,
2159 .ivsize = DES3_EDE_BLOCK_SIZE,
2160 .maxauthsize = MD5_DIGEST_SIZE,
2161 },
2162 .caam = {
2163 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2164 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2165 OP_ALG_AAI_HMAC_PRECOMP,
2166 .geniv = true,
2167 }
2168 },
2169 {
2170 .aead = {
2171 .base = {
2172 .cra_name = "authenc(hmac(sha1),"
2173 "cbc(des3_ede))",
2174 .cra_driver_name = "authenc-hmac-sha1-"
2175 "cbc-des3_ede-caam-qi2",
2176 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2177 },
2178 .setkey = des3_aead_setkey,
2179 .setauthsize = aead_setauthsize,
2180 .encrypt = aead_encrypt,
2181 .decrypt = aead_decrypt,
2182 .ivsize = DES3_EDE_BLOCK_SIZE,
2183 .maxauthsize = SHA1_DIGEST_SIZE,
2184 },
2185 .caam = {
2186 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2187 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2188 OP_ALG_AAI_HMAC_PRECOMP,
2189 },
2190 },
2191 {
2192 .aead = {
2193 .base = {
2194 .cra_name = "echainiv(authenc(hmac(sha1),"
2195 "cbc(des3_ede)))",
2196 .cra_driver_name = "echainiv-authenc-"
2197 "hmac-sha1-"
2198 "cbc-des3_ede-caam-qi2",
2199 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2200 },
2201 .setkey = des3_aead_setkey,
2202 .setauthsize = aead_setauthsize,
2203 .encrypt = aead_encrypt,
2204 .decrypt = aead_decrypt,
2205 .ivsize = DES3_EDE_BLOCK_SIZE,
2206 .maxauthsize = SHA1_DIGEST_SIZE,
2207 },
2208 .caam = {
2209 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2210 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2211 OP_ALG_AAI_HMAC_PRECOMP,
2212 .geniv = true,
2213 }
2214 },
2215 {
2216 .aead = {
2217 .base = {
2218 .cra_name = "authenc(hmac(sha224),"
2219 "cbc(des3_ede))",
2220 .cra_driver_name = "authenc-hmac-sha224-"
2221 "cbc-des3_ede-caam-qi2",
2222 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2223 },
2224 .setkey = des3_aead_setkey,
2225 .setauthsize = aead_setauthsize,
2226 .encrypt = aead_encrypt,
2227 .decrypt = aead_decrypt,
2228 .ivsize = DES3_EDE_BLOCK_SIZE,
2229 .maxauthsize = SHA224_DIGEST_SIZE,
2230 },
2231 .caam = {
2232 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2233 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2234 OP_ALG_AAI_HMAC_PRECOMP,
2235 },
2236 },
2237 {
2238 .aead = {
2239 .base = {
2240 .cra_name = "echainiv(authenc(hmac(sha224),"
2241 "cbc(des3_ede)))",
2242 .cra_driver_name = "echainiv-authenc-"
2243 "hmac-sha224-"
2244 "cbc-des3_ede-caam-qi2",
2245 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2246 },
2247 .setkey = des3_aead_setkey,
2248 .setauthsize = aead_setauthsize,
2249 .encrypt = aead_encrypt,
2250 .decrypt = aead_decrypt,
2251 .ivsize = DES3_EDE_BLOCK_SIZE,
2252 .maxauthsize = SHA224_DIGEST_SIZE,
2253 },
2254 .caam = {
2255 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2256 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2257 OP_ALG_AAI_HMAC_PRECOMP,
2258 .geniv = true,
2259 }
2260 },
2261 {
2262 .aead = {
2263 .base = {
2264 .cra_name = "authenc(hmac(sha256),"
2265 "cbc(des3_ede))",
2266 .cra_driver_name = "authenc-hmac-sha256-"
2267 "cbc-des3_ede-caam-qi2",
2268 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2269 },
2270 .setkey = des3_aead_setkey,
2271 .setauthsize = aead_setauthsize,
2272 .encrypt = aead_encrypt,
2273 .decrypt = aead_decrypt,
2274 .ivsize = DES3_EDE_BLOCK_SIZE,
2275 .maxauthsize = SHA256_DIGEST_SIZE,
2276 },
2277 .caam = {
2278 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2279 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2280 OP_ALG_AAI_HMAC_PRECOMP,
2281 },
2282 },
2283 {
2284 .aead = {
2285 .base = {
2286 .cra_name = "echainiv(authenc(hmac(sha256),"
2287 "cbc(des3_ede)))",
2288 .cra_driver_name = "echainiv-authenc-"
2289 "hmac-sha256-"
2290 "cbc-des3_ede-caam-qi2",
2291 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2292 },
2293 .setkey = des3_aead_setkey,
2294 .setauthsize = aead_setauthsize,
2295 .encrypt = aead_encrypt,
2296 .decrypt = aead_decrypt,
2297 .ivsize = DES3_EDE_BLOCK_SIZE,
2298 .maxauthsize = SHA256_DIGEST_SIZE,
2299 },
2300 .caam = {
2301 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2302 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2303 OP_ALG_AAI_HMAC_PRECOMP,
2304 .geniv = true,
2305 }
2306 },
2307 {
2308 .aead = {
2309 .base = {
2310 .cra_name = "authenc(hmac(sha384),"
2311 "cbc(des3_ede))",
2312 .cra_driver_name = "authenc-hmac-sha384-"
2313 "cbc-des3_ede-caam-qi2",
2314 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2315 },
2316 .setkey = des3_aead_setkey,
2317 .setauthsize = aead_setauthsize,
2318 .encrypt = aead_encrypt,
2319 .decrypt = aead_decrypt,
2320 .ivsize = DES3_EDE_BLOCK_SIZE,
2321 .maxauthsize = SHA384_DIGEST_SIZE,
2322 },
2323 .caam = {
2324 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2325 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2326 OP_ALG_AAI_HMAC_PRECOMP,
2327 },
2328 },
2329 {
2330 .aead = {
2331 .base = {
2332 .cra_name = "echainiv(authenc(hmac(sha384),"
2333 "cbc(des3_ede)))",
2334 .cra_driver_name = "echainiv-authenc-"
2335 "hmac-sha384-"
2336 "cbc-des3_ede-caam-qi2",
2337 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2338 },
2339 .setkey = des3_aead_setkey,
2340 .setauthsize = aead_setauthsize,
2341 .encrypt = aead_encrypt,
2342 .decrypt = aead_decrypt,
2343 .ivsize = DES3_EDE_BLOCK_SIZE,
2344 .maxauthsize = SHA384_DIGEST_SIZE,
2345 },
2346 .caam = {
2347 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2348 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2349 OP_ALG_AAI_HMAC_PRECOMP,
2350 .geniv = true,
2351 }
2352 },
2353 {
2354 .aead = {
2355 .base = {
2356 .cra_name = "authenc(hmac(sha512),"
2357 "cbc(des3_ede))",
2358 .cra_driver_name = "authenc-hmac-sha512-"
2359 "cbc-des3_ede-caam-qi2",
2360 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2361 },
2362 .setkey = des3_aead_setkey,
2363 .setauthsize = aead_setauthsize,
2364 .encrypt = aead_encrypt,
2365 .decrypt = aead_decrypt,
2366 .ivsize = DES3_EDE_BLOCK_SIZE,
2367 .maxauthsize = SHA512_DIGEST_SIZE,
2368 },
2369 .caam = {
2370 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2371 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2372 OP_ALG_AAI_HMAC_PRECOMP,
2373 },
2374 },
2375 {
2376 .aead = {
2377 .base = {
2378 .cra_name = "echainiv(authenc(hmac(sha512),"
2379 "cbc(des3_ede)))",
2380 .cra_driver_name = "echainiv-authenc-"
2381 "hmac-sha512-"
2382 "cbc-des3_ede-caam-qi2",
2383 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2384 },
2385 .setkey = des3_aead_setkey,
2386 .setauthsize = aead_setauthsize,
2387 .encrypt = aead_encrypt,
2388 .decrypt = aead_decrypt,
2389 .ivsize = DES3_EDE_BLOCK_SIZE,
2390 .maxauthsize = SHA512_DIGEST_SIZE,
2391 },
2392 .caam = {
2393 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2394 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2395 OP_ALG_AAI_HMAC_PRECOMP,
2396 .geniv = true,
2397 }
2398 },
2399 {
2400 .aead = {
2401 .base = {
2402 .cra_name = "authenc(hmac(md5),cbc(des))",
2403 .cra_driver_name = "authenc-hmac-md5-"
2404 "cbc-des-caam-qi2",
2405 .cra_blocksize = DES_BLOCK_SIZE,
2406 },
2407 .setkey = aead_setkey,
2408 .setauthsize = aead_setauthsize,
2409 .encrypt = aead_encrypt,
2410 .decrypt = aead_decrypt,
2411 .ivsize = DES_BLOCK_SIZE,
2412 .maxauthsize = MD5_DIGEST_SIZE,
2413 },
2414 .caam = {
2415 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2416 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2417 OP_ALG_AAI_HMAC_PRECOMP,
2418 },
2419 },
2420 {
2421 .aead = {
2422 .base = {
2423 .cra_name = "echainiv(authenc(hmac(md5),"
2424 "cbc(des)))",
2425 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2426 "cbc-des-caam-qi2",
2427 .cra_blocksize = DES_BLOCK_SIZE,
2428 },
2429 .setkey = aead_setkey,
2430 .setauthsize = aead_setauthsize,
2431 .encrypt = aead_encrypt,
2432 .decrypt = aead_decrypt,
2433 .ivsize = DES_BLOCK_SIZE,
2434 .maxauthsize = MD5_DIGEST_SIZE,
2435 },
2436 .caam = {
2437 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2438 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2439 OP_ALG_AAI_HMAC_PRECOMP,
2440 .geniv = true,
2441 }
2442 },
2443 {
2444 .aead = {
2445 .base = {
2446 .cra_name = "authenc(hmac(sha1),cbc(des))",
2447 .cra_driver_name = "authenc-hmac-sha1-"
2448 "cbc-des-caam-qi2",
2449 .cra_blocksize = DES_BLOCK_SIZE,
2450 },
2451 .setkey = aead_setkey,
2452 .setauthsize = aead_setauthsize,
2453 .encrypt = aead_encrypt,
2454 .decrypt = aead_decrypt,
2455 .ivsize = DES_BLOCK_SIZE,
2456 .maxauthsize = SHA1_DIGEST_SIZE,
2457 },
2458 .caam = {
2459 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2460 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2461 OP_ALG_AAI_HMAC_PRECOMP,
2462 },
2463 },
2464 {
2465 .aead = {
2466 .base = {
2467 .cra_name = "echainiv(authenc(hmac(sha1),"
2468 "cbc(des)))",
2469 .cra_driver_name = "echainiv-authenc-"
2470 "hmac-sha1-cbc-des-caam-qi2",
2471 .cra_blocksize = DES_BLOCK_SIZE,
2472 },
2473 .setkey = aead_setkey,
2474 .setauthsize = aead_setauthsize,
2475 .encrypt = aead_encrypt,
2476 .decrypt = aead_decrypt,
2477 .ivsize = DES_BLOCK_SIZE,
2478 .maxauthsize = SHA1_DIGEST_SIZE,
2479 },
2480 .caam = {
2481 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2482 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2483 OP_ALG_AAI_HMAC_PRECOMP,
2484 .geniv = true,
2485 }
2486 },
2487 {
2488 .aead = {
2489 .base = {
2490 .cra_name = "authenc(hmac(sha224),cbc(des))",
2491 .cra_driver_name = "authenc-hmac-sha224-"
2492 "cbc-des-caam-qi2",
2493 .cra_blocksize = DES_BLOCK_SIZE,
2494 },
2495 .setkey = aead_setkey,
2496 .setauthsize = aead_setauthsize,
2497 .encrypt = aead_encrypt,
2498 .decrypt = aead_decrypt,
2499 .ivsize = DES_BLOCK_SIZE,
2500 .maxauthsize = SHA224_DIGEST_SIZE,
2501 },
2502 .caam = {
2503 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2504 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2505 OP_ALG_AAI_HMAC_PRECOMP,
2506 },
2507 },
2508 {
2509 .aead = {
2510 .base = {
2511 .cra_name = "echainiv(authenc(hmac(sha224),"
2512 "cbc(des)))",
2513 .cra_driver_name = "echainiv-authenc-"
2514 "hmac-sha224-cbc-des-"
2515 "caam-qi2",
2516 .cra_blocksize = DES_BLOCK_SIZE,
2517 },
2518 .setkey = aead_setkey,
2519 .setauthsize = aead_setauthsize,
2520 .encrypt = aead_encrypt,
2521 .decrypt = aead_decrypt,
2522 .ivsize = DES_BLOCK_SIZE,
2523 .maxauthsize = SHA224_DIGEST_SIZE,
2524 },
2525 .caam = {
2526 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2527 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2528 OP_ALG_AAI_HMAC_PRECOMP,
2529 .geniv = true,
2530 }
2531 },
2532 {
2533 .aead = {
2534 .base = {
2535 .cra_name = "authenc(hmac(sha256),cbc(des))",
2536 .cra_driver_name = "authenc-hmac-sha256-"
2537 "cbc-des-caam-qi2",
2538 .cra_blocksize = DES_BLOCK_SIZE,
2539 },
2540 .setkey = aead_setkey,
2541 .setauthsize = aead_setauthsize,
2542 .encrypt = aead_encrypt,
2543 .decrypt = aead_decrypt,
2544 .ivsize = DES_BLOCK_SIZE,
2545 .maxauthsize = SHA256_DIGEST_SIZE,
2546 },
2547 .caam = {
2548 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2549 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2550 OP_ALG_AAI_HMAC_PRECOMP,
2551 },
2552 },
2553 {
2554 .aead = {
2555 .base = {
2556 .cra_name = "echainiv(authenc(hmac(sha256),"
2557 "cbc(des)))",
2558 .cra_driver_name = "echainiv-authenc-"
2559 "hmac-sha256-cbc-des-"
2560 "caam-qi2",
2561 .cra_blocksize = DES_BLOCK_SIZE,
2562 },
2563 .setkey = aead_setkey,
2564 .setauthsize = aead_setauthsize,
2565 .encrypt = aead_encrypt,
2566 .decrypt = aead_decrypt,
2567 .ivsize = DES_BLOCK_SIZE,
2568 .maxauthsize = SHA256_DIGEST_SIZE,
2569 },
2570 .caam = {
2571 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2572 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2573 OP_ALG_AAI_HMAC_PRECOMP,
2574 .geniv = true,
2575 },
2576 },
2577 {
2578 .aead = {
2579 .base = {
2580 .cra_name = "authenc(hmac(sha384),cbc(des))",
2581 .cra_driver_name = "authenc-hmac-sha384-"
2582 "cbc-des-caam-qi2",
2583 .cra_blocksize = DES_BLOCK_SIZE,
2584 },
2585 .setkey = aead_setkey,
2586 .setauthsize = aead_setauthsize,
2587 .encrypt = aead_encrypt,
2588 .decrypt = aead_decrypt,
2589 .ivsize = DES_BLOCK_SIZE,
2590 .maxauthsize = SHA384_DIGEST_SIZE,
2591 },
2592 .caam = {
2593 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2594 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2595 OP_ALG_AAI_HMAC_PRECOMP,
2596 },
2597 },
2598 {
2599 .aead = {
2600 .base = {
2601 .cra_name = "echainiv(authenc(hmac(sha384),"
2602 "cbc(des)))",
2603 .cra_driver_name = "echainiv-authenc-"
2604 "hmac-sha384-cbc-des-"
2605 "caam-qi2",
2606 .cra_blocksize = DES_BLOCK_SIZE,
2607 },
2608 .setkey = aead_setkey,
2609 .setauthsize = aead_setauthsize,
2610 .encrypt = aead_encrypt,
2611 .decrypt = aead_decrypt,
2612 .ivsize = DES_BLOCK_SIZE,
2613 .maxauthsize = SHA384_DIGEST_SIZE,
2614 },
2615 .caam = {
2616 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2617 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2618 OP_ALG_AAI_HMAC_PRECOMP,
2619 .geniv = true,
2620 }
2621 },
2622 {
2623 .aead = {
2624 .base = {
2625 .cra_name = "authenc(hmac(sha512),cbc(des))",
2626 .cra_driver_name = "authenc-hmac-sha512-"
2627 "cbc-des-caam-qi2",
2628 .cra_blocksize = DES_BLOCK_SIZE,
2629 },
2630 .setkey = aead_setkey,
2631 .setauthsize = aead_setauthsize,
2632 .encrypt = aead_encrypt,
2633 .decrypt = aead_decrypt,
2634 .ivsize = DES_BLOCK_SIZE,
2635 .maxauthsize = SHA512_DIGEST_SIZE,
2636 },
2637 .caam = {
2638 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2639 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2640 OP_ALG_AAI_HMAC_PRECOMP,
2641 }
2642 },
2643 {
2644 .aead = {
2645 .base = {
2646 .cra_name = "echainiv(authenc(hmac(sha512),"
2647 "cbc(des)))",
2648 .cra_driver_name = "echainiv-authenc-"
2649 "hmac-sha512-cbc-des-"
2650 "caam-qi2",
2651 .cra_blocksize = DES_BLOCK_SIZE,
2652 },
2653 .setkey = aead_setkey,
2654 .setauthsize = aead_setauthsize,
2655 .encrypt = aead_encrypt,
2656 .decrypt = aead_decrypt,
2657 .ivsize = DES_BLOCK_SIZE,
2658 .maxauthsize = SHA512_DIGEST_SIZE,
2659 },
2660 .caam = {
2661 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2662 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2663 OP_ALG_AAI_HMAC_PRECOMP,
2664 .geniv = true,
2665 }
2666 },
2667 {
2668 .aead = {
2669 .base = {
2670 .cra_name = "authenc(hmac(md5),"
2671 "rfc3686(ctr(aes)))",
2672 .cra_driver_name = "authenc-hmac-md5-"
2673 "rfc3686-ctr-aes-caam-qi2",
2674 .cra_blocksize = 1,
2675 },
2676 .setkey = aead_setkey,
2677 .setauthsize = aead_setauthsize,
2678 .encrypt = aead_encrypt,
2679 .decrypt = aead_decrypt,
2680 .ivsize = CTR_RFC3686_IV_SIZE,
2681 .maxauthsize = MD5_DIGEST_SIZE,
2682 },
2683 .caam = {
2684 .class1_alg_type = OP_ALG_ALGSEL_AES |
2685 OP_ALG_AAI_CTR_MOD128,
2686 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2687 OP_ALG_AAI_HMAC_PRECOMP,
2688 .rfc3686 = true,
2689 },
2690 },
2691 {
2692 .aead = {
2693 .base = {
2694 .cra_name = "seqiv(authenc("
2695 "hmac(md5),rfc3686(ctr(aes))))",
2696 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2697 "rfc3686-ctr-aes-caam-qi2",
2698 .cra_blocksize = 1,
2699 },
2700 .setkey = aead_setkey,
2701 .setauthsize = aead_setauthsize,
2702 .encrypt = aead_encrypt,
2703 .decrypt = aead_decrypt,
2704 .ivsize = CTR_RFC3686_IV_SIZE,
2705 .maxauthsize = MD5_DIGEST_SIZE,
2706 },
2707 .caam = {
2708 .class1_alg_type = OP_ALG_ALGSEL_AES |
2709 OP_ALG_AAI_CTR_MOD128,
2710 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2711 OP_ALG_AAI_HMAC_PRECOMP,
2712 .rfc3686 = true,
2713 .geniv = true,
2714 },
2715 },
2716 {
2717 .aead = {
2718 .base = {
2719 .cra_name = "authenc(hmac(sha1),"
2720 "rfc3686(ctr(aes)))",
2721 .cra_driver_name = "authenc-hmac-sha1-"
2722 "rfc3686-ctr-aes-caam-qi2",
2723 .cra_blocksize = 1,
2724 },
2725 .setkey = aead_setkey,
2726 .setauthsize = aead_setauthsize,
2727 .encrypt = aead_encrypt,
2728 .decrypt = aead_decrypt,
2729 .ivsize = CTR_RFC3686_IV_SIZE,
2730 .maxauthsize = SHA1_DIGEST_SIZE,
2731 },
2732 .caam = {
2733 .class1_alg_type = OP_ALG_ALGSEL_AES |
2734 OP_ALG_AAI_CTR_MOD128,
2735 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2736 OP_ALG_AAI_HMAC_PRECOMP,
2737 .rfc3686 = true,
2738 },
2739 },
2740 {
2741 .aead = {
2742 .base = {
2743 .cra_name = "seqiv(authenc("
2744 "hmac(sha1),rfc3686(ctr(aes))))",
2745 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2746 "rfc3686-ctr-aes-caam-qi2",
2747 .cra_blocksize = 1,
2748 },
2749 .setkey = aead_setkey,
2750 .setauthsize = aead_setauthsize,
2751 .encrypt = aead_encrypt,
2752 .decrypt = aead_decrypt,
2753 .ivsize = CTR_RFC3686_IV_SIZE,
2754 .maxauthsize = SHA1_DIGEST_SIZE,
2755 },
2756 .caam = {
2757 .class1_alg_type = OP_ALG_ALGSEL_AES |
2758 OP_ALG_AAI_CTR_MOD128,
2759 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2760 OP_ALG_AAI_HMAC_PRECOMP,
2761 .rfc3686 = true,
2762 .geniv = true,
2763 },
2764 },
2765 {
2766 .aead = {
2767 .base = {
2768 .cra_name = "authenc(hmac(sha224),"
2769 "rfc3686(ctr(aes)))",
2770 .cra_driver_name = "authenc-hmac-sha224-"
2771 "rfc3686-ctr-aes-caam-qi2",
2772 .cra_blocksize = 1,
2773 },
2774 .setkey = aead_setkey,
2775 .setauthsize = aead_setauthsize,
2776 .encrypt = aead_encrypt,
2777 .decrypt = aead_decrypt,
2778 .ivsize = CTR_RFC3686_IV_SIZE,
2779 .maxauthsize = SHA224_DIGEST_SIZE,
2780 },
2781 .caam = {
2782 .class1_alg_type = OP_ALG_ALGSEL_AES |
2783 OP_ALG_AAI_CTR_MOD128,
2784 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2785 OP_ALG_AAI_HMAC_PRECOMP,
2786 .rfc3686 = true,
2787 },
2788 },
2789 {
2790 .aead = {
2791 .base = {
2792 .cra_name = "seqiv(authenc("
2793 "hmac(sha224),rfc3686(ctr(aes))))",
2794 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
2795 "rfc3686-ctr-aes-caam-qi2",
2796 .cra_blocksize = 1,
2797 },
2798 .setkey = aead_setkey,
2799 .setauthsize = aead_setauthsize,
2800 .encrypt = aead_encrypt,
2801 .decrypt = aead_decrypt,
2802 .ivsize = CTR_RFC3686_IV_SIZE,
2803 .maxauthsize = SHA224_DIGEST_SIZE,
2804 },
2805 .caam = {
2806 .class1_alg_type = OP_ALG_ALGSEL_AES |
2807 OP_ALG_AAI_CTR_MOD128,
2808 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2809 OP_ALG_AAI_HMAC_PRECOMP,
2810 .rfc3686 = true,
2811 .geniv = true,
2812 },
2813 },
2814 {
2815 .aead = {
2816 .base = {
2817 .cra_name = "authenc(hmac(sha256),"
2818 "rfc3686(ctr(aes)))",
2819 .cra_driver_name = "authenc-hmac-sha256-"
2820 "rfc3686-ctr-aes-caam-qi2",
2821 .cra_blocksize = 1,
2822 },
2823 .setkey = aead_setkey,
2824 .setauthsize = aead_setauthsize,
2825 .encrypt = aead_encrypt,
2826 .decrypt = aead_decrypt,
2827 .ivsize = CTR_RFC3686_IV_SIZE,
2828 .maxauthsize = SHA256_DIGEST_SIZE,
2829 },
2830 .caam = {
2831 .class1_alg_type = OP_ALG_ALGSEL_AES |
2832 OP_ALG_AAI_CTR_MOD128,
2833 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2834 OP_ALG_AAI_HMAC_PRECOMP,
2835 .rfc3686 = true,
2836 },
2837 },
2838 {
2839 .aead = {
2840 .base = {
2841 .cra_name = "seqiv(authenc(hmac(sha256),"
2842 "rfc3686(ctr(aes))))",
2843 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
2844 "rfc3686-ctr-aes-caam-qi2",
2845 .cra_blocksize = 1,
2846 },
2847 .setkey = aead_setkey,
2848 .setauthsize = aead_setauthsize,
2849 .encrypt = aead_encrypt,
2850 .decrypt = aead_decrypt,
2851 .ivsize = CTR_RFC3686_IV_SIZE,
2852 .maxauthsize = SHA256_DIGEST_SIZE,
2853 },
2854 .caam = {
2855 .class1_alg_type = OP_ALG_ALGSEL_AES |
2856 OP_ALG_AAI_CTR_MOD128,
2857 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2858 OP_ALG_AAI_HMAC_PRECOMP,
2859 .rfc3686 = true,
2860 .geniv = true,
2861 },
2862 },
2863 {
2864 .aead = {
2865 .base = {
2866 .cra_name = "authenc(hmac(sha384),"
2867 "rfc3686(ctr(aes)))",
2868 .cra_driver_name = "authenc-hmac-sha384-"
2869 "rfc3686-ctr-aes-caam-qi2",
2870 .cra_blocksize = 1,
2871 },
2872 .setkey = aead_setkey,
2873 .setauthsize = aead_setauthsize,
2874 .encrypt = aead_encrypt,
2875 .decrypt = aead_decrypt,
2876 .ivsize = CTR_RFC3686_IV_SIZE,
2877 .maxauthsize = SHA384_DIGEST_SIZE,
2878 },
2879 .caam = {
2880 .class1_alg_type = OP_ALG_ALGSEL_AES |
2881 OP_ALG_AAI_CTR_MOD128,
2882 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2883 OP_ALG_AAI_HMAC_PRECOMP,
2884 .rfc3686 = true,
2885 },
2886 },
2887 {
2888 .aead = {
2889 .base = {
2890 .cra_name = "seqiv(authenc(hmac(sha384),"
2891 "rfc3686(ctr(aes))))",
2892 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
2893 "rfc3686-ctr-aes-caam-qi2",
2894 .cra_blocksize = 1,
2895 },
2896 .setkey = aead_setkey,
2897 .setauthsize = aead_setauthsize,
2898 .encrypt = aead_encrypt,
2899 .decrypt = aead_decrypt,
2900 .ivsize = CTR_RFC3686_IV_SIZE,
2901 .maxauthsize = SHA384_DIGEST_SIZE,
2902 },
2903 .caam = {
2904 .class1_alg_type = OP_ALG_ALGSEL_AES |
2905 OP_ALG_AAI_CTR_MOD128,
2906 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2907 OP_ALG_AAI_HMAC_PRECOMP,
2908 .rfc3686 = true,
2909 .geniv = true,
2910 },
2911 },
2912 {
2913 .aead = {
2914 .base = {
2915 .cra_name = "rfc7539(chacha20,poly1305)",
2916 .cra_driver_name = "rfc7539-chacha20-poly1305-"
2917 "caam-qi2",
2918 .cra_blocksize = 1,
2919 },
2920 .setkey = chachapoly_setkey,
2921 .setauthsize = chachapoly_setauthsize,
2922 .encrypt = aead_encrypt,
2923 .decrypt = aead_decrypt,
2924 .ivsize = CHACHAPOLY_IV_SIZE,
2925 .maxauthsize = POLY1305_DIGEST_SIZE,
2926 },
2927 .caam = {
2928 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2929 OP_ALG_AAI_AEAD,
2930 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2931 OP_ALG_AAI_AEAD,
2932 .nodkp = true,
2933 },
2934 },
2935 {
2936 .aead = {
2937 .base = {
2938 .cra_name = "rfc7539esp(chacha20,poly1305)",
2939 .cra_driver_name = "rfc7539esp-chacha20-"
2940 "poly1305-caam-qi2",
2941 .cra_blocksize = 1,
2942 },
2943 .setkey = chachapoly_setkey,
2944 .setauthsize = chachapoly_setauthsize,
2945 .encrypt = aead_encrypt,
2946 .decrypt = aead_decrypt,
2947 .ivsize = 8,
2948 .maxauthsize = POLY1305_DIGEST_SIZE,
2949 },
2950 .caam = {
2951 .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
2952 OP_ALG_AAI_AEAD,
2953 .class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
2954 OP_ALG_AAI_AEAD,
2955 .nodkp = true,
2956 },
2957 },
2958 {
2959 .aead = {
2960 .base = {
2961 .cra_name = "authenc(hmac(sha512),"
2962 "rfc3686(ctr(aes)))",
2963 .cra_driver_name = "authenc-hmac-sha512-"
2964 "rfc3686-ctr-aes-caam-qi2",
2965 .cra_blocksize = 1,
2966 },
2967 .setkey = aead_setkey,
2968 .setauthsize = aead_setauthsize,
2969 .encrypt = aead_encrypt,
2970 .decrypt = aead_decrypt,
2971 .ivsize = CTR_RFC3686_IV_SIZE,
2972 .maxauthsize = SHA512_DIGEST_SIZE,
2973 },
2974 .caam = {
2975 .class1_alg_type = OP_ALG_ALGSEL_AES |
2976 OP_ALG_AAI_CTR_MOD128,
2977 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2978 OP_ALG_AAI_HMAC_PRECOMP,
2979 .rfc3686 = true,
2980 },
2981 },
2982 {
2983 .aead = {
2984 .base = {
2985 .cra_name = "seqiv(authenc(hmac(sha512),"
2986 "rfc3686(ctr(aes))))",
2987 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
2988 "rfc3686-ctr-aes-caam-qi2",
2989 .cra_blocksize = 1,
2990 },
2991 .setkey = aead_setkey,
2992 .setauthsize = aead_setauthsize,
2993 .encrypt = aead_encrypt,
2994 .decrypt = aead_decrypt,
2995 .ivsize = CTR_RFC3686_IV_SIZE,
2996 .maxauthsize = SHA512_DIGEST_SIZE,
2997 },
2998 .caam = {
2999 .class1_alg_type = OP_ALG_ALGSEL_AES |
3000 OP_ALG_AAI_CTR_MOD128,
3001 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3002 OP_ALG_AAI_HMAC_PRECOMP,
3003 .rfc3686 = true,
3004 .geniv = true,
3005 },
3006 },
3007 };
3008
3009 static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
3010 {
3011 struct skcipher_alg *alg = &t_alg->skcipher;
3012
3013 alg->base.cra_module = THIS_MODULE;
3014 alg->base.cra_priority = CAAM_CRA_PRIORITY;
3015 alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
3016 alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3017 CRYPTO_ALG_KERN_DRIVER_ONLY);
3018
3019 alg->init = caam_cra_init_skcipher;
3020 alg->exit = caam_cra_exit;
3021 }
3022
3023 static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
3024 {
3025 struct aead_alg *alg = &t_alg->aead;
3026
3027 alg->base.cra_module = THIS_MODULE;
3028 alg->base.cra_priority = CAAM_CRA_PRIORITY;
3029 alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
3030 alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
3031 CRYPTO_ALG_KERN_DRIVER_ONLY;
3032
3033 alg->init = caam_cra_init_aead;
3034 alg->exit = caam_cra_exit_aead;
3035 }
3036
3037 /* max hash key is max split key size */
3038 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
3039
3040 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
3041
3042 /* caam context sizes for hashes: running digest + 8 */
3043 #define HASH_MSG_LEN 8
3044 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
3045
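/* Indices into the per-session flc[]/flc_dma[] arrays, one per shared descriptor */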
3046 enum hash_optype {
3047 UPDATE = 0,
3048 UPDATE_FIRST,
3049 FINALIZE,
3050 DIGEST,
3051 HASH_NUM_OP
3052 };
3053
3054 /**
3055 * struct caam_hash_ctx - ahash per-session context
3056 * @flc: Flow Contexts array
3057 * @key: authentication key
3058 * @flc_dma: I/O virtual addresses of the Flow Contexts
3059 * @dev: dpseci device
3060 * @ctx_len: size of Context Register
3061 * @adata: hashing algorithm details
3062 */
3063 struct caam_hash_ctx {
3064 struct caam_flc flc[HASH_NUM_OP];
3065 u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3066 dma_addr_t flc_dma[HASH_NUM_OP];
3067 struct device *dev;
3068 int ctx_len;
3069 struct alginfo adata;
3070 };
3071
3072 /* ahash state */
3073 struct caam_hash_state {
3074 struct caam_request caam_req;
3075 dma_addr_t buf_dma;
3076 dma_addr_t ctx_dma;
3077 int ctx_dma_len;
3078 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
3079 int buflen;
3080 int next_buflen;
3081 u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
3082 int (*update)(struct ahash_request *req);
3083 int (*final)(struct ahash_request *req);
3084 int (*finup)(struct ahash_request *req);
3085 };
3086
3087 struct caam_export_state {
3088 u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
3089 u8 caam_ctx[MAX_CTX_LEN];
3090 int buflen;
3091 int (*update)(struct ahash_request *req);
3092 int (*final)(struct ahash_request *req);
3093 int (*finup)(struct ahash_request *req);
3094 };
3095
3096 /* Map current buffer in state (if length > 0) and put it in link table */
3097 static inline int buf_map_to_qm_sg(struct device *dev,
3098 struct dpaa2_sg_entry *qm_sg,
3099 struct caam_hash_state *state)
3100 {
3101 int buflen = state->buflen;
3102
3103 if (!buflen)
3104 return 0;
3105
3106 state->buf_dma = dma_map_single(dev, state->buf, buflen,
3107 DMA_TO_DEVICE);
3108 if (dma_mapping_error(dev, state->buf_dma)) {
3109 dev_err(dev, "unable to map buf\n");
3110 state->buf_dma = 0;
3111 return -ENOMEM;
3112 }
3113
3114 dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
3115
3116 return 0;
3117 }
3118
3119 /* Map state->caam_ctx, and add it to link table */
3120 static inline int ctx_map_to_qm_sg(struct device *dev,
3121 struct caam_hash_state *state, int ctx_len,
3122 struct dpaa2_sg_entry *qm_sg, u32 flag)
3123 {
3124 state->ctx_dma_len = ctx_len;
3125 state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
3126 if (dma_mapping_error(dev, state->ctx_dma)) {
3127 dev_err(dev, "unable to map ctx\n");
3128 state->ctx_dma = 0;
3129 return -ENOMEM;
3130 }
3131
3132 dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
3133
3134 return 0;
3135 }
3136
3137 static int ahash_set_sh_desc(struct crypto_ahash *ahash)
3138 {
3139 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3140 int digestsize = crypto_ahash_digestsize(ahash);
3141 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
3142 struct caam_flc *flc;
3143 u32 *desc;
3144
3145 /* ahash_update shared descriptor */
3146 flc = &ctx->flc[UPDATE];
3147 desc = flc->sh_desc;
3148 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3149 ctx->ctx_len, true, priv->sec_attr.era);
3150 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3151 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3152 desc_bytes(desc), DMA_BIDIRECTIONAL);
3153 print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
3154 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3155 1);
3156
3157 /* ahash_update_first shared descriptor */
3158 flc = &ctx->flc[UPDATE_FIRST];
3159 desc = flc->sh_desc;
3160 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3161 ctx->ctx_len, false, priv->sec_attr.era);
3162 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3163 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3164 desc_bytes(desc), DMA_BIDIRECTIONAL);
3165 print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
3166 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3167 1);
3168
3169 /* ahash_final shared descriptor */
3170 flc = &ctx->flc[FINALIZE];
3171 desc = flc->sh_desc;
3172 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3173 ctx->ctx_len, true, priv->sec_attr.era);
3174 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3175 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3176 desc_bytes(desc), DMA_BIDIRECTIONAL);
3177 print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
3178 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3179 1);
3180
3181 /* ahash_digest shared descriptor */
3182 flc = &ctx->flc[DIGEST];
3183 desc = flc->sh_desc;
3184 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3185 ctx->ctx_len, false, priv->sec_attr.era);
3186 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3187 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3188 desc_bytes(desc), DMA_BIDIRECTIONAL);
3189 print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
3190 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3191 1);
3192
3193 return 0;
3194 }
3195
3196 struct split_key_sh_result {
3197 struct completion completion;
3198 int err;
3199 struct device *dev;
3200 };
3201
3202 static void split_key_sh_done(void *cbk_ctx, u32 err)
3203 {
3204 struct split_key_sh_result *res = cbk_ctx;
3205
3206 dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
3207
3208 res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
3209 complete(&res->completion);
3210 }
3211
3212 /* Digest the key when it is larger than the algorithm's block size */
3213 static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
3214 u32 digestsize)
3215 {
3216 struct caam_request *req_ctx;
3217 u32 *desc;
3218 struct split_key_sh_result result;
3219 dma_addr_t key_dma;
3220 struct caam_flc *flc;
3221 dma_addr_t flc_dma;
3222 int ret = -ENOMEM;
3223 struct dpaa2_fl_entry *in_fle, *out_fle;
3224
3225 req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL);
3226 if (!req_ctx)
3227 return -ENOMEM;
3228
3229 in_fle = &req_ctx->fd_flt[1];
3230 out_fle = &req_ctx->fd_flt[0];
3231
3232 flc = kzalloc(sizeof(*flc), GFP_KERNEL);
3233 if (!flc)
3234 goto err_flc;
3235
3236 key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
3237 if (dma_mapping_error(ctx->dev, key_dma)) {
3238 dev_err(ctx->dev, "unable to map key memory\n");
3239 goto err_key_dma;
3240 }
3241
3242 desc = flc->sh_desc;
3243
3244 init_sh_desc(desc, 0);
3245
3246 /* descriptor to perform unkeyed hash on key_in */
3247 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3248 OP_ALG_AS_INITFINAL);
3249 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3250 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3251 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3252 LDST_SRCDST_BYTE_CONTEXT);
3253
3254 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3255 flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3256 desc_bytes(desc), DMA_TO_DEVICE);
3257 if (dma_mapping_error(ctx->dev, flc_dma)) {
3258 dev_err(ctx->dev, "unable to map shared descriptor\n");
3259 goto err_flc_dma;
3260 }
3261
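/*
 * Both frame list entries point at the key buffer: the computed digest
 * overwrites the key in place.
 */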
3262 dpaa2_fl_set_final(in_fle, true);
3263 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3264 dpaa2_fl_set_addr(in_fle, key_dma);
3265 dpaa2_fl_set_len(in_fle, *keylen);
3266 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3267 dpaa2_fl_set_addr(out_fle, key_dma);
3268 dpaa2_fl_set_len(out_fle, digestsize);
3269
3270 print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3271 DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
3272 print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
3273 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
3274 1);
3275
3276 result.err = 0;
3277 init_completion(&result.completion);
3278 result.dev = ctx->dev;
3279
3280 req_ctx->flc = flc;
3281 req_ctx->flc_dma = flc_dma;
3282 req_ctx->cbk = split_key_sh_done;
3283 req_ctx->ctx = &result;
3284
3285 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3286 if (ret == -EINPROGRESS) {
3287 /* in progress */
3288 wait_for_completion(&result.completion);
3289 ret = result.err;
3290 print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
3291 DUMP_PREFIX_ADDRESS, 16, 4, key,
3292 digestsize, 1);
3293 }
3294
3295 dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
3296 DMA_TO_DEVICE);
3297 err_flc_dma:
3298 dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
3299 err_key_dma:
3300 kfree(flc);
3301 err_flc:
3302 kfree(req_ctx);
3303
3304 *keylen = digestsize;
3305
3306 return ret;
3307 }
3308
3309 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
3310 unsigned int keylen)
3311 {
3312 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3313 unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
3314 unsigned int digestsize = crypto_ahash_digestsize(ahash);
3315 int ret;
3316 u8 *hashed_key = NULL;
3317
3318 dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
3319
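/*
 * A key longer than the block size is first hashed down to digestsize.
 * Work on a cacheline-aligned copy, since hash_digest_key() DMA-maps
 * the buffer and writes the digest back into it.
 */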
3320 if (keylen > blocksize) {
3321 unsigned int aligned_len =
3322 ALIGN(keylen, dma_get_cache_alignment());
3323
3324 if (aligned_len < keylen)
3325 return -EOVERFLOW;
3326
3327 hashed_key = kmemdup(key, aligned_len, GFP_KERNEL);
3328 if (!hashed_key)
3329 return -ENOMEM;
3330 ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
3331 if (ret)
3332 goto bad_free_key;
3333 key = hashed_key;
3334 }
3335
3336 ctx->adata.keylen = keylen;
3337 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3338 OP_ALG_ALGSEL_MASK);
3339 if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
3340 goto bad_free_key;
3341
3342 ctx->adata.key_virt = key;
3343 ctx->adata.key_inline = true;
3344
3345 /*
3346 * In case |user key| > |derived key|, using DKP<imm,imm> would result
3347 * in invalid opcodes (last bytes of user key) in the resulting
3348 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
3349 * addresses are needed.
3350 */
3351 if (keylen > ctx->adata.keylen_pad) {
3352 memcpy(ctx->key, key, keylen);
3353 dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
3354 ctx->adata.keylen_pad,
3355 DMA_TO_DEVICE);
3356 }
3357
3358 ret = ahash_set_sh_desc(ahash);
3359 kfree(hashed_key);
3360 return ret;
3361 bad_free_key:
3362 kfree(hashed_key);
3363 return -EINVAL;
3364 }
3365
3366 static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
3367 struct ahash_request *req)
3368 {
3369 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3370
3371 if (edesc->src_nents)
3372 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
3373
3374 if (edesc->qm_sg_bytes)
3375 dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
3376 DMA_TO_DEVICE);
3377
3378 if (state->buf_dma) {
3379 dma_unmap_single(dev, state->buf_dma, state->buflen,
3380 DMA_TO_DEVICE);
3381 state->buf_dma = 0;
3382 }
3383 }
3384
3385 static inline void ahash_unmap_ctx(struct device *dev,
3386 struct ahash_edesc *edesc,
3387 struct ahash_request *req, u32 flag)
3388 {
3389 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3390
3391 if (state->ctx_dma) {
3392 dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
3393 state->ctx_dma = 0;
3394 }
3395 ahash_unmap(dev, edesc, req);
3396 }
3397
3398 static void ahash_done(void *cbk_ctx, u32 status)
3399 {
3400 struct crypto_async_request *areq = cbk_ctx;
3401 struct ahash_request *req = ahash_request_cast(areq);
3402 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3403 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3404 struct ahash_edesc *edesc = state->caam_req.edesc;
3405 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3406 int digestsize = crypto_ahash_digestsize(ahash);
3407 int ecode = 0;
3408
3409 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3410
3411 if (unlikely(status))
3412 ecode = caam_qi2_strstatus(ctx->dev, status);
3413
3414 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3415 memcpy(req->result, state->caam_ctx, digestsize);
3416 qi_cache_free(edesc);
3417
3418 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3419 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3420 ctx->ctx_len, 1);
3421
3422 ahash_request_complete(req, ecode);
3423 }
3424
3425 static void ahash_done_bi(void *cbk_ctx, u32 status)
3426 {
3427 struct crypto_async_request *areq = cbk_ctx;
3428 struct ahash_request *req = ahash_request_cast(areq);
3429 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3430 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3431 struct ahash_edesc *edesc = state->caam_req.edesc;
3432 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3433 int ecode = 0;
3434
3435 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3436
3437 if (unlikely(status))
3438 ecode = caam_qi2_strstatus(ctx->dev, status);
3439
3440 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3441 qi_cache_free(edesc);
3442
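/* Stash the unprocessed tail of the request for the next update */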
3443 scatterwalk_map_and_copy(state->buf, req->src,
3444 req->nbytes - state->next_buflen,
3445 state->next_buflen, 0);
3446 state->buflen = state->next_buflen;
3447
3448 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3449 DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3450 state->buflen, 1);
3451
3452 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3453 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3454 ctx->ctx_len, 1);
3455 if (req->result)
3456 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3457 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3458 crypto_ahash_digestsize(ahash), 1);
3459
3460 ahash_request_complete(req, ecode);
3461 }
3462
3463 static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
3464 {
3465 struct crypto_async_request *areq = cbk_ctx;
3466 struct ahash_request *req = ahash_request_cast(areq);
3467 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3468 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3469 struct ahash_edesc *edesc = state->caam_req.edesc;
3470 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3471 int digestsize = crypto_ahash_digestsize(ahash);
3472 int ecode = 0;
3473
3474 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3475
3476 if (unlikely(status))
3477 ecode = caam_qi2_strstatus(ctx->dev, status);
3478
3479 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3480 memcpy(req->result, state->caam_ctx, digestsize);
3481 qi_cache_free(edesc);
3482
3483 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3484 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3485 ctx->ctx_len, 1);
3486
3487 ahash_request_complete(req, ecode);
3488 }
3489
3490 static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
3491 {
3492 struct crypto_async_request *areq = cbk_ctx;
3493 struct ahash_request *req = ahash_request_cast(areq);
3494 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3495 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3496 struct ahash_edesc *edesc = state->caam_req.edesc;
3497 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3498 int ecode = 0;
3499
3500 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3501
3502 if (unlikely(status))
3503 ecode = caam_qi2_strstatus(ctx->dev, status);
3504
3505 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3506 qi_cache_free(edesc);
3507
3508 scatterwalk_map_and_copy(state->buf, req->src,
3509 req->nbytes - state->next_buflen,
3510 state->next_buflen, 0);
3511 state->buflen = state->next_buflen;
3512
3513 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3514 DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
3515 state->buflen, 1);
3516
3517 print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
3518 DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
3519 ctx->ctx_len, 1);
3520 if (req->result)
3521 print_hex_dump_debug("result@" __stringify(__LINE__)": ",
3522 DUMP_PREFIX_ADDRESS, 16, 4, req->result,
3523 crypto_ahash_digestsize(ahash), 1);
3524
3525 ahash_request_complete(req, ecode);
3526 }
3527
3528 static int ahash_update_ctx(struct ahash_request *req)
3529 {
3530 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3531 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3532 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3533 struct caam_request *req_ctx = &state->caam_req;
3534 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3535 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3536 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3537 GFP_KERNEL : GFP_ATOMIC;
3538 u8 *buf = state->buf;
3539 int *buflen = &state->buflen;
3540 int *next_buflen = &state->next_buflen;
3541 int in_len = *buflen + req->nbytes, to_hash;
3542 int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
3543 struct ahash_edesc *edesc;
3544 int ret = 0;
3545
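/* Only whole blocks are hashed now; the remainder is buffered for later */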
3546 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3547 to_hash = in_len - *next_buflen;
3548
3549 if (to_hash) {
3550 struct dpaa2_sg_entry *sg_table;
3551 int src_len = req->nbytes - *next_buflen;
3552
3553 src_nents = sg_nents_for_len(req->src, src_len);
3554 if (src_nents < 0) {
3555 dev_err(ctx->dev, "Invalid number of src SG.\n");
3556 return src_nents;
3557 }
3558
3559 if (src_nents) {
3560 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3561 DMA_TO_DEVICE);
3562 if (!mapped_nents) {
3563 dev_err(ctx->dev, "unable to DMA map source\n");
3564 return -ENOMEM;
3565 }
3566 } else {
3567 mapped_nents = 0;
3568 }
3569
3570 /* allocate space for base edesc and link tables */
3571 edesc = qi_cache_zalloc(flags);
3572 if (!edesc) {
3573 dma_unmap_sg(ctx->dev, req->src, src_nents,
3574 DMA_TO_DEVICE);
3575 return -ENOMEM;
3576 }
3577
3578 edesc->src_nents = src_nents;
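/*
 * S/G table layout: one entry for the running context, optionally one
 * for the buffered data, then the (padded) source entries.
 */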
3579 qm_sg_src_index = 1 + (*buflen ? 1 : 0);
3580 qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3581 sizeof(*sg_table);
3582 sg_table = &edesc->sgt[0];
3583
3584 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3585 DMA_BIDIRECTIONAL);
3586 if (ret)
3587 goto unmap_ctx;
3588
3589 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3590 if (ret)
3591 goto unmap_ctx;
3592
3593 if (mapped_nents) {
3594 sg_to_qm_sg_last(req->src, src_len,
3595 sg_table + qm_sg_src_index, 0);
3596 } else {
3597 dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
3598 true);
3599 }
3600
3601 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3602 qm_sg_bytes, DMA_TO_DEVICE);
3603 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3604 dev_err(ctx->dev, "unable to map S/G table\n");
3605 ret = -ENOMEM;
3606 goto unmap_ctx;
3607 }
3608 edesc->qm_sg_bytes = qm_sg_bytes;
3609
3610 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3611 dpaa2_fl_set_final(in_fle, true);
3612 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3613 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3614 dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3615 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3616 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3617 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3618
3619 req_ctx->flc = &ctx->flc[UPDATE];
3620 req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3621 req_ctx->cbk = ahash_done_bi;
3622 req_ctx->ctx = &req->base;
3623 req_ctx->edesc = edesc;
3624
3625 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3626 if (ret != -EINPROGRESS &&
3627 !(ret == -EBUSY &&
3628 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3629 goto unmap_ctx;
3630 } else if (*next_buflen) {
3631 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
3632 req->nbytes, 0);
3633 *buflen = *next_buflen;
3634
3635 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
3636 DUMP_PREFIX_ADDRESS, 16, 4, buf,
3637 *buflen, 1);
3638 }
3639
3640 return ret;
3641 unmap_ctx:
3642 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3643 qi_cache_free(edesc);
3644 return ret;
3645 }
3646
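/*
 * ahash_final_ctx - hash what has been accumulated so far (the running
 * context plus any buffered bytes) through the FINALIZE flow context and
 * write the digest back into the context buffer. The S/G table feeding the
 * input frame list entry holds the context entry and, if any bytes remain
 * buffered, the buffer entry.
 */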
3647 static int ahash_final_ctx(struct ahash_request *req)
3648 {
3649 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3650 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3651 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3652 struct caam_request *req_ctx = &state->caam_req;
3653 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3654 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3655 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3656 GFP_KERNEL : GFP_ATOMIC;
3657 int buflen = state->buflen;
3658 int qm_sg_bytes;
3659 int digestsize = crypto_ahash_digestsize(ahash);
3660 struct ahash_edesc *edesc;
3661 struct dpaa2_sg_entry *sg_table;
3662 int ret;
3663
3664 /* allocate space for base edesc and link tables */
3665 edesc = qi_cache_zalloc(flags);
3666 if (!edesc)
3667 return -ENOMEM;
3668
3669 qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
3670 sg_table = &edesc->sgt[0];
3671
3672 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3673 DMA_BIDIRECTIONAL);
3674 if (ret)
3675 goto unmap_ctx;
3676
3677 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3678 if (ret)
3679 goto unmap_ctx;
3680
3681 dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
3682
3683 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3684 DMA_TO_DEVICE);
3685 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3686 dev_err(ctx->dev, "unable to map S/G table\n");
3687 ret = -ENOMEM;
3688 goto unmap_ctx;
3689 }
3690 edesc->qm_sg_bytes = qm_sg_bytes;
3691
3692 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3693 dpaa2_fl_set_final(in_fle, true);
3694 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3695 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3696 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3697 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3698 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3699 dpaa2_fl_set_len(out_fle, digestsize);
3700
3701 req_ctx->flc = &ctx->flc[FINALIZE];
3702 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3703 req_ctx->cbk = ahash_done_ctx_src;
3704 req_ctx->ctx = &req->base;
3705 req_ctx->edesc = edesc;
3706
3707 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3708 if (ret == -EINPROGRESS ||
3709 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3710 return ret;
3711
3712 unmap_ctx:
3713 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3714 qi_cache_free(edesc);
3715 return ret;
3716 }
3717
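/*
 * ahash_finup_ctx - like ahash_final_ctx(), but also folds in the request
 * payload: the input S/G table chains the running context, the buffered
 * bytes (if any) and the DMA-mapped req->src segments before the FINALIZE
 * job is enqueued.
 */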
3718 static int ahash_finup_ctx(struct ahash_request *req)
3719 {
3720 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3721 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3722 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3723 struct caam_request *req_ctx = &state->caam_req;
3724 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3725 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3726 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3727 GFP_KERNEL : GFP_ATOMIC;
3728 int buflen = state->buflen;
3729 int qm_sg_bytes, qm_sg_src_index;
3730 int src_nents, mapped_nents;
3731 int digestsize = crypto_ahash_digestsize(ahash);
3732 struct ahash_edesc *edesc;
3733 struct dpaa2_sg_entry *sg_table;
3734 int ret;
3735
3736 src_nents = sg_nents_for_len(req->src, req->nbytes);
3737 if (src_nents < 0) {
3738 dev_err(ctx->dev, "Invalid number of src SG.\n");
3739 return src_nents;
3740 }
3741
3742 if (src_nents) {
3743 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3744 DMA_TO_DEVICE);
3745 if (!mapped_nents) {
3746 dev_err(ctx->dev, "unable to DMA map source\n");
3747 return -ENOMEM;
3748 }
3749 } else {
3750 mapped_nents = 0;
3751 }
3752
3753 /* allocate space for base edesc and link tables */
3754 edesc = qi_cache_zalloc(flags);
3755 if (!edesc) {
3756 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3757 return -ENOMEM;
3758 }
3759
3760 edesc->src_nents = src_nents;
3761 qm_sg_src_index = 1 + (buflen ? 1 : 0);
3762 qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
3763 sizeof(*sg_table);
3764 sg_table = &edesc->sgt[0];
3765
3766 ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
3767 DMA_BIDIRECTIONAL);
3768 if (ret)
3769 goto unmap_ctx;
3770
3771 ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
3772 if (ret)
3773 goto unmap_ctx;
3774
3775 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
3776
3777 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
3778 DMA_TO_DEVICE);
3779 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3780 dev_err(ctx->dev, "unable to map S/G table\n");
3781 ret = -ENOMEM;
3782 goto unmap_ctx;
3783 }
3784 edesc->qm_sg_bytes = qm_sg_bytes;
3785
3786 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3787 dpaa2_fl_set_final(in_fle, true);
3788 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3789 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3790 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3791 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3792 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3793 dpaa2_fl_set_len(out_fle, digestsize);
3794
3795 req_ctx->flc = &ctx->flc[FINALIZE];
3796 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3797 req_ctx->cbk = ahash_done_ctx_src;
3798 req_ctx->ctx = &req->base;
3799 req_ctx->edesc = edesc;
3800
3801 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3802 if (ret == -EINPROGRESS ||
3803 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3804 return ret;
3805
3806 unmap_ctx:
3807 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3808 qi_cache_free(edesc);
3809 return ret;
3810 }
3811
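/*
 * ahash_digest - one-shot hash of req->src using the DIGEST flow context.
 * A single mapped segment is passed directly as a "single" frame list
 * entry; multiple segments are described through a QMan S/G table instead.
 */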
3812 static int ahash_digest(struct ahash_request *req)
3813 {
3814 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3815 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3816 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3817 struct caam_request *req_ctx = &state->caam_req;
3818 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3819 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3820 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3821 GFP_KERNEL : GFP_ATOMIC;
3822 int digestsize = crypto_ahash_digestsize(ahash);
3823 int src_nents, mapped_nents;
3824 struct ahash_edesc *edesc;
3825 int ret = -ENOMEM;
3826
3827 state->buf_dma = 0;
3828
3829 src_nents = sg_nents_for_len(req->src, req->nbytes);
3830 if (src_nents < 0) {
3831 dev_err(ctx->dev, "Invalid number of src SG.\n");
3832 return src_nents;
3833 }
3834
3835 if (src_nents) {
3836 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
3837 DMA_TO_DEVICE);
3838 if (!mapped_nents) {
3839 dev_err(ctx->dev, "unable to map source for DMA\n");
3840 return ret;
3841 }
3842 } else {
3843 mapped_nents = 0;
3844 }
3845
3846 /* allocate space for base edesc and link tables */
3847 edesc = qi_cache_zalloc(flags);
3848 if (!edesc) {
3849 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
3850 return ret;
3851 }
3852
3853 edesc->src_nents = src_nents;
3854 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3855
3856 if (mapped_nents > 1) {
3857 int qm_sg_bytes;
3858 struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3859
3860 qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
3861 sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
3862 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3863 qm_sg_bytes, DMA_TO_DEVICE);
3864 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
3865 dev_err(ctx->dev, "unable to map S/G table\n");
3866 goto unmap;
3867 }
3868 edesc->qm_sg_bytes = qm_sg_bytes;
3869 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3870 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3871 } else {
3872 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3873 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
3874 }
3875
3876 state->ctx_dma_len = digestsize;
3877 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3878 DMA_FROM_DEVICE);
3879 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3880 dev_err(ctx->dev, "unable to map ctx\n");
3881 state->ctx_dma = 0;
3882 goto unmap;
3883 }
3884
3885 dpaa2_fl_set_final(in_fle, true);
3886 dpaa2_fl_set_len(in_fle, req->nbytes);
3887 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3888 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3889 dpaa2_fl_set_len(out_fle, digestsize);
3890
3891 req_ctx->flc = &ctx->flc[DIGEST];
3892 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3893 req_ctx->cbk = ahash_done;
3894 req_ctx->ctx = &req->base;
3895 req_ctx->edesc = edesc;
3896 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3897 if (ret == -EINPROGRESS ||
3898 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3899 return ret;
3900
3901 unmap:
3902 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3903 qi_cache_free(edesc);
3904 return ret;
3905 }
3906
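/*
 * ahash_final_no_ctx - produce the digest when no running context exists
 * yet, i.e. all data (if any) still sits in the session buffer. The buffer
 * is mapped as a single input entry and the DIGEST flow context is used.
 */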
3907 static int ahash_final_no_ctx(struct ahash_request *req)
3908 {
3909 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3910 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3911 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3912 struct caam_request *req_ctx = &state->caam_req;
3913 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3914 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3915 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3916 GFP_KERNEL : GFP_ATOMIC;
3917 u8 *buf = state->buf;
3918 int buflen = state->buflen;
3919 int digestsize = crypto_ahash_digestsize(ahash);
3920 struct ahash_edesc *edesc;
3921 int ret = -ENOMEM;
3922
3923 /* allocate space for base edesc and link tables */
3924 edesc = qi_cache_zalloc(flags);
3925 if (!edesc)
3926 return ret;
3927
3928 if (buflen) {
3929 state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
3930 DMA_TO_DEVICE);
3931 if (dma_mapping_error(ctx->dev, state->buf_dma)) {
3932 dev_err(ctx->dev, "unable to map src\n");
3933 goto unmap;
3934 }
3935 }
3936
3937 state->ctx_dma_len = digestsize;
3938 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
3939 DMA_FROM_DEVICE);
3940 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
3941 dev_err(ctx->dev, "unable to map ctx\n");
3942 state->ctx_dma = 0;
3943 goto unmap;
3944 }
3945
3946 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3947 dpaa2_fl_set_final(in_fle, true);
3948 /*
3949 * crypto engine requires the input entry to be present when
3950 * "frame list" FD is used.
3951 * Since engine does not support FMT=2'b11 (unused entry type), leaving
3952 * in_fle zeroized (except for "Final" flag) is the best option.
3953 */
3954 if (buflen) {
3955 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3956 dpaa2_fl_set_addr(in_fle, state->buf_dma);
3957 dpaa2_fl_set_len(in_fle, buflen);
3958 }
3959 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3960 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3961 dpaa2_fl_set_len(out_fle, digestsize);
3962
3963 req_ctx->flc = &ctx->flc[DIGEST];
3964 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3965 req_ctx->cbk = ahash_done;
3966 req_ctx->ctx = &req->base;
3967 req_ctx->edesc = edesc;
3968
3969 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
3970 if (ret == -EINPROGRESS ||
3971 (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3972 return ret;
3973
3974 unmap:
3975 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3976 qi_cache_free(edesc);
3977 return ret;
3978 }
3979
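/*
 * ahash_update_no_ctx - first hashing pass for a session that only has
 * buffered data so far: the block-size multiple of buffer + req->src is
 * run through the UPDATE_FIRST flow context to create a running context,
 * after which the state handlers switch to the *_ctx variants.
 */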
3980 static int ahash_update_no_ctx(struct ahash_request *req)
3981 {
3982 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
3983 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
3984 struct caam_hash_state *state = ahash_request_ctx_dma(req);
3985 struct caam_request *req_ctx = &state->caam_req;
3986 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
3987 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
3988 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
3989 GFP_KERNEL : GFP_ATOMIC;
3990 u8 *buf = state->buf;
3991 int *buflen = &state->buflen;
3992 int *next_buflen = &state->next_buflen;
3993 int in_len = *buflen + req->nbytes, to_hash;
3994 int qm_sg_bytes, src_nents, mapped_nents;
3995 struct ahash_edesc *edesc;
3996 int ret = 0;
3997
3998 *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
3999 to_hash = in_len - *next_buflen;
4000
4001 if (to_hash) {
4002 struct dpaa2_sg_entry *sg_table;
4003 int src_len = req->nbytes - *next_buflen;
4004
4005 src_nents = sg_nents_for_len(req->src, src_len);
4006 if (src_nents < 0) {
4007 dev_err(ctx->dev, "Invalid number of src SG.\n");
4008 return src_nents;
4009 }
4010
4011 if (src_nents) {
4012 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4013 DMA_TO_DEVICE);
4014 if (!mapped_nents) {
4015 dev_err(ctx->dev, "unable to DMA map source\n");
4016 return -ENOMEM;
4017 }
4018 } else {
4019 mapped_nents = 0;
4020 }
4021
4022 /* allocate space for base edesc and link tables */
4023 edesc = qi_cache_zalloc(flags);
4024 if (!edesc) {
4025 dma_unmap_sg(ctx->dev, req->src, src_nents,
4026 DMA_TO_DEVICE);
4027 return -ENOMEM;
4028 }
4029
4030 edesc->src_nents = src_nents;
4031 qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
4032 sizeof(*sg_table);
4033 sg_table = &edesc->sgt[0];
4034
4035 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4036 if (ret)
4037 goto unmap_ctx;
4038
4039 sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
4040
4041 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4042 qm_sg_bytes, DMA_TO_DEVICE);
4043 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4044 dev_err(ctx->dev, "unable to map S/G table\n");
4045 ret = -ENOMEM;
4046 goto unmap_ctx;
4047 }
4048 edesc->qm_sg_bytes = qm_sg_bytes;
4049
4050 state->ctx_dma_len = ctx->ctx_len;
4051 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4052 ctx->ctx_len, DMA_FROM_DEVICE);
4053 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4054 dev_err(ctx->dev, "unable to map ctx\n");
4055 state->ctx_dma = 0;
4056 ret = -ENOMEM;
4057 goto unmap_ctx;
4058 }
4059
4060 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4061 dpaa2_fl_set_final(in_fle, true);
4062 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4063 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4064 dpaa2_fl_set_len(in_fle, to_hash);
4065 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4066 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4067 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4068
4069 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4070 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4071 req_ctx->cbk = ahash_done_ctx_dst;
4072 req_ctx->ctx = &req->base;
4073 req_ctx->edesc = edesc;
4074
4075 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4076 if (ret != -EINPROGRESS &&
4077 !(ret == -EBUSY &&
4078 req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4079 goto unmap_ctx;
4080
4081 state->update = ahash_update_ctx;
4082 state->finup = ahash_finup_ctx;
4083 state->final = ahash_final_ctx;
4084 } else if (*next_buflen) {
4085 scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
4086 req->nbytes, 0);
4087 *buflen = *next_buflen;
4088
4089 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4090 DUMP_PREFIX_ADDRESS, 16, 4, buf,
4091 *buflen, 1);
4092 }
4093
4094 return ret;
4095 unmap_ctx:
4096 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4097 qi_cache_free(edesc);
4098 return ret;
4099 }
4100
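/*
 * ahash_finup_no_ctx - finalize a session without a running context by
 * hashing the buffered bytes plus req->src in one DIGEST operation.
 */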
4101 static int ahash_finup_no_ctx(struct ahash_request *req)
4102 {
4103 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4104 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
4105 struct caam_hash_state *state = ahash_request_ctx_dma(req);
4106 struct caam_request *req_ctx = &state->caam_req;
4107 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4108 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4109 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4110 GFP_KERNEL : GFP_ATOMIC;
4111 int buflen = state->buflen;
4112 int qm_sg_bytes, src_nents, mapped_nents;
4113 int digestsize = crypto_ahash_digestsize(ahash);
4114 struct ahash_edesc *edesc;
4115 struct dpaa2_sg_entry *sg_table;
4116 int ret = -ENOMEM;
4117
4118 src_nents = sg_nents_for_len(req->src, req->nbytes);
4119 if (src_nents < 0) {
4120 dev_err(ctx->dev, "Invalid number of src SG.\n");
4121 return src_nents;
4122 }
4123
4124 if (src_nents) {
4125 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4126 DMA_TO_DEVICE);
4127 if (!mapped_nents) {
4128 dev_err(ctx->dev, "unable to DMA map source\n");
4129 return ret;
4130 }
4131 } else {
4132 mapped_nents = 0;
4133 }
4134
4135 /* allocate space for base edesc and link tables */
4136 edesc = qi_cache_zalloc(flags);
4137 if (!edesc) {
4138 dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
4139 return ret;
4140 }
4141
4142 edesc->src_nents = src_nents;
4143 qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
4144 sg_table = &edesc->sgt[0];
4145
4146 ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
4147 if (ret)
4148 goto unmap;
4149
4150 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
4151
4152 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
4153 DMA_TO_DEVICE);
4154 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4155 dev_err(ctx->dev, "unable to map S/G table\n");
4156 ret = -ENOMEM;
4157 goto unmap;
4158 }
4159 edesc->qm_sg_bytes = qm_sg_bytes;
4160
4161 state->ctx_dma_len = digestsize;
4162 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
4163 DMA_FROM_DEVICE);
4164 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4165 dev_err(ctx->dev, "unable to map ctx\n");
4166 state->ctx_dma = 0;
4167 ret = -ENOMEM;
4168 goto unmap;
4169 }
4170
4171 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4172 dpaa2_fl_set_final(in_fle, true);
4173 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4174 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4175 dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4176 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4177 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4178 dpaa2_fl_set_len(out_fle, digestsize);
4179
4180 req_ctx->flc = &ctx->flc[DIGEST];
4181 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4182 req_ctx->cbk = ahash_done;
4183 req_ctx->ctx = &req->base;
4184 req_ctx->edesc = edesc;
4185 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4186 if (ret != -EINPROGRESS &&
4187 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
4188 goto unmap;
4189
4190 return ret;
4191 unmap:
4192 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
4193 qi_cache_free(edesc);
4194 return ret;
4195 }
4196
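/*
 * ahash_update_first - handle the first update of a request: the largest
 * block-size multiple of req->src is hashed through UPDATE_FIRST to create
 * the running context and the state handlers are switched to the *_ctx
 * variants; a request shorter than a block is merely copied into the
 * buffer and the *_no_ctx handlers are installed instead.
 */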
4197 static int ahash_update_first(struct ahash_request *req)
4198 {
4199 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
4200 struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
4201 struct caam_hash_state *state = ahash_request_ctx_dma(req);
4202 struct caam_request *req_ctx = &state->caam_req;
4203 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
4204 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
4205 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
4206 GFP_KERNEL : GFP_ATOMIC;
4207 u8 *buf = state->buf;
4208 int *buflen = &state->buflen;
4209 int *next_buflen = &state->next_buflen;
4210 int to_hash;
4211 int src_nents, mapped_nents;
4212 struct ahash_edesc *edesc;
4213 int ret = 0;
4214
4215 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
4216 1);
4217 to_hash = req->nbytes - *next_buflen;
4218
4219 if (to_hash) {
4220 struct dpaa2_sg_entry *sg_table;
4221 int src_len = req->nbytes - *next_buflen;
4222
4223 src_nents = sg_nents_for_len(req->src, src_len);
4224 if (src_nents < 0) {
4225 dev_err(ctx->dev, "Invalid number of src SG.\n");
4226 return src_nents;
4227 }
4228
4229 if (src_nents) {
4230 mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
4231 DMA_TO_DEVICE);
4232 if (!mapped_nents) {
4233 dev_err(ctx->dev, "unable to map source for DMA\n");
4234 return -ENOMEM;
4235 }
4236 } else {
4237 mapped_nents = 0;
4238 }
4239
4240 /* allocate space for base edesc and link tables */
4241 edesc = qi_cache_zalloc(flags);
4242 if (!edesc) {
4243 dma_unmap_sg(ctx->dev, req->src, src_nents,
4244 DMA_TO_DEVICE);
4245 return -ENOMEM;
4246 }
4247
4248 edesc->src_nents = src_nents;
4249 sg_table = &edesc->sgt[0];
4250
4251 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4252 dpaa2_fl_set_final(in_fle, true);
4253 dpaa2_fl_set_len(in_fle, to_hash);
4254
4255 if (mapped_nents > 1) {
4256 int qm_sg_bytes;
4257
4258 sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
4259 qm_sg_bytes = pad_sg_nents(mapped_nents) *
4260 sizeof(*sg_table);
4261 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4262 qm_sg_bytes,
4263 DMA_TO_DEVICE);
4264 if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
4265 dev_err(ctx->dev, "unable to map S/G table\n");
4266 ret = -ENOMEM;
4267 goto unmap_ctx;
4268 }
4269 edesc->qm_sg_bytes = qm_sg_bytes;
4270 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4271 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4272 } else {
4273 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
4274 dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
4275 }
4276
4277 state->ctx_dma_len = ctx->ctx_len;
4278 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4279 ctx->ctx_len, DMA_FROM_DEVICE);
4280 if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
4281 dev_err(ctx->dev, "unable to map ctx\n");
4282 state->ctx_dma = 0;
4283 ret = -ENOMEM;
4284 goto unmap_ctx;
4285 }
4286
4287 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4288 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4289 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4290
4291 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4292 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4293 req_ctx->cbk = ahash_done_ctx_dst;
4294 req_ctx->ctx = &req->base;
4295 req_ctx->edesc = edesc;
4296
4297 ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
4298 if (ret != -EINPROGRESS &&
4299 !(ret == -EBUSY && req->base.flags &
4300 CRYPTO_TFM_REQ_MAY_BACKLOG))
4301 goto unmap_ctx;
4302
4303 state->update = ahash_update_ctx;
4304 state->finup = ahash_finup_ctx;
4305 state->final = ahash_final_ctx;
4306 } else if (*next_buflen) {
4307 state->update = ahash_update_no_ctx;
4308 state->finup = ahash_finup_no_ctx;
4309 state->final = ahash_final_no_ctx;
4310 scatterwalk_map_and_copy(buf, req->src, 0,
4311 req->nbytes, 0);
4312 *buflen = *next_buflen;
4313
4314 print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
4315 DUMP_PREFIX_ADDRESS, 16, 4, buf,
4316 *buflen, 1);
4317 }
4318
4319 return ret;
4320 unmap_ctx:
4321 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
4322 qi_cache_free(edesc);
4323 return ret;
4324 }
4325
4326 static int ahash_finup_first(struct ahash_request *req)
4327 {
4328 return ahash_digest(req);
4329 }
4330
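/*
 * ahash_init - reset the per-request state: no running context, empty
 * buffer, and handlers pointing at the "first pass" implementations.
 */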
4331 static int ahash_init(struct ahash_request *req)
4332 {
4333 struct caam_hash_state *state = ahash_request_ctx_dma(req);
4334
4335 state->update = ahash_update_first;
4336 state->finup = ahash_finup_first;
4337 state->final = ahash_final_no_ctx;
4338
4339 state->ctx_dma = 0;
4340 state->ctx_dma_len = 0;
4341 state->buf_dma = 0;
4342 state->buflen = 0;
4343 state->next_buflen = 0;
4344
4345 return 0;
4346 }
4347
4348 static int ahash_update(struct ahash_request *req)
4349 {
4350 struct caam_hash_state *state = ahash_request_ctx_dma(req);
4351
4352 return state->update(req);
4353 }
4354
4355 static int ahash_finup(struct ahash_request *req)
4356 {
4357 struct caam_hash_state *state = ahash_request_ctx_dma(req);
4358
4359 return state->finup(req);
4360 }
4361
4362 static int ahash_final(struct ahash_request *req)
4363 {
4364 struct caam_hash_state *state = ahash_request_ctx_dma(req);
4365
4366 return state->final(req);
4367 }
4368
4369 static int ahash_export(struct ahash_request *req, void *out)
4370 {
4371 struct caam_hash_state *state = ahash_request_ctx_dma(req);
4372 struct caam_export_state *export = out;
4373 u8 *buf = state->buf;
4374 int len = state->buflen;
4375
4376 memcpy(export->buf, buf, len);
4377 memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
4378 export->buflen = len;
4379 export->update = state->update;
4380 export->final = state->final;
4381 export->finup = state->finup;
4382
4383 return 0;
4384 }
4385
4386 static int ahash_import(struct ahash_request *req, const void *in)
4387 {
4388 struct caam_hash_state *state = ahash_request_ctx_dma(req);
4389 const struct caam_export_state *export = in;
4390
4391 memset(state, 0, sizeof(*state));
4392 memcpy(state->buf, export->buf, export->buflen);
4393 memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
4394 state->buflen = export->buflen;
4395 state->update = export->update;
4396 state->final = export->final;
4397 state->finup = export->finup;
4398
4399 return 0;
4400 }
4401
4402 struct caam_hash_template {
4403 char name[CRYPTO_MAX_ALG_NAME];
4404 char driver_name[CRYPTO_MAX_ALG_NAME];
4405 char hmac_name[CRYPTO_MAX_ALG_NAME];
4406 char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
4407 unsigned int blocksize;
4408 struct ahash_alg template_ahash;
4409 u32 alg_type;
4410 };
4411
4412 /* ahash descriptors */
4413 static struct caam_hash_template driver_hash[] = {
4414 {
4415 .name = "sha1",
4416 .driver_name = "sha1-caam-qi2",
4417 .hmac_name = "hmac(sha1)",
4418 .hmac_driver_name = "hmac-sha1-caam-qi2",
4419 .blocksize = SHA1_BLOCK_SIZE,
4420 .template_ahash = {
4421 .init = ahash_init,
4422 .update = ahash_update,
4423 .final = ahash_final,
4424 .finup = ahash_finup,
4425 .digest = ahash_digest,
4426 .export = ahash_export,
4427 .import = ahash_import,
4428 .setkey = ahash_setkey,
4429 .halg = {
4430 .digestsize = SHA1_DIGEST_SIZE,
4431 .statesize = sizeof(struct caam_export_state),
4432 },
4433 },
4434 .alg_type = OP_ALG_ALGSEL_SHA1,
4435 }, {
4436 .name = "sha224",
4437 .driver_name = "sha224-caam-qi2",
4438 .hmac_name = "hmac(sha224)",
4439 .hmac_driver_name = "hmac-sha224-caam-qi2",
4440 .blocksize = SHA224_BLOCK_SIZE,
4441 .template_ahash = {
4442 .init = ahash_init,
4443 .update = ahash_update,
4444 .final = ahash_final,
4445 .finup = ahash_finup,
4446 .digest = ahash_digest,
4447 .export = ahash_export,
4448 .import = ahash_import,
4449 .setkey = ahash_setkey,
4450 .halg = {
4451 .digestsize = SHA224_DIGEST_SIZE,
4452 .statesize = sizeof(struct caam_export_state),
4453 },
4454 },
4455 .alg_type = OP_ALG_ALGSEL_SHA224,
4456 }, {
4457 .name = "sha256",
4458 .driver_name = "sha256-caam-qi2",
4459 .hmac_name = "hmac(sha256)",
4460 .hmac_driver_name = "hmac-sha256-caam-qi2",
4461 .blocksize = SHA256_BLOCK_SIZE,
4462 .template_ahash = {
4463 .init = ahash_init,
4464 .update = ahash_update,
4465 .final = ahash_final,
4466 .finup = ahash_finup,
4467 .digest = ahash_digest,
4468 .export = ahash_export,
4469 .import = ahash_import,
4470 .setkey = ahash_setkey,
4471 .halg = {
4472 .digestsize = SHA256_DIGEST_SIZE,
4473 .statesize = sizeof(struct caam_export_state),
4474 },
4475 },
4476 .alg_type = OP_ALG_ALGSEL_SHA256,
4477 }, {
4478 .name = "sha384",
4479 .driver_name = "sha384-caam-qi2",
4480 .hmac_name = "hmac(sha384)",
4481 .hmac_driver_name = "hmac-sha384-caam-qi2",
4482 .blocksize = SHA384_BLOCK_SIZE,
4483 .template_ahash = {
4484 .init = ahash_init,
4485 .update = ahash_update,
4486 .final = ahash_final,
4487 .finup = ahash_finup,
4488 .digest = ahash_digest,
4489 .export = ahash_export,
4490 .import = ahash_import,
4491 .setkey = ahash_setkey,
4492 .halg = {
4493 .digestsize = SHA384_DIGEST_SIZE,
4494 .statesize = sizeof(struct caam_export_state),
4495 },
4496 },
4497 .alg_type = OP_ALG_ALGSEL_SHA384,
4498 }, {
4499 .name = "sha512",
4500 .driver_name = "sha512-caam-qi2",
4501 .hmac_name = "hmac(sha512)",
4502 .hmac_driver_name = "hmac-sha512-caam-qi2",
4503 .blocksize = SHA512_BLOCK_SIZE,
4504 .template_ahash = {
4505 .init = ahash_init,
4506 .update = ahash_update,
4507 .final = ahash_final,
4508 .finup = ahash_finup,
4509 .digest = ahash_digest,
4510 .export = ahash_export,
4511 .import = ahash_import,
4512 .setkey = ahash_setkey,
4513 .halg = {
4514 .digestsize = SHA512_DIGEST_SIZE,
4515 .statesize = sizeof(struct caam_export_state),
4516 },
4517 },
4518 .alg_type = OP_ALG_ALGSEL_SHA512,
4519 }, {
4520 .name = "md5",
4521 .driver_name = "md5-caam-qi2",
4522 .hmac_name = "hmac(md5)",
4523 .hmac_driver_name = "hmac-md5-caam-qi2",
4524 .blocksize = MD5_BLOCK_WORDS * 4,
4525 .template_ahash = {
4526 .init = ahash_init,
4527 .update = ahash_update,
4528 .final = ahash_final,
4529 .finup = ahash_finup,
4530 .digest = ahash_digest,
4531 .export = ahash_export,
4532 .import = ahash_import,
4533 .setkey = ahash_setkey,
4534 .halg = {
4535 .digestsize = MD5_DIGEST_SIZE,
4536 .statesize = sizeof(struct caam_export_state),
4537 },
4538 },
4539 .alg_type = OP_ALG_ALGSEL_MD5,
4540 }
4541 };
4542
4543 struct caam_hash_alg {
4544 struct list_head entry;
4545 struct device *dev;
4546 int alg_type;
4547 struct ahash_alg ahash_alg;
4548 };
4549
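/*
 * caam_hash_cra_init - per-tfm setup: DMA-map the key area (keyed hashes
 * only) and the flow contexts, record the MDHA running digest length for
 * the selected algorithm and size the per-request context. Shared
 * descriptors for unkeyed hashes are built here; keyed ones are deferred
 * to setkey().
 */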
4550 static int caam_hash_cra_init(struct crypto_tfm *tfm)
4551 {
4552 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
4553 struct crypto_alg *base = tfm->__crt_alg;
4554 struct hash_alg_common *halg =
4555 container_of(base, struct hash_alg_common, base);
4556 struct ahash_alg *alg =
4557 container_of(halg, struct ahash_alg, halg);
4558 struct caam_hash_alg *caam_hash =
4559 container_of(alg, struct caam_hash_alg, ahash_alg);
4560 struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
4561 /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
4562 static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
4563 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
4564 HASH_MSG_LEN + 32,
4565 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
4566 HASH_MSG_LEN + 64,
4567 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
4568 dma_addr_t dma_addr;
4569 int i;
4570
4571 ctx->dev = caam_hash->dev;
4572
4573 if (alg->setkey) {
4574 ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
4575 ARRAY_SIZE(ctx->key),
4576 DMA_TO_DEVICE,
4577 DMA_ATTR_SKIP_CPU_SYNC);
4578 if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
4579 dev_err(ctx->dev, "unable to map key\n");
4580 return -ENOMEM;
4581 }
4582 }
4583
4584 dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
4585 DMA_BIDIRECTIONAL,
4586 DMA_ATTR_SKIP_CPU_SYNC);
4587 if (dma_mapping_error(ctx->dev, dma_addr)) {
4588 dev_err(ctx->dev, "unable to map shared descriptors\n");
4589 if (ctx->adata.key_dma)
4590 dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4591 ARRAY_SIZE(ctx->key),
4592 DMA_TO_DEVICE,
4593 DMA_ATTR_SKIP_CPU_SYNC);
4594 return -ENOMEM;
4595 }
4596
4597 for (i = 0; i < HASH_NUM_OP; i++)
4598 ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
4599
4600 /* copy descriptor header template value */
4601 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
4602
4603 ctx->ctx_len = runninglen[(ctx->adata.algtype &
4604 OP_ALG_ALGSEL_SUBMASK) >>
4605 OP_ALG_ALGSEL_SHIFT];
4606
4607 crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));
4608
4609 /*
4610 * For keyed hash algorithms shared descriptors
4611 * will be created later in setkey() callback
4612 */
4613 return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
4614 }
4615
4616 static void caam_hash_cra_exit(struct crypto_tfm *tfm)
4617 {
4618 struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
4619
4620 dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
4621 DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
4622 if (ctx->adata.key_dma)
4623 dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
4624 ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
4625 DMA_ATTR_SKIP_CPU_SYNC);
4626 }
4627
4628 static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
4629 struct caam_hash_template *template, bool keyed)
4630 {
4631 struct caam_hash_alg *t_alg;
4632 struct ahash_alg *halg;
4633 struct crypto_alg *alg;
4634
4635 t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
4636 if (!t_alg)
4637 return ERR_PTR(-ENOMEM);
4638
4639 t_alg->ahash_alg = template->template_ahash;
4640 halg = &t_alg->ahash_alg;
4641 alg = &halg->halg.base;
4642
4643 if (keyed) {
4644 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4645 template->hmac_name);
4646 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4647 template->hmac_driver_name);
4648 } else {
4649 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
4650 template->name);
4651 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
4652 template->driver_name);
4653 t_alg->ahash_alg.setkey = NULL;
4654 }
4655 alg->cra_module = THIS_MODULE;
4656 alg->cra_init = caam_hash_cra_init;
4657 alg->cra_exit = caam_hash_cra_exit;
4658 alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
4659 alg->cra_priority = CAAM_CRA_PRIORITY;
4660 alg->cra_blocksize = template->blocksize;
4661 alg->cra_alignmask = 0;
4662 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
4663
4664 t_alg->alg_type = template->alg_type;
4665 t_alg->dev = dev;
4666
4667 return t_alg;
4668 }
4669
4670 static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
4671 {
4672 struct dpaa2_caam_priv_per_cpu *ppriv;
4673
4674 ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
4675 napi_schedule_irqoff(&ppriv->napi);
4676 }
4677
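/*
 * dpaa2_dpseci_dpio_setup - register an FQDAN notification callback and a
 * dequeue store with an affine DPIO for each online CPU owning a Rx/Tx
 * queue pair. Returns -EPROBE_DEFER if no affine DPIO is available yet.
 */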
4678 static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
4679 {
4680 struct device *dev = priv->dev;
4681 struct dpaa2_io_notification_ctx *nctx;
4682 struct dpaa2_caam_priv_per_cpu *ppriv;
4683 int err, i = 0, cpu;
4684
4685 for_each_online_cpu(cpu) {
4686 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4687 ppriv->priv = priv;
4688 nctx = &ppriv->nctx;
4689 nctx->is_cdan = 0;
4690 nctx->id = ppriv->rsp_fqid;
4691 nctx->desired_cpu = cpu;
4692 nctx->cb = dpaa2_caam_fqdan_cb;
4693
4694 /* Register notification callbacks */
4695 ppriv->dpio = dpaa2_io_service_select(cpu);
4696 err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
4697 if (unlikely(err)) {
4698 dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
4699 nctx->cb = NULL;
4700 /*
4701 * If no affine DPIO for this core, there's probably
4702 * none available for next cores either. Signal we want
4703 * to retry later, in case the DPIO devices weren't
4704 * probed yet.
4705 */
4706 err = -EPROBE_DEFER;
4707 goto err;
4708 }
4709
4710 ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
4711 dev);
4712 if (unlikely(!ppriv->store)) {
4713 dev_err(dev, "dpaa2_io_store_create() failed\n");
4714 err = -ENOMEM;
4715 goto err;
4716 }
4717
4718 if (++i == priv->num_pairs)
4719 break;
4720 }
4721
4722 return 0;
4723
4724 err:
4725 for_each_online_cpu(cpu) {
4726 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4727 if (!ppriv->nctx.cb)
4728 break;
4729 dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
4730 }
4731
4732 for_each_online_cpu(cpu) {
4733 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4734 if (!ppriv->store)
4735 break;
4736 dpaa2_io_store_destroy(ppriv->store);
4737 }
4738
4739 return err;
4740 }
4741
4742 static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
4743 {
4744 struct dpaa2_caam_priv_per_cpu *ppriv;
4745 int i = 0, cpu;
4746
4747 for_each_online_cpu(cpu) {
4748 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4749 dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
4750 priv->dev);
4751 dpaa2_io_store_destroy(ppriv->store);
4752
4753 if (++i == priv->num_pairs)
4754 return;
4755 }
4756 }
4757
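/*
 * dpaa2_dpseci_bind - point each DPSECI Rx queue at the DPIO selected for
 * its CPU, so that frame availability notifications end up in
 * dpaa2_caam_fqdan_cb().
 */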
4758 static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
4759 {
4760 struct dpseci_rx_queue_cfg rx_queue_cfg;
4761 struct device *dev = priv->dev;
4762 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4763 struct dpaa2_caam_priv_per_cpu *ppriv;
4764 int err = 0, i = 0, cpu;
4765
4766 /* Configure Rx queues */
4767 for_each_online_cpu(cpu) {
4768 ppriv = per_cpu_ptr(priv->ppriv, cpu);
4769
4770 rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
4771 DPSECI_QUEUE_OPT_USER_CTX;
4772 rx_queue_cfg.order_preservation_en = 0;
4773 rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
4774 rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
4775 /*
4776 * Rx priority (WQ) doesn't really matter, since we use
4777 * pull mode, i.e. volatile dequeues from specific FQs
4778 */
4779 rx_queue_cfg.dest_cfg.priority = 0;
4780 rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
4781
4782 err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
4783 &rx_queue_cfg);
4784 if (err) {
4785 dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
4786 err);
4787 return err;
4788 }
4789
4790 if (++i == priv->num_pairs)
4791 break;
4792 }
4793
4794 return err;
4795 }
4796
4797 static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
4798 {
4799 struct device *dev = priv->dev;
4800
4801 if (!priv->cscn_mem)
4802 return;
4803
4804 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4805 kfree(priv->cscn_mem);
4806 }
4807
4808 static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
4809 {
4810 struct device *dev = priv->dev;
4811 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
4812 int err;
4813
4814 if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
4815 err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
4816 if (err)
4817 dev_err(dev, "dpseci_reset() failed\n");
4818 }
4819
4820 dpaa2_dpseci_congestion_free(priv);
4821 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
4822 }
4823
4824 static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
4825 const struct dpaa2_fd *fd)
4826 {
4827 struct caam_request *req;
4828 u32 fd_err;
4829
4830 if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
4831 dev_err(priv->dev, "Only Frame List FD format is supported!\n");
4832 return;
4833 }
4834
4835 fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
4836 if (unlikely(fd_err))
4837 dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);
4838
4839 /*
4840 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
4841 * in FD[ERR] or FD[FRC].
4842 */
4843 req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
4844 dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
4845 DMA_BIDIRECTIONAL);
4846 req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
4847 }
4848
4849 static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
4850 {
4851 int err;
4852
4853 /* Retry while portal is busy */
4854 do {
4855 err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
4856 ppriv->store);
4857 } while (err == -EBUSY);
4858
4859 if (unlikely(err))
4860 dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
4861
4862 return err;
4863 }
4864
4865 static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
4866 {
4867 struct dpaa2_dq *dq;
4868 int cleaned = 0, is_last;
4869
4870 do {
4871 dq = dpaa2_io_store_next(ppriv->store, &is_last);
4872 if (unlikely(!dq)) {
4873 if (unlikely(!is_last)) {
4874 dev_dbg(ppriv->priv->dev,
4875 "FQ %d returned no valid frames\n",
4876 ppriv->rsp_fqid);
4877 /*
4878 * MUST retry until we get some sort of
4879 * valid response token (be it "empty dequeue"
4880 * or a valid frame).
4881 */
4882 continue;
4883 }
4884 break;
4885 }
4886
4887 /* Process FD */
4888 dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
4889 cleaned++;
4890 } while (!is_last);
4891
4892 return cleaned;
4893 }
4894
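/*
 * dpaa2_dpseci_poll - NAPI poll handler: repeatedly pull the response FQ
 * and consume the store until either no frames are returned or the budget
 * no longer covers a full store, then re-arm notifications if the budget
 * was not exhausted.
 */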
4895 static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
4896 {
4897 struct dpaa2_caam_priv_per_cpu *ppriv;
4898 struct dpaa2_caam_priv *priv;
4899 int err, cleaned = 0, store_cleaned;
4900
4901 ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
4902 priv = ppriv->priv;
4903
4904 if (unlikely(dpaa2_caam_pull_fq(ppriv)))
4905 return 0;
4906
4907 do {
4908 store_cleaned = dpaa2_caam_store_consume(ppriv);
4909 cleaned += store_cleaned;
4910
4911 if (store_cleaned == 0 ||
4912 cleaned > budget - DPAA2_CAAM_STORE_SIZE)
4913 break;
4914
4915 /* Try to dequeue some more */
4916 err = dpaa2_caam_pull_fq(ppriv);
4917 if (unlikely(err))
4918 break;
4919 } while (1);
4920
4921 if (cleaned < budget) {
4922 napi_complete_done(napi, cleaned);
4923 err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
4924 if (unlikely(err))
4925 dev_err(priv->dev, "Notification rearm failed: %d\n",
4926 err);
4927 }
4928
4929 return cleaned;
4930 }
4931
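/*
 * dpaa2_dpseci_congestion_setup - allocate and DMA-map the congestion state
 * change notification (CSCN) area and ask the MC firmware to write it on
 * congestion entry/exit, so the enqueue path can back-pressure requests.
 */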
4932 static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
4933 u16 token)
4934 {
4935 struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
4936 struct device *dev = priv->dev;
4937 unsigned int alignmask;
4938 int err;
4939
4940 /*
4941 * Congestion group feature supported starting with DPSECI API v5.1
4942 * and only when object has been created with this capability.
4943 */
4944 if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
4945 !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
4946 return 0;
4947
4948 alignmask = DPAA2_CSCN_ALIGN - 1;
4949 alignmask |= dma_get_cache_alignment() - 1;
4950 priv->cscn_mem = kzalloc(ALIGN(DPAA2_CSCN_SIZE, alignmask + 1),
4951 GFP_KERNEL);
4952 if (!priv->cscn_mem)
4953 return -ENOMEM;
4954
4955 priv->cscn_dma = dma_map_single(dev, priv->cscn_mem,
4956 DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4957 if (dma_mapping_error(dev, priv->cscn_dma)) {
4958 dev_err(dev, "Error mapping CSCN memory area\n");
4959 err = -ENOMEM;
4960 goto err_dma_map;
4961 }
4962
4963 cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
4964 cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
4965 cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
4966 cong_notif_cfg.message_ctx = (uintptr_t)priv;
4967 cong_notif_cfg.message_iova = priv->cscn_dma;
4968 cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
4969 DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
4970 DPSECI_CGN_MODE_COHERENT_WRITE;
4971
4972 err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
4973 &cong_notif_cfg);
4974 if (err) {
4975 dev_err(dev, "dpseci_set_congestion_notification failed\n");
4976 goto err_set_cong;
4977 }
4978
4979 return 0;
4980
4981 err_set_cong:
4982 dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
4983 err_dma_map:
4984 kfree(priv->cscn_mem);
4985
4986 return err;
4987 }
4988
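/*
 * dpaa2_dpseci_setup - open the DPSECI object, query its API version,
 * read its attributes and queue configuration, set up congestion
 * notifications and prepare one NAPI instance per Rx/Tx queue pair.
 */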
4989 static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
4990 {
4991 struct device *dev = &ls_dev->dev;
4992 struct dpaa2_caam_priv *priv;
4993 struct dpaa2_caam_priv_per_cpu *ppriv;
4994 int err, cpu;
4995 u8 i;
4996
4997 priv = dev_get_drvdata(dev);
4998
4999 priv->dev = dev;
5000 priv->dpsec_id = ls_dev->obj_desc.id;
5001
5002 /* Get a handle for the DPSECI this interface is associated with */
5003 err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
5004 if (err) {
5005 dev_err(dev, "dpseci_open() failed: %d\n", err);
5006 goto err_open;
5007 }
5008
5009 err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
5010 &priv->minor_ver);
5011 if (err) {
5012 dev_err(dev, "dpseci_get_api_version() failed\n");
5013 goto err_get_vers;
5014 }
5015
5016 dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
5017
5018 if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
5019 err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
5020 if (err) {
5021 dev_err(dev, "dpseci_reset() failed\n");
5022 goto err_get_vers;
5023 }
5024 }
5025
5026 err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
5027 &priv->dpseci_attr);
5028 if (err) {
5029 dev_err(dev, "dpseci_get_attributes() failed\n");
5030 goto err_get_vers;
5031 }
5032
5033 err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
5034 &priv->sec_attr);
5035 if (err) {
5036 dev_err(dev, "dpseci_get_sec_attr() failed\n");
5037 goto err_get_vers;
5038 }
5039
5040 err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
5041 if (err) {
5042 dev_err(dev, "setup_congestion() failed\n");
5043 goto err_get_vers;
5044 }
5045
5046 priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
5047 priv->dpseci_attr.num_tx_queues);
5048 if (priv->num_pairs > num_online_cpus()) {
5049 dev_warn(dev, "%d queues won't be used\n",
5050 priv->num_pairs - num_online_cpus());
5051 priv->num_pairs = num_online_cpus();
5052 }
5053
5054 for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
5055 err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
5056 &priv->rx_queue_attr[i]);
5057 if (err) {
5058 dev_err(dev, "dpseci_get_rx_queue() failed\n");
5059 goto err_get_rx_queue;
5060 }
5061 }
5062
5063 for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
5064 err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
5065 &priv->tx_queue_attr[i]);
5066 if (err) {
5067 dev_err(dev, "dpseci_get_tx_queue() failed\n");
5068 goto err_get_rx_queue;
5069 }
5070 }
5071
5072 i = 0;
5073 for_each_online_cpu(cpu) {
5074 u8 j;
5075
5076 j = i % priv->num_pairs;
5077
5078 ppriv = per_cpu_ptr(priv->ppriv, cpu);
5079 ppriv->req_fqid = priv->tx_queue_attr[j].fqid;
5080
5081 /*
5082 * Allow all cores to enqueue, while only some of them
5083 * will take part in dequeuing.
5084 */
5085 if (++i > priv->num_pairs)
5086 continue;
5087
5088 ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
5089 ppriv->prio = j;
5090
5091 dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
5092 priv->rx_queue_attr[j].fqid,
5093 priv->tx_queue_attr[j].fqid);
5094
5095 ppriv->net_dev.dev = *dev;
5096 INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
5097 netif_napi_add_tx_weight(&ppriv->net_dev, &ppriv->napi,
5098 dpaa2_dpseci_poll,
5099 DPAA2_CAAM_NAPI_WEIGHT);
5100 }
5101
5102 return 0;
5103
5104 err_get_rx_queue:
5105 dpaa2_dpseci_congestion_free(priv);
5106 err_get_vers:
5107 dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
5108 err_open:
5109 return err;
5110 }
5111
5112 static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
5113 {
5114 struct device *dev = priv->dev;
5115 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5116 struct dpaa2_caam_priv_per_cpu *ppriv;
5117 int i;
5118
5119 for (i = 0; i < priv->num_pairs; i++) {
5120 ppriv = per_cpu_ptr(priv->ppriv, i);
5121 napi_enable(&ppriv->napi);
5122 }
5123
5124 return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
5125 }
5126
5127 static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
5128 {
5129 struct device *dev = priv->dev;
5130 struct dpaa2_caam_priv_per_cpu *ppriv;
5131 struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
5132 int i, err = 0, enabled;
5133
5134 err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
5135 if (err) {
5136 dev_err(dev, "dpseci_disable() failed\n");
5137 return err;
5138 }
5139
5140 err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
5141 if (err) {
5142 dev_err(dev, "dpseci_is_enabled() failed\n");
5143 return err;
5144 }
5145
5146 dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
5147
5148 for (i = 0; i < priv->num_pairs; i++) {
5149 ppriv = per_cpu_ptr(priv->ppriv, i);
5150 napi_disable(&ppriv->napi);
5151 netif_napi_del(&ppriv->napi);
5152 }
5153
5154 return 0;
5155 }
5156
5157 static struct list_head hash_list;
5158
5159 static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
5160 {
5161 struct device *dev;
5162 struct dpaa2_caam_priv *priv;
5163 int i, err = 0;
5164 bool registered = false;
5165
5166 /*
5167 * There is no way to get CAAM endianness - there is no direct register
5168 * space access and MC f/w does not provide this attribute.
5169 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
5170 * property.
5171 */
5172 caam_little_end = true;
5173
5174 caam_imx = false;
5175
5176 dev = &dpseci_dev->dev;
5177
5178 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
5179 if (!priv)
5180 return -ENOMEM;
5181
5182 dev_set_drvdata(dev, priv);
5183
5184 priv->domain = iommu_get_domain_for_dev(dev);
5185
5186 qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
5187 0, 0, NULL);
5188 if (!qi_cache) {
5189 dev_err(dev, "Can't allocate SEC cache\n");
5190 return -ENOMEM;
5191 }
5192
5193 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
5194 if (err) {
5195 dev_err(dev, "dma_set_mask_and_coherent() failed\n");
5196 goto err_dma_mask;
5197 }
5198
5199 /* Obtain a MC portal */
5200 err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
5201 if (err) {
5202 if (err == -ENXIO)
5203 err = -EPROBE_DEFER;
5204 else
5205 dev_err(dev, "MC portal allocation failed\n");
5206
5207 goto err_dma_mask;
5208 }
5209
5210 priv->ppriv = alloc_percpu(*priv->ppriv);
5211 if (!priv->ppriv) {
5212 dev_err(dev, "alloc_percpu() failed\n");
5213 err = -ENOMEM;
5214 goto err_alloc_ppriv;
5215 }
5216
5217 /* DPSECI initialization */
5218 err = dpaa2_dpseci_setup(dpseci_dev);
5219 if (err) {
5220 dev_err(dev, "dpaa2_dpseci_setup() failed\n");
5221 goto err_dpseci_setup;
5222 }
5223
5224 /* DPIO */
5225 err = dpaa2_dpseci_dpio_setup(priv);
5226 if (err) {
5227 dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n");
5228 goto err_dpio_setup;
5229 }
5230
5231 /* DPSECI binding to DPIO */
5232 err = dpaa2_dpseci_bind(priv);
5233 if (err) {
5234 dev_err(dev, "dpaa2_dpseci_bind() failed\n");
5235 goto err_bind;
5236 }
5237
5238 /* DPSECI enable */
5239 err = dpaa2_dpseci_enable(priv);
5240 if (err) {
5241 dev_err(dev, "dpaa2_dpseci_enable() failed\n");
5242 goto err_bind;
5243 }
5244
5245 dpaa2_dpseci_debugfs_init(priv);
5246
5247 /* register crypto algorithms the device supports */
5248 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5249 struct caam_skcipher_alg *t_alg = driver_algs + i;
5250 u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
5251
5252 /* Skip DES algorithms if not supported by device */
5253 if (!priv->sec_attr.des_acc_num &&
5254 (alg_sel == OP_ALG_ALGSEL_3DES ||
5255 alg_sel == OP_ALG_ALGSEL_DES))
5256 continue;
5257
5258 /* Skip AES algorithms if not supported by device */
5259 if (!priv->sec_attr.aes_acc_num &&
5260 alg_sel == OP_ALG_ALGSEL_AES)
5261 continue;
5262
5263 /* Skip CHACHA20 algorithms if not supported by device */
5264 if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5265 !priv->sec_attr.ccha_acc_num)
5266 continue;
5267
5268 t_alg->caam.dev = dev;
5269 caam_skcipher_alg_init(t_alg);
5270
5271 err = crypto_register_skcipher(&t_alg->skcipher);
5272 if (err) {
5273 dev_warn(dev, "%s alg registration failed: %d\n",
5274 t_alg->skcipher.base.cra_driver_name, err);
5275 continue;
5276 }
5277
5278 t_alg->registered = true;
5279 registered = true;
5280 }
5281
5282 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5283 struct caam_aead_alg *t_alg = driver_aeads + i;
5284 u32 c1_alg_sel = t_alg->caam.class1_alg_type &
5285 OP_ALG_ALGSEL_MASK;
5286 u32 c2_alg_sel = t_alg->caam.class2_alg_type &
5287 OP_ALG_ALGSEL_MASK;
5288
5289 /* Skip DES algorithms if not supported by device */
5290 if (!priv->sec_attr.des_acc_num &&
5291 (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
5292 c1_alg_sel == OP_ALG_ALGSEL_DES))
5293 continue;
5294
5295 /* Skip AES algorithms if not supported by device */
5296 if (!priv->sec_attr.aes_acc_num &&
5297 c1_alg_sel == OP_ALG_ALGSEL_AES)
5298 continue;
5299
5300 /* Skip CHACHA20 algorithms if not supported by device */
5301 if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
5302 !priv->sec_attr.ccha_acc_num)
5303 continue;
5304
5305 /* Skip POLY1305 algorithms if not supported by device */
5306 if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
5307 !priv->sec_attr.ptha_acc_num)
5308 continue;
5309
5310 /*
5311 * Skip algorithms requiring message digests
5312 * if MD not supported by device.
5313 */
5314 if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
5315 !priv->sec_attr.md_acc_num)
5316 continue;
5317
5318 t_alg->caam.dev = dev;
5319 caam_aead_alg_init(t_alg);
5320
5321 err = crypto_register_aead(&t_alg->aead);
5322 if (err) {
5323 dev_warn(dev, "%s alg registration failed: %d\n",
5324 t_alg->aead.base.cra_driver_name, err);
5325 continue;
5326 }
5327
5328 t_alg->registered = true;
5329 registered = true;
5330 }
5331 if (registered)
5332 dev_info(dev, "algorithms registered in /proc/crypto\n");
5333
5334 /* register hash algorithms the device supports */
5335 INIT_LIST_HEAD(&hash_list);
5336
5337 /*
5338 * Skip registration of any hashing algorithms if MD block
5339 * is not present.
5340 */
5341 if (!priv->sec_attr.md_acc_num)
5342 return 0;
5343
5344 for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
5345 struct caam_hash_alg *t_alg;
5346 struct caam_hash_template *alg = driver_hash + i;
5347
5348 /* register hmac version */
5349 t_alg = caam_hash_alloc(dev, alg, true);
5350 if (IS_ERR(t_alg)) {
5351 err = PTR_ERR(t_alg);
5352 dev_warn(dev, "%s hash alg allocation failed: %d\n",
5353 alg->hmac_driver_name, err);
5354 continue;
5355 }
5356
5357 err = crypto_register_ahash(&t_alg->ahash_alg);
5358 if (err) {
5359 dev_warn(dev, "%s alg registration failed: %d\n",
5360 t_alg->ahash_alg.halg.base.cra_driver_name,
5361 err);
5362 kfree(t_alg);
5363 } else {
5364 list_add_tail(&t_alg->entry, &hash_list);
5365 }
5366
5367 /* register unkeyed version */
5368 t_alg = caam_hash_alloc(dev, alg, false);
5369 if (IS_ERR(t_alg)) {
5370 err = PTR_ERR(t_alg);
5371 dev_warn(dev, "%s alg allocation failed: %d\n",
5372 alg->driver_name, err);
5373 continue;
5374 }
5375
5376 err = crypto_register_ahash(&t_alg->ahash_alg);
5377 if (err) {
5378 dev_warn(dev, "%s alg registration failed: %d\n",
5379 t_alg->ahash_alg.halg.base.cra_driver_name,
5380 err);
5381 kfree(t_alg);
5382 } else {
5383 list_add_tail(&t_alg->entry, &hash_list);
5384 }
5385 }
5386 if (!list_empty(&hash_list))
5387 dev_info(dev, "hash algorithms registered in /proc/crypto\n");
5388
5389 return err;
5390
5391 err_bind:
5392 dpaa2_dpseci_dpio_free(priv);
5393 err_dpio_setup:
5394 dpaa2_dpseci_free(priv);
5395 err_dpseci_setup:
5396 free_percpu(priv->ppriv);
5397 err_alloc_ppriv:
5398 fsl_mc_portal_free(priv->mc_io);
5399 err_dma_mask:
5400 kmem_cache_destroy(qi_cache);
5401
5402 return err;
5403 }
5404
5405 static void __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
5406 {
5407 struct device *dev;
5408 struct dpaa2_caam_priv *priv;
5409 int i;
5410
5411 dev = &ls_dev->dev;
5412 priv = dev_get_drvdata(dev);
5413
5414 dpaa2_dpseci_debugfs_exit(priv);
5415
5416 for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
5417 struct caam_aead_alg *t_alg = driver_aeads + i;
5418
5419 if (t_alg->registered)
5420 crypto_unregister_aead(&t_alg->aead);
5421 }
5422
5423 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
5424 struct caam_skcipher_alg *t_alg = driver_algs + i;
5425
5426 if (t_alg->registered)
5427 crypto_unregister_skcipher(&t_alg->skcipher);
5428 }
5429
5430 if (hash_list.next) {
5431 struct caam_hash_alg *t_hash_alg, *p;
5432
5433 list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
5434 crypto_unregister_ahash(&t_hash_alg->ahash_alg);
5435 list_del(&t_hash_alg->entry);
5436 kfree(t_hash_alg);
5437 }
5438 }
5439
5440 dpaa2_dpseci_disable(priv);
5441 dpaa2_dpseci_dpio_free(priv);
5442 dpaa2_dpseci_free(priv);
5443 free_percpu(priv->ppriv);
5444 fsl_mc_portal_free(priv->mc_io);
5445 kmem_cache_destroy(qi_cache);
5446 }
5447
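/*
 * dpaa2_caam_enqueue - build a frame list FD around the caller's frame
 * list table and enqueue it to this CPU's request FQ, honouring congestion
 * back-pressure and retrying while the portal is busy. Returns
 * -EINPROGRESS on success; completion is reported through req->cbk.
 */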
5448 int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
5449 {
5450 struct dpaa2_fd fd;
5451 struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
5452 struct dpaa2_caam_priv_per_cpu *ppriv;
5453 int err = 0, i;
5454
5455 if (IS_ERR(req))
5456 return PTR_ERR(req);
5457
5458 if (priv->cscn_mem) {
5459 dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
5460 DPAA2_CSCN_SIZE,
5461 DMA_FROM_DEVICE);
5462 if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
5463 dev_dbg_ratelimited(dev, "Dropping request\n");
5464 return -EBUSY;
5465 }
5466 }
5467
5468 dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
5469
5470 req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
5471 DMA_BIDIRECTIONAL);
5472 if (dma_mapping_error(dev, req->fd_flt_dma)) {
5473 dev_err(dev, "DMA mapping error for QI enqueue request\n");
5474 goto err_out;
5475 }
5476
5477 memset(&fd, 0, sizeof(fd));
5478 dpaa2_fd_set_format(&fd, dpaa2_fd_list);
5479 dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
5480 dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
5481 dpaa2_fd_set_flc(&fd, req->flc_dma);
5482
5483 ppriv = raw_cpu_ptr(priv->ppriv);
5484 for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
5485 err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
5486 &fd);
5487 if (err != -EBUSY)
5488 break;
5489
5490 cpu_relax();
5491 }
5492
5493 if (unlikely(err)) {
5494 dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
5495 goto err_out;
5496 }
5497
5498 return -EINPROGRESS;
5499
5500 err_out:
5501 dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
5502 DMA_BIDIRECTIONAL);
5503 return -EIO;
5504 }
5505 EXPORT_SYMBOL(dpaa2_caam_enqueue);
5506
5507 static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
5508 {
5509 .vendor = FSL_MC_VENDOR_FREESCALE,
5510 .obj_type = "dpseci",
5511 },
5512 { .vendor = 0x0 }
5513 };
5514 MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);
5515
5516 static struct fsl_mc_driver dpaa2_caam_driver = {
5517 .driver = {
5518 .name = KBUILD_MODNAME,
5519 .owner = THIS_MODULE,
5520 },
5521 .probe = dpaa2_caam_probe,
5522 .remove = dpaa2_caam_remove,
5523 .match_id_table = dpaa2_caam_match_id_table
5524 };
5525
5526 MODULE_LICENSE("Dual BSD/GPL");
5527 MODULE_AUTHOR("Freescale Semiconductor, Inc");
5528 MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
5529
5530 module_fsl_mc_driver(dpaa2_caam_driver);
5531