/**
 * Routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
#include <asm/hvcall.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"


/**
 * nx_hcall_sync - make an H_COP_OP hcall for the passed in op structure
 *
 * @nx_ctx: the crypto context handle
 * @op: PFO operation struct to pass in
 * @may_sleep: flag indicating the request can sleep
 *
 * Make the hcall, retrying while the hardware is busy. If we cannot yield
 * the thread, limit the number of retries to 10 here.
 */
int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
		  struct vio_pfo_op *op,
		  u32 may_sleep)
{
	int rc, retries = 10;
	struct vio_dev *viodev = nx_driver.viodev;

	atomic_inc(&(nx_ctx->stats->sync_ops));

	do {
		rc = vio_h_cop_sync(viodev, op);
	} while (rc == -EBUSY && !may_sleep && retries--);

	if (rc) {
		dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
			"hcall rc: %ld\n", rc, op->hcall_err);
		atomic_inc(&(nx_ctx->stats->errors));
		atomic_set(&(nx_ctx->stats->last_error), op->hcall_err);
		atomic_set(&(nx_ctx->stats->last_error_pid), current->pid);
	}

	return rc;
}

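/*
 * Example (illustrative sketch only, not driver code): a typical caller
 * submits the context's prebuilt op and then accounts for the bytes the
 * hardware reported as processed. The stats field named here is an
 * assumption based on the layout in nx.h:
 *
 *	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
 *			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
 *	if (rc)
 *		goto out;
 *	atomic64_add(csbcpb->csb.processed_byte_count,
 *		     &(nx_ctx->stats->aes_bytes));
 */
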
/**
 * nx_build_sg_list - build an NX scatter list describing a single buffer
 *
 * @sg_head: pointer to the first scatter list element to build
 * @start_addr: pointer to the linear buffer
 * @len: length of the data at @start_addr; updated on return to the
 *       number of bytes actually described
 * @sgmax: the largest number of scatter list elements we're allowed to create
 *
 * This function will start writing nx_sg elements at @sg_head and keep
 * writing them until all of the data from @start_addr is described or
 * until @sgmax elements have been written. Scatter list elements will be
 * created such that none of the elements describes a buffer that crosses a 4K
 * boundary.
 */
struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
			       u8 *start_addr,
			       unsigned int *len,
			       u32 sgmax)
{
	unsigned int sg_len = 0;
	struct nx_sg *sg;
	u64 sg_addr = (u64)start_addr;
	u64 end_addr;

	/* determine the start and end for this address range - slightly
	 * different if this is in VMALLOC_REGION */
	if (is_vmalloc_addr(start_addr))
		sg_addr = page_to_phys(vmalloc_to_page(start_addr))
			  + offset_in_page(sg_addr);
	else
		sg_addr = __pa(sg_addr);

	end_addr = sg_addr + *len;

	/* each iteration will write one struct nx_sg element and add the
	 * length of data described by that element to sg_len. Once @len bytes
	 * have been described (or @sgmax elements have been written), the
	 * loop ends. min_t is used to ensure @end_addr falls on the same page
	 * as sg_addr; if it does not, we create another nx_sg element for the
	 * data on the next page.
	 *
	 * Also, when using vmalloc'ed data, the physical address must be
	 * re-calculated every time a system page boundary is crossed.
	 */
	for (sg = sg_head; sg_len < *len; sg++) {
		u64 next_page;

		sg->addr = sg_addr;
		sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
				end_addr);

		next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
		sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
		sg_len += sg->len;

		if (sg_addr >= next_page &&
		    is_vmalloc_addr(start_addr + sg_len)) {
			sg_addr = page_to_phys(vmalloc_to_page(
						start_addr + sg_len));
			end_addr = sg_addr + *len - sg_len;
		}

		if ((sg - sg_head) == sgmax) {
			pr_err("nx: scatter/gather list overflow, pid: %d\n",
			       current->pid);
			sg++;
			break;
		}
	}
	*len = sg_len;

	/* return the moved sg_head pointer */
	return sg;
}

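/*
 * Example (illustrative sketch only): describing one linear buffer with
 * nx_build_sg_list. On return, len holds the bytes actually described
 * and the returned pointer is one past the last element written, so the
 * negative byte-count convention used for op.inlen (see
 * nx_build_sg_lists below) follows directly:
 *
 *	struct nx_sg *end;
 *	unsigned int len = AES_BLOCK_SIZE;
 *
 *	end = nx_build_sg_list(nx_ctx->in_sg, (u8 *)in, &len,
 *			       nx_ctx->ap->sglen);
 *	nx_ctx->op.inlen = (nx_ctx->in_sg - end) * sizeof(struct nx_sg);
 */
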
/**
 * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist
 *
 * @nx_dst: pointer to the first nx_sg element to write
 * @sglen: max number of nx_sg entries we're allowed to write
 * @sg_src: pointer to the source linux scatterlist to walk
 * @start: number of bytes to fast-forward past at the beginning of @sg_src
 * @src_len: number of bytes to walk in @sg_src; updated on return to the
 *           number of bytes actually described
 */
struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
				unsigned int sglen,
				struct scatterlist *sg_src,
				unsigned int start,
				unsigned int *src_len)
{
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_dst;
	unsigned int n, offset = 0, len = *src_len;
	char *dst;

	/* we need to fast forward through @start bytes first */
	for (;;) {
		scatterwalk_start(&walk, sg_src);

		if (start < offset + sg_src->length)
			break;

		offset += sg_src->length;
		sg_src = sg_next(sg_src);
	}

	/* start - offset is the number of bytes to advance in the scatterlist
	 * element we're currently looking at */
	scatterwalk_advance(&walk, start - offset);

	while (len && (nx_sg - nx_dst) < sglen) {
		n = scatterwalk_clamp(&walk, len);
		if (!n) {
			/* In cases where we have a scatterlist chain,
			 * sg_next handles it properly */
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		dst = scatterwalk_map(&walk);

		nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
		len -= n;

		scatterwalk_unmap(dst);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
	}
	/* update to_process */
	*src_len -= len;

	/* return the moved destination pointer */
	return nx_sg;
}

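/*
 * Example (illustrative sketch only, variable names assumed): the AEAD
 * paths use this helper to skip already-processed bytes of a request's
 * source scatterlist before describing the next chunk:
 *
 *	unsigned int to_process = nbytes - processed;
 *
 *	in_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
 *				  req->src, processed, &to_process);
 */
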
/**
 * trim_sg_list - crop an sg list so that it fits within the given bound
 * @sg: sg list head
 * @end: sg list end
 * @delta: the number of bytes we need to crop in order to bound the list
 * @nbytes: length of data in the scatterlists; updated if data must be
 *          put back for later processing
 */
static long int trim_sg_list(struct nx_sg *sg,
			     struct nx_sg *end,
			     unsigned int delta,
			     unsigned int *nbytes)
{
	long int oplen;
	long int data_back;
	unsigned int is_delta = delta;

	while (delta && end > sg) {
		struct nx_sg *last = end - 1;

		if (last->len > delta) {
			last->len -= delta;
			delta = 0;
		} else {
			end--;
			delta -= last->len;
		}
	}

	/* There are cases where we need to crop the list in order to make it
	 * a block size multiple, but we also need to align data. In order to
	 * do that, we need to calculate how much data to put back to be
	 * processed later.
	 */
	oplen = (sg - end) * sizeof(struct nx_sg);
	if (is_delta) {
		data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len;
		data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
		*nbytes -= data_back;
	}

	return oplen;
}

/**
 * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
 *                     scatterlists based on them.
 *
 * @nx_ctx: NX crypto context for the lists we're building
 * @desc: the block cipher descriptor for the operation
 * @dst: destination scatterlist
 * @src: source scatterlist
 * @nbytes: length of data described in the scatterlists
 * @offset: number of bytes to fast-forward past at the beginning of
 *          scatterlists.
 * @iv: destination for the iv data, if the algorithm requires it
 *
 * This is common code shared by all the AES algorithms. It uses the block
 * cipher walk routines to traverse input and output scatterlists, building
 * corresponding NX scatterlists.
 */
int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
		      struct blkcipher_desc *desc,
		      struct scatterlist *dst,
		      struct scatterlist *src,
		      unsigned int *nbytes,
		      unsigned int offset,
		      u8 *iv)
{
	unsigned int delta = 0;
	unsigned int total = *nbytes;
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int max_sg_len;

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	if (iv)
		memcpy(iv, desc->info, AES_BLOCK_SIZE);

	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);

	nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst,
				     offset, nbytes);
	nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src,
				    offset, nbytes);

	if (*nbytes < total)
		delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1));

	/* these lengths should be negative, which will indicate to phyp that
	 * the input and output parameters are scatterlists, not linear
	 * buffers */
	nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
	nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes);

	return 0;
}

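/*
 * Example (illustrative sketch only, mirroring the call pattern the AES
 * routines are expected to follow): build both lists, confirm each
 * direction produced at least one element, then submit the op. The
 * aes_cbc CPB field is an assumption based on nx_csbcpb.h:
 *
 *	rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
 *			       processed, csbcpb->cpb.aes_cbc.iv);
 *	if (rc)
 *		goto out;
 *	if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
 *		rc = -EINVAL;
 *		goto out;
 *	}
 *	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
 *			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
 */
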
/**
 * nx_ctx_init - initialize an nx_ctx's vio_pfo_op struct
 *
 * @nx_ctx: the nx context to initialize
 * @function: the function code for the op
 */
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
{
	spin_lock_init(&nx_ctx->lock);
	memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
	nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;

	nx_ctx->op.flags = function;
	nx_ctx->op.csbcpb = __pa(nx_ctx->csbcpb);
	nx_ctx->op.in = __pa(nx_ctx->in_sg);
	nx_ctx->op.out = __pa(nx_ctx->out_sg);

	if (nx_ctx->csbcpb_aead) {
		nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT;

		nx_ctx->op_aead.flags = function;
		nx_ctx->op_aead.csbcpb = __pa(nx_ctx->csbcpb_aead);
		nx_ctx->op_aead.in = __pa(nx_ctx->in_sg);
		nx_ctx->op_aead.out = __pa(nx_ctx->out_sg);
	}
}

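/*
 * Example (illustrative sketch only): setkey paths are expected to
 * reset the context before programming the CPB. HCOP_FC_AES and the
 * aes_cbc key layout are assumptions based on nx_csbcpb.h:
 *
 *	nx_ctx_init(nx_ctx, HCOP_FC_AES);
 *	memcpy(csbcpb->cpb.aes_cbc.key, in_key, key_len);
 */
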
static void nx_of_update_status(struct device *dev,
				struct property *p,
				struct nx_of *props)
{
	if (!strncmp(p->value, "okay", p->length)) {
		props->status = NX_WAITING;
		props->flags |= NX_OF_FLAG_STATUS_SET;
	} else {
		dev_info(dev, "%s: status '%s' is not 'okay'\n", __func__,
			 (char *)p->value);
	}
}

static void nx_of_update_sglen(struct device *dev,
			       struct property *p,
			       struct nx_of *props)
{
	if (p->length != sizeof(props->max_sg_len)) {
		dev_err(dev, "%s: unexpected format for "
			"ibm,max-sg-len property\n", __func__);
		dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes "
			"long, expected %zd bytes\n", __func__,
			p->length, sizeof(props->max_sg_len));
		return;
	}

	props->max_sg_len = *(u32 *)p->value;
	props->flags |= NX_OF_FLAG_MAXSGLEN_SET;
}

static void nx_of_update_msc(struct device *dev,
			     struct property *p,
			     struct nx_of *props)
{
	struct msc_triplet *trip;
	struct max_sync_cop *msc;
	unsigned int bytes_so_far, i, lenp;

	msc = (struct max_sync_cop *)p->value;
	lenp = p->length;

	/* You can't tell if the data read in for this property is sane by its
	 * size alone. This is because there are sizes embedded in the data
	 * structure. The best we can do is check lengths as we parse and bail
	 * as soon as a length error is detected. */
	bytes_so_far = 0;

	while ((bytes_so_far + sizeof(struct max_sync_cop)) <= lenp) {
		bytes_so_far += sizeof(struct max_sync_cop);

		trip = msc->trip;

		for (i = 0;
		     ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
		     i < msc->triplets;
		     i++) {
			if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) {
				dev_err(dev, "unknown function code/mode "
					"combo: %d/%d (ignored)\n", msc->fc,
					msc->mode);
				goto next_loop;
			}

			if (!trip->sglen || trip->databytelen < NX_PAGE_SIZE) {
				dev_warn(dev, "bogus sglen/databytelen: "
					 "%u/%u (ignored)\n", trip->sglen,
					 trip->databytelen);
				goto next_loop;
			}

			switch (trip->keybitlen) {
			case 128:
			case 160:
				props->ap[msc->fc][msc->mode][0].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][0].sglen =
					trip->sglen;
				break;
			case 192:
				props->ap[msc->fc][msc->mode][1].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][1].sglen =
					trip->sglen;
				break;
			case 256:
				if (msc->fc == NX_FC_AES) {
					props->ap[msc->fc][msc->mode][2].
						databytelen = trip->databytelen;
					props->ap[msc->fc][msc->mode][2].sglen =
						trip->sglen;
				} else if (msc->fc == NX_FC_AES_HMAC ||
					   msc->fc == NX_FC_SHA) {
					props->ap[msc->fc][msc->mode][1].
						databytelen = trip->databytelen;
					props->ap[msc->fc][msc->mode][1].sglen =
						trip->sglen;
				} else {
					dev_warn(dev, "unknown function "
						 "code/key bit len combo"
						 ": (%u/256)\n", msc->fc);
				}
				break;
			case 512:
				props->ap[msc->fc][msc->mode][2].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][2].sglen =
					trip->sglen;
				break;
			default:
				dev_warn(dev, "unknown function code/key bit "
					 "len combo: (%u/%u)\n", msc->fc,
					 trip->keybitlen);
				break;
			}
next_loop:
			bytes_so_far += sizeof(struct msc_triplet);
			trip++;
		}

		msc = (struct max_sync_cop *)trip;
	}

	props->flags |= NX_OF_FLAG_MAXSYNCCOP_SET;
}

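/*
 * For reference, the ibm,max-sync-cop property parsed above is a packed
 * sequence of records, sketched here from the walk logic (the exact
 * struct definitions live in nx.h):
 *
 *	u32 fc;			// function code
 *	u32 mode;		// mode for this fc
 *	u32 triplets;		// number of triplets that follow
 *	// then, repeated 'triplets' times:
 *	u32 keybitlen;		// key size this triplet describes
 *	u32 databytelen;	// max bytes per operation
 *	u32 sglen;		// max scatter/gather elements
 *
 * The embedded count is why the parser re-checks the remaining property
 * length before each header and each triplet instead of trusting
 * p->length alone.
 */
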
/**
 * nx_of_init - read Open Firmware values from the device tree
 *
 * @dev: device handle
 * @props: pointer to struct to hold the properties values
 *
 * Called once at driver probe time, this function will read out the
 * Open Firmware properties we use at runtime. If all the OF properties are
 * acceptable, when we exit this function props->flags will indicate that
 * we're ready to register our crypto algorithms.
 */
static void nx_of_init(struct device *dev, struct nx_of *props)
{
	struct device_node *base_node = dev->of_node;
	struct property *p;

	p = of_find_property(base_node, "status", NULL);
	if (!p)
		dev_info(dev, "%s: property 'status' not found\n", __func__);
	else
		nx_of_update_status(dev, p, props);

	p = of_find_property(base_node, "ibm,max-sg-len", NULL);
	if (!p)
		dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n",
			 __func__);
	else
		nx_of_update_sglen(dev, p, props);

	p = of_find_property(base_node, "ibm,max-sync-cop", NULL);
	if (!p)
		dev_info(dev, "%s: property 'ibm,max-sync-cop' not found\n",
			 __func__);
	else
		nx_of_update_msc(dev, p, props);
}

static bool nx_check_prop(struct device *dev, u32 fc, u32 mode, int slot)
{
	struct alg_props *props = &nx_driver.of.ap[fc][mode][slot];

	if (!props->sglen || props->databytelen < NX_PAGE_SIZE) {
		if (dev)
			dev_warn(dev, "bogus sglen/databytelen for %u/%u/%u: "
				 "%u/%u (ignored)\n", fc, mode, slot,
				 props->sglen, props->databytelen);
		return false;
	}

	return true;
}

static bool nx_check_props(struct device *dev, u32 fc, u32 mode)
{
	int i;

	for (i = 0; i < 3; i++)
		if (!nx_check_prop(dev, fc, mode, i))
			return false;

	return true;
}

static int nx_register_alg(struct crypto_alg *alg, u32 fc, u32 mode)
{
	return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
	       crypto_register_alg(alg) : 0;
}

static int nx_register_aead(struct aead_alg *alg, u32 fc, u32 mode)
{
	return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
	       crypto_register_aead(alg) : 0;
}

static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot)
{
	return (slot >= 0 ? nx_check_prop(&nx_driver.viodev->dev,
					  fc, mode, slot) :
			    nx_check_props(&nx_driver.viodev->dev, fc, mode)) ?
	       crypto_register_shash(alg) : 0;
}

static void nx_unregister_alg(struct crypto_alg *alg, u32 fc, u32 mode)
{
	if (nx_check_props(NULL, fc, mode))
		crypto_unregister_alg(alg);
}

static void nx_unregister_aead(struct aead_alg *alg, u32 fc, u32 mode)
{
	if (nx_check_props(NULL, fc, mode))
		crypto_unregister_aead(alg);
}

static void nx_unregister_shash(struct shash_alg *alg, u32 fc, u32 mode,
				int slot)
{
	if (slot >= 0 ? nx_check_prop(NULL, fc, mode, slot) :
			nx_check_props(NULL, fc, mode))
		crypto_unregister_shash(alg);
}

/**
 * nx_register_algs - register algorithms with the crypto API
 *
 * Called from nx_probe()
 *
 * If all OF properties are in an acceptable state, the driver flags will
 * indicate that we're ready and we'll create our debugfs files and register
 * our crypto algorithms.
 */
static int nx_register_algs(void)
{
	int rc = -1;

	if (nx_driver.of.flags != NX_OF_FLAG_MASK_READY)
		goto out;

	memset(&nx_driver.stats, 0, sizeof(struct nx_stats));

	rc = NX_DEBUGFS_INIT(&nx_driver);
	if (rc)
		goto out;

	nx_driver.of.status = NX_OKAY;

	rc = nx_register_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
	if (rc)
		goto out;

	rc = nx_register_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
	if (rc)
		goto out_unreg_ecb;

	rc = nx_register_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
	if (rc)
		goto out_unreg_cbc;

	rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
	if (rc)
		goto out_unreg_ctr3686;

	rc = nx_register_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
	if (rc)
		goto out_unreg_gcm;

	rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
	if (rc)
		goto out_unreg_gcm4106;

	rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
	if (rc)
		goto out_unreg_ccm;

	rc = nx_register_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
			       NX_PROPS_SHA256);
	if (rc)
		goto out_unreg_ccm4309;

	rc = nx_register_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
			       NX_PROPS_SHA512);
	if (rc)
		goto out_unreg_s256;

	rc = nx_register_shash(&nx_shash_aes_xcbc_alg,
			       NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
	if (rc)
		goto out_unreg_s512;

	goto out;

out_unreg_s512:
	nx_unregister_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
			    NX_PROPS_SHA512);
out_unreg_s256:
	nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
			    NX_PROPS_SHA256);
out_unreg_ccm4309:
	nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_ccm:
	nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_gcm4106:
	nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_gcm:
	nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_ctr3686:
	nx_unregister_alg(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
out_unreg_cbc:
	nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
out_unreg_ecb:
	nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
out:
	return rc;
}

/**
 * nx_crypto_ctx_init - create and initialize a crypto api context
 *
 * @nx_ctx: the crypto api context
 * @fc: function code for the context
 * @mode: the function code specific mode for this context
 */
static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
{
	if (nx_driver.of.status != NX_OKAY) {
		pr_err("Attempt to initialize NX crypto context while device "
		       "is not available!\n");
		return -ENODEV;
	}

	/* we need an extra page for csbcpb_aead for these modes */
	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
		nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) +
				   sizeof(struct nx_csbcpb);
	else
		nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
				   sizeof(struct nx_csbcpb);

	nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
	if (!nx_ctx->kmem)
		return -ENOMEM;

	/* the csbcpb and scatterlists must be 4K aligned pages */
	nx_ctx->csbcpb = (struct nx_csbcpb *)(round_up((u64)nx_ctx->kmem,
						       (u64)NX_PAGE_SIZE));
	nx_ctx->in_sg = (struct nx_sg *)((u8 *)nx_ctx->csbcpb + NX_PAGE_SIZE);
	nx_ctx->out_sg = (struct nx_sg *)((u8 *)nx_ctx->in_sg + NX_PAGE_SIZE);

	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
		nx_ctx->csbcpb_aead =
			(struct nx_csbcpb *)((u8 *)nx_ctx->out_sg +
					     NX_PAGE_SIZE);

	/* give each context a pointer to global stats and their OF
	 * properties */
	nx_ctx->stats = &nx_driver.stats;
	memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode],
	       sizeof(struct alg_props) * 3);

	return 0;
}

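/*
 * For reference, the kmem buffer allocated above is carved up into 4K
 * aligned pages like so (the csbcpb_aead page only exists for the GCM
 * and CCM modes):
 *
 *	kmem -> [alignment padding][csbcpb][in_sg][out_sg][csbcpb_aead]
 *	                           page 0  page 1 page 2  page 3
 */
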
/* entry points from the crypto tfm initializers */
int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx));
	return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CCM);
}

int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx));
	return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_GCM);
}

int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CTR);
}

int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CBC);
}

int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_ECB);
}

int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
}

int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_XCBC_MAC);
}

/**
 * nx_crypto_ctx_exit - destroy a crypto api context
 *
 * @tfm: the crypto transform pointer for the context
 *
 * As crypto API contexts are destroyed, this exit hook is called to free the
 * memory associated with it.
 */
void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);

	kzfree(nx_ctx->kmem);
	nx_ctx->csbcpb = NULL;
	nx_ctx->csbcpb_aead = NULL;
	nx_ctx->in_sg = NULL;
	nx_ctx->out_sg = NULL;
}

void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);

	kzfree(nx_ctx->kmem);
}

static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
	dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
		viodev->name, viodev->resource_id);

	if (nx_driver.viodev) {
		dev_err(&viodev->dev, "%s: Attempt to register more than one "
			"instance of the hardware\n", __func__);
		return -EINVAL;
	}

	nx_driver.viodev = viodev;

	nx_of_init(&viodev->dev, &nx_driver.of);

	return nx_register_algs();
}

static int nx_remove(struct vio_dev *viodev)
{
	dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n",
		viodev->unit_address);

	if (nx_driver.of.status == NX_OKAY) {
		NX_DEBUGFS_FINI(&nx_driver);

		nx_unregister_shash(&nx_shash_aes_xcbc_alg,
				    NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
		nx_unregister_shash(&nx_shash_sha512_alg,
				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512);
		nx_unregister_shash(&nx_shash_sha256_alg,
				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256);
		nx_unregister_aead(&nx_ccm4309_aes_alg,
				   NX_FC_AES, NX_MODE_AES_CCM);
		nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
		nx_unregister_aead(&nx_gcm4106_aes_alg,
				   NX_FC_AES, NX_MODE_AES_GCM);
		nx_unregister_aead(&nx_gcm_aes_alg,
				   NX_FC_AES, NX_MODE_AES_GCM);
		nx_unregister_alg(&nx_ctr3686_aes_alg,
				  NX_FC_AES, NX_MODE_AES_CTR);
		nx_unregister_alg(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
		nx_unregister_alg(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
	}

	return 0;
}


/* module wide initialization/cleanup */
static int __init nx_init(void)
{
	return vio_register_driver(&nx_driver.viodriver);
}

static void __exit nx_fini(void)
{
	vio_unregister_driver(&nx_driver.viodriver);
}

static const struct vio_device_id nx_crypto_driver_ids[] = {
	{ "ibm,sym-encryption-v1", "ibm,sym-encryption" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, nx_crypto_driver_ids);

/* driver state structure */
struct nx_crypto_driver nx_driver = {
	.viodriver = {
		.id_table = nx_crypto_driver_ids,
		.probe = nx_probe,
		.remove = nx_remove,
		.name = NX_NAME,
	},
};

module_init(nx_init);
module_exit(nx_fini);

MODULE_AUTHOR("Kent Yoder <yoder1@us.ibm.com>");
MODULE_DESCRIPTION(NX_STRING);
MODULE_LICENSE("GPL");
MODULE_VERSION(NX_VERSION);