// SPDX-License-Identifier: GPL-2.0-only
/*
 * Routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */

#include <crypto/aes.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/sha2.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
#include <asm/hvcall.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"


/**
 * nx_hcall_sync - make an H_COP_OP hcall for the passed in op structure
 *
 * @nx_ctx: the crypto context handle
 * @op: PFO operation struct to pass in
 * @may_sleep: flag indicating the request can sleep
 *
 * Make the hcall, retrying while the hardware is busy. If we cannot yield
 * the thread, limit the number of retries to 10 here.
 */
int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
		  struct vio_pfo_op *op,
		  u32 may_sleep)
{
	int rc, retries = 10;
	struct vio_dev *viodev = nx_driver.viodev;

	atomic_inc(&(nx_ctx->stats->sync_ops));

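	/*
	 * Note: the loop below only retries when @may_sleep is false; a
	 * sleepable caller gets a single vio_h_cop_sync() attempt and is
	 * expected to handle -EBUSY itself.
	 */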
	do {
		rc = vio_h_cop_sync(viodev, op);
	} while (rc == -EBUSY && !may_sleep && retries--);

	if (rc) {
		dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
			"hcall rc: %ld\n", rc, op->hcall_err);
		atomic_inc(&(nx_ctx->stats->errors));
		atomic_set(&(nx_ctx->stats->last_error), op->hcall_err);
		atomic_set(&(nx_ctx->stats->last_error_pid), current->pid);
	}

	return rc;
}

/**
 * nx_build_sg_list - build an NX scatter list describing a single buffer
 *
 * @sg_head: pointer to the first scatter list element to build
 * @start_addr: pointer to the linear buffer
 * @len: length of the data at @start_addr
 * @sgmax: the largest number of scatter list elements we're allowed to create
 *
 * This function will start writing nx_sg elements at @sg_head and keep
 * writing them until all of the data from @start_addr is described or
 * until @sgmax elements have been written. Scatter list elements will be
 * created such that none of the elements describes a buffer that crosses a 4K
 * boundary.
 */
struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
			       u8 *start_addr,
			       unsigned int *len,
			       u32 sgmax)
{
	unsigned int sg_len = 0;
	struct nx_sg *sg;
	u64 sg_addr = (u64)start_addr;
	u64 end_addr;

	/* determine the start and end for this address range - slightly
	 * different if this is in VMALLOC_REGION */
	if (is_vmalloc_addr(start_addr))
		sg_addr = page_to_phys(vmalloc_to_page(start_addr))
			  + offset_in_page(sg_addr);
	else
		sg_addr = __pa(sg_addr);

	end_addr = sg_addr + *len;

	/* each iteration will write one struct nx_sg element and add the
	 * length of data described by that element to sg_len. Once @len bytes
	 * have been described (or @sgmax elements have been written), the
	 * loop ends. min_t is used to ensure @end_addr falls on the same page
	 * as sg_addr, if not, we need to create another nx_sg element for the
	 * data on the next page.
	 *
	 * Also when using vmalloc'ed data, every time that a system page
	 * boundary is crossed the physical address needs to be re-calculated.
	 */
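	/*
	 * Illustrative example: a 6000-byte buffer whose physical address
	 * ends in 0xf00 is described by three elements: 256 bytes up to the
	 * first 4K boundary, a full 4096-byte page, and the remaining
	 * 1648 bytes.
	 */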
	for (sg = sg_head; sg_len < *len; sg++) {
		u64 next_page;

		sg->addr = sg_addr;
		sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
				end_addr);

		next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
		sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
		sg_len += sg->len;

		if (sg_addr >= next_page &&
		    is_vmalloc_addr(start_addr + sg_len)) {
			sg_addr = page_to_phys(vmalloc_to_page(
						start_addr + sg_len));
			end_addr = sg_addr + *len - sg_len;
		}

		if ((sg - sg_head) == sgmax) {
			sg++;
			break;
		}
	}
	*len = sg_len;

	/* return the moved sg_head pointer */
	return sg;
}
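
/*
 * Illustrative caller sketch (not a function in this file): convert a
 * linear buffer into the context's input list and derive op.inlen from
 * the number of elements written, as the algorithm implementations do.
 *
 *	struct nx_sg *end;
 *	unsigned int len = buf_len;
 *
 *	end = nx_build_sg_list(nx_ctx->in_sg, buf, &len, nx_ctx->ap->sglen);
 *	nx_ctx->op.inlen = (end - nx_ctx->in_sg) * sizeof(struct nx_sg);
 */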

/**
 * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist
 *
 * @nx_dst: pointer to the first nx_sg element to write
 * @sglen: max number of nx_sg entries we're allowed to write
 * @sg_src: pointer to the source linux scatterlist to walk
 * @start: number of bytes to fast-forward past at the beginning of @sg_src
 * @src_len: number of bytes to walk in @sg_src
 */
struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
				unsigned int sglen,
				struct scatterlist *sg_src,
				unsigned int start,
				unsigned int *src_len)
{
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_dst;
	unsigned int n, len = *src_len;

	/* we need to fast forward through @start bytes first */
	scatterwalk_start_at_pos(&walk, sg_src, start);

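	/*
	 * Each pass maps the next contiguous chunk of the source
	 * scatterlist and hands it to nx_build_sg_list(), which may split
	 * it further on 4K boundaries; stop once @len bytes are consumed
	 * or the destination list is full.
	 */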
	while (len && (nx_sg - nx_dst) < sglen) {
		n = scatterwalk_next(&walk, len);

		nx_sg = nx_build_sg_list(nx_sg, walk.addr, &n,
					 sglen - (nx_sg - nx_dst));

		scatterwalk_done_src(&walk, n);
		len -= n;
	}
	/* update to_process */
	*src_len -= len;

	/* return the moved destination pointer */
	return nx_sg;
}

/**
 * trim_sg_list - trim an sg list so it stays within a given bound
 * @sg: sg list head
 * @end: sg list end
 * @delta: number of bytes to crop in order to bound the list
 * @nbytes: length of data in the scatterlists or data length - whichever
 *	    is greater
 *
 * Return: the byte length of the trimmed nx_sg list, negated; phyp treats
 * a negative length as denoting a scatterlist rather than a linear buffer.
 */
static long int trim_sg_list(struct nx_sg *sg,
			     struct nx_sg *end,
			     unsigned int delta,
			     unsigned int *nbytes)
{
	long int oplen;
	long int data_back;
	unsigned int is_delta = delta;

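	/*
	 * Walk backwards from the tail of the list, shrinking the last
	 * element or dropping it entirely until @delta bytes have been
	 * cropped.
	 */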
	while (delta && end > sg) {
		struct nx_sg *last = end - 1;

		if (last->len > delta) {
			last->len -= delta;
			delta = 0;
		} else {
			end--;
			delta -= last->len;
		}
	}

	/* There are cases where we need to crop the list in order to make it
	 * a block size multiple, but we also need to align data. In order to
	 * do that we need to calculate how much data we need to put back to
	 * be processed
	 */
	oplen = (sg - end) * sizeof(struct nx_sg);
	if (is_delta) {
		data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len;
		data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
		*nbytes -= data_back;
	}

	return oplen;
}

/**
 * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
 *                     scatterlists based on them.
 *
 * @nx_ctx: NX crypto context for the lists we're building
 * @iv: iv data, if the algorithm requires it
 * @dst: destination scatterlist
 * @src: source scatterlist
 * @nbytes: length of data described in the scatterlists
 * @offset: number of bytes to fast-forward past at the beginning of
 *          scatterlists.
 * @oiv: destination for the iv data, if the algorithm requires it
 *
 * This is common code shared by all the AES algorithms. It uses the crypto
 * scatterlist walk routines to traverse input and output scatterlists,
 * building corresponding NX scatterlists.
 */
int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
		      const u8 *iv,
		      struct scatterlist *dst,
		      struct scatterlist *src,
		      unsigned int *nbytes,
		      unsigned int offset,
		      u8 *oiv)
{
	unsigned int delta = 0;
	unsigned int total = *nbytes;
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int max_sg_len;

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			   nx_driver.of.max_sg_len / sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen / NX_PAGE_SIZE);

	if (oiv)
		memcpy(oiv, iv, AES_BLOCK_SIZE);

	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);

	nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst,
				     offset, nbytes);
	nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src,
				    offset, nbytes);

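	/*
	 * If the walk could not fit all of the data, crop the request down
	 * to an AES block size multiple; @delta is the partial-block
	 * remainder that must be trimmed off.
	 */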
	if (*nbytes < total)
		delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1));

	/* these lengths should be negative, which will indicate to phyp that
	 * the input and output parameters are scatterlists, not linear
	 * buffers */
	nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
	nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes);

	return 0;
}

/**
 * nx_ctx_init - initialize an nx_ctx's vio_pfo_op struct
 *
 * @nx_ctx: the nx context to initialize
 * @function: the function code for the op
 */
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
{
	spin_lock_init(&nx_ctx->lock);
	memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
	nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;

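	/*
	 * The hypervisor expects real (physical) addresses for the csbcpb
	 * and the scatterlists, hence the __pa() conversions here.
	 */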
	nx_ctx->op.flags = function;
	nx_ctx->op.csbcpb = __pa(nx_ctx->csbcpb);
	nx_ctx->op.in = __pa(nx_ctx->in_sg);
	nx_ctx->op.out = __pa(nx_ctx->out_sg);

	if (nx_ctx->csbcpb_aead) {
		nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT;

		nx_ctx->op_aead.flags = function;
		nx_ctx->op_aead.csbcpb = __pa(nx_ctx->csbcpb_aead);
		nx_ctx->op_aead.in = __pa(nx_ctx->in_sg);
		nx_ctx->op_aead.out = __pa(nx_ctx->out_sg);
	}
}

static void nx_of_update_status(struct device *dev,
				struct property *p,
				struct nx_of *props)
{
	if (!strncmp(p->value, "okay", p->length)) {
		props->status = NX_WAITING;
		props->flags |= NX_OF_FLAG_STATUS_SET;
	} else {
		dev_info(dev, "%s: status '%s' is not 'okay'\n", __func__,
			 (char *)p->value);
	}
}

static void nx_of_update_sglen(struct device *dev,
			       struct property *p,
			       struct nx_of *props)
{
	if (p->length != sizeof(props->max_sg_len)) {
		dev_err(dev, "%s: unexpected format for "
			"ibm,max-sg-len property\n", __func__);
		dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes "
			"long, expected %zd bytes\n", __func__,
			p->length, sizeof(props->max_sg_len));
		return;
	}

	props->max_sg_len = *(u32 *)p->value;
	props->flags |= NX_OF_FLAG_MAXSGLEN_SET;
}

static void nx_of_update_msc(struct device *dev,
			     struct property *p,
			     struct nx_of *props)
{
	struct msc_triplet *trip;
	struct max_sync_cop *msc;
	unsigned int bytes_so_far, i, lenp;

	msc = (struct max_sync_cop *)p->value;
	lenp = p->length;

	/* You can't tell if the data read in for this property is sane by its
	 * size alone. This is because there are sizes embedded in the data
	 * structure. The best we can do is check lengths as we parse and bail
	 * as soon as a length error is detected. */
	bytes_so_far = 0;

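	/*
	 * The property is a packed sequence of max_sync_cop headers, each
	 * followed immediately by msc->triplets msc_triplet entries. Once
	 * the triplets are consumed, @trip points at the next header.
	 */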
	while ((bytes_so_far + sizeof(struct max_sync_cop)) <= lenp) {
		bytes_so_far += sizeof(struct max_sync_cop);

		trip = msc->trip;

		for (i = 0;
		     ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
		     i < msc->triplets;
		     i++) {
			if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) {
				dev_err(dev, "unknown function code/mode "
					"combo: %d/%d (ignored)\n", msc->fc,
					msc->mode);
				goto next_loop;
			}

			if (!trip->sglen || trip->databytelen < NX_PAGE_SIZE) {
				dev_warn(dev, "bogus sglen/databytelen: "
					 "%u/%u (ignored)\n", trip->sglen,
					 trip->databytelen);
				goto next_loop;
			}

			switch (trip->keybitlen) {
			case 128:
			case 160:
				props->ap[msc->fc][msc->mode][0].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][0].sglen =
					trip->sglen;
				break;
			case 192:
				props->ap[msc->fc][msc->mode][1].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][1].sglen =
					trip->sglen;
				break;
			case 256:
				if (msc->fc == NX_FC_AES) {
					props->ap[msc->fc][msc->mode][2].databytelen =
						trip->databytelen;
					props->ap[msc->fc][msc->mode][2].sglen =
						trip->sglen;
				} else if (msc->fc == NX_FC_AES_HMAC ||
					   msc->fc == NX_FC_SHA) {
					props->ap[msc->fc][msc->mode][1].databytelen =
						trip->databytelen;
					props->ap[msc->fc][msc->mode][1].sglen =
						trip->sglen;
				} else {
					dev_warn(dev, "unknown function "
						 "code/key bit len combo"
						 ": (%u/256)\n", msc->fc);
				}
				break;
			case 512:
				props->ap[msc->fc][msc->mode][2].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][2].sglen =
					trip->sglen;
				break;
			default:
				dev_warn(dev, "unknown function code/key bit "
					 "len combo: (%u/%u)\n", msc->fc,
					 trip->keybitlen);
				break;
			}
next_loop:
			bytes_so_far += sizeof(struct msc_triplet);
			trip++;
		}

		msc = (struct max_sync_cop *)trip;
	}

	props->flags |= NX_OF_FLAG_MAXSYNCCOP_SET;
}

/**
 * nx_of_init - read Open Firmware values from the device tree
 *
 * @dev: device handle
 * @props: pointer to struct to hold the properties values
 *
 * Called once at driver probe time, this function will read out the
 * Open Firmware properties we use at runtime. If all the OF properties are
 * acceptable, when we exit this function props->flags will indicate that
 * we're ready to register our crypto algorithms.
 */
static void nx_of_init(struct device *dev, struct nx_of *props)
{
	struct device_node *base_node = dev->of_node;
	struct property *p;

	p = of_find_property(base_node, "status", NULL);
	if (!p)
		dev_info(dev, "%s: property 'status' not found\n", __func__);
	else
		nx_of_update_status(dev, p, props);

	p = of_find_property(base_node, "ibm,max-sg-len", NULL);
	if (!p)
		dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n",
			 __func__);
	else
		nx_of_update_sglen(dev, p, props);

	p = of_find_property(base_node, "ibm,max-sync-cop", NULL);
	if (!p)
		dev_info(dev, "%s: property 'ibm,max-sync-cop' not found\n",
			 __func__);
	else
		nx_of_update_msc(dev, p, props);
}

static bool nx_check_prop(struct device *dev, u32 fc, u32 mode, int slot)
{
	struct alg_props *props = &nx_driver.of.ap[fc][mode][slot];

	if (!props->sglen || props->databytelen < NX_PAGE_SIZE) {
		if (dev)
			dev_warn(dev, "bogus sglen/databytelen for %u/%u/%u: "
				 "%u/%u (ignored)\n", fc, mode, slot,
				 props->sglen, props->databytelen);
		return false;
	}

	return true;
}

static bool nx_check_props(struct device *dev, u32 fc, u32 mode)
{
	int i;

	for (i = 0; i < 3; i++)
		if (!nx_check_prop(dev, fc, mode, i))
			return false;

	return true;
}

static int nx_register_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
{
	return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
	       crypto_register_skcipher(alg) : 0;
}

static int nx_register_aead(struct aead_alg *alg, u32 fc, u32 mode)
{
	return nx_check_props(&nx_driver.viodev->dev, fc, mode) ?
	       crypto_register_aead(alg) : 0;
}

static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot)
{
	return (slot >= 0 ? nx_check_prop(&nx_driver.viodev->dev,
					  fc, mode, slot) :
			    nx_check_props(&nx_driver.viodev->dev, fc, mode)) ?
	       crypto_register_shash(alg) : 0;
}

static void nx_unregister_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode)
{
	if (nx_check_props(NULL, fc, mode))
		crypto_unregister_skcipher(alg);
}

static void nx_unregister_aead(struct aead_alg *alg, u32 fc, u32 mode)
{
	if (nx_check_props(NULL, fc, mode))
		crypto_unregister_aead(alg);
}

static void nx_unregister_shash(struct shash_alg *alg, u32 fc, u32 mode,
				int slot)
{
	if (slot >= 0 ? nx_check_prop(NULL, fc, mode, slot) :
			nx_check_props(NULL, fc, mode))
		crypto_unregister_shash(alg);
}

/**
 * nx_register_algs - register algorithms with the crypto API
 *
 * Called from nx_probe()
 *
 * If all OF properties are in an acceptable state, the driver flags will
 * indicate that we're ready and we'll create our debugfs files and register
 * our crypto algorithms.
 */
static int nx_register_algs(void)
{
	int rc = -1;

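	/*
	 * Register in order; on any failure, unwind everything registered
	 * so far via the goto ladder at the bottom of this function.
	 */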
	if (nx_driver.of.flags != NX_OF_FLAG_MASK_READY)
		goto out;

	memset(&nx_driver.stats, 0, sizeof(struct nx_stats));

	NX_DEBUGFS_INIT(&nx_driver);

	nx_driver.of.status = NX_OKAY;

	rc = nx_register_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
	if (rc)
		goto out;

	rc = nx_register_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
	if (rc)
		goto out_unreg_ecb;

	rc = nx_register_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES,
				  NX_MODE_AES_CTR);
	if (rc)
		goto out_unreg_cbc;

	rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
	if (rc)
		goto out_unreg_ctr3686;

	rc = nx_register_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
	if (rc)
		goto out_unreg_gcm;

	rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
	if (rc)
		goto out_unreg_gcm4106;

	rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
	if (rc)
		goto out_unreg_ccm;

	rc = nx_register_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
			       NX_PROPS_SHA256);
	if (rc)
		goto out_unreg_ccm4309;

	rc = nx_register_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
			       NX_PROPS_SHA512);
	if (rc)
		goto out_unreg_s256;

	rc = nx_register_shash(&nx_shash_aes_xcbc_alg,
			       NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
	if (rc)
		goto out_unreg_s512;

	goto out;

out_unreg_s512:
	nx_unregister_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA,
			    NX_PROPS_SHA512);
out_unreg_s256:
	nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA,
			    NX_PROPS_SHA256);
out_unreg_ccm4309:
	nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_ccm:
	nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
out_unreg_gcm4106:
	nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_gcm:
	nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM);
out_unreg_ctr3686:
	nx_unregister_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR);
out_unreg_cbc:
	nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC);
out_unreg_ecb:
	nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB);
out:
	return rc;
}

/**
 * nx_crypto_ctx_init - create and initialize a crypto api context
 *
 * @nx_ctx: the crypto api context
 * @fc: function code for the context
 * @mode: the function code specific mode for this context
 */
static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
{
	if (nx_driver.of.status != NX_OKAY) {
		pr_err("Attempt to initialize NX crypto context while device "
		       "is not available!\n");
		return -ENODEV;
	}

	/* we need an extra page for csbcpb_aead for these modes */
	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
		nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) +
				   sizeof(struct nx_csbcpb);
	else
		nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
				   sizeof(struct nx_csbcpb);

	nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
	if (!nx_ctx->kmem)
		return -ENOMEM;

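	/*
	 * Layout of kmem after rounding up to the first 4K boundary: one
	 * page for the csbcpb, one for in_sg, one for out_sg, and (for
	 * GCM/CCM only) a trailing page for csbcpb_aead.
	 */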
	/* the csbcpb and scatterlists must be 4K aligned pages */
	nx_ctx->csbcpb = (struct nx_csbcpb *)(round_up((u64)nx_ctx->kmem,
						       (u64)NX_PAGE_SIZE));
	nx_ctx->in_sg = (struct nx_sg *)((u8 *)nx_ctx->csbcpb + NX_PAGE_SIZE);
	nx_ctx->out_sg = (struct nx_sg *)((u8 *)nx_ctx->in_sg + NX_PAGE_SIZE);

	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
		nx_ctx->csbcpb_aead =
			(struct nx_csbcpb *)((u8 *)nx_ctx->out_sg +
					     NX_PAGE_SIZE);

	/* give each context a pointer to global stats and their OF
	 * properties */
	nx_ctx->stats = &nx_driver.stats;
	memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode],
	       sizeof(struct alg_props) * 3);

	return 0;
}

/* entry points from the crypto tfm initializers */
int nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx));
	return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CCM);
}

int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx));
	return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_GCM);
}

int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm)
{
	return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CTR);
}

int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm)
{
	return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CBC);
}

int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm)
{
	return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_ECB);
}

int nx_crypto_ctx_sha_init(struct crypto_shash *tfm)
{
	return nx_crypto_ctx_init(crypto_shash_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
}

int nx_crypto_ctx_aes_xcbc_init(struct crypto_shash *tfm)
{
	return nx_crypto_ctx_init(crypto_shash_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_XCBC_MAC);
}

/**
 * nx_crypto_ctx_exit - destroy a crypto api context
 *
 * @tfm: the crypto transform pointer for the context
 *
 * As crypto API contexts are destroyed, this exit hook is called to free the
 * memory associated with it.
 */
void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);

	kfree_sensitive(nx_ctx->kmem);
	nx_ctx->csbcpb = NULL;
	nx_ctx->csbcpb_aead = NULL;
	nx_ctx->in_sg = NULL;
	nx_ctx->out_sg = NULL;
}

void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm)
{
	/* nx_crypto_ctx_exit() expects the base tfm, not the context */
	nx_crypto_ctx_exit(crypto_skcipher_tfm(tfm));
}

void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);

	kfree_sensitive(nx_ctx->kmem);
}

void nx_crypto_ctx_shash_exit(struct crypto_shash *tfm)
{
	nx_crypto_ctx_exit(crypto_shash_tfm(tfm));
}

static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
	dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
		viodev->name, viodev->resource_id);

	if (nx_driver.viodev) {
		dev_err(&viodev->dev, "%s: Attempt to register more than one "
			"instance of the hardware\n", __func__);
		return -EINVAL;
	}

	nx_driver.viodev = viodev;

	nx_of_init(&viodev->dev, &nx_driver.of);

	return nx_register_algs();
}

static void nx_remove(struct vio_dev *viodev)
{
	dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n",
		viodev->unit_address);

	if (nx_driver.of.status == NX_OKAY) {
		NX_DEBUGFS_FINI(&nx_driver);

		nx_unregister_shash(&nx_shash_aes_xcbc_alg,
				    NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1);
		nx_unregister_shash(&nx_shash_sha512_alg,
				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512);
		nx_unregister_shash(&nx_shash_sha256_alg,
				    NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256);
		nx_unregister_aead(&nx_ccm4309_aes_alg,
				   NX_FC_AES, NX_MODE_AES_CCM);
		nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM);
		nx_unregister_aead(&nx_gcm4106_aes_alg,
				   NX_FC_AES, NX_MODE_AES_GCM);
		nx_unregister_aead(&nx_gcm_aes_alg,
				   NX_FC_AES, NX_MODE_AES_GCM);
		nx_unregister_skcipher(&nx_ctr3686_aes_alg,
				       NX_FC_AES, NX_MODE_AES_CTR);
		nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES,
				       NX_MODE_AES_CBC);
		nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES,
				       NX_MODE_AES_ECB);
	}
}


/* module wide initialization/cleanup */
static int __init nx_init(void)
{
	return vio_register_driver(&nx_driver.viodriver);
}

static void __exit nx_fini(void)
{
	vio_unregister_driver(&nx_driver.viodriver);
}

static const struct vio_device_id nx_crypto_driver_ids[] = {
	{ "ibm,sym-encryption-v1", "ibm,sym-encryption" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, nx_crypto_driver_ids);

/* driver state structure */
struct nx_crypto_driver nx_driver = {
	.viodriver = {
		.id_table = nx_crypto_driver_ids,
		.probe = nx_probe,
		.remove = nx_remove,
		.name = NX_NAME,
	},
};

module_init(nx_init);
module_exit(nx_fini);

MODULE_AUTHOR("Kent Yoder <yoder1@us.ibm.com>");
MODULE_DESCRIPTION(NX_STRING);
MODULE_LICENSE("GPL");
MODULE_VERSION(NX_VERSION);