/*
 * Routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */
#include <crypto/internal/hash.h>
#include <crypto/hash.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/of.h>
#include <asm/hvcall.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"
/**
 * nx_hcall_sync - make an H_COP_OP hcall for the passed in op structure
 *
 * @nx_ctx: the crypto context handle
 * @op: PFO operation struct to pass in
 * @may_sleep: flag indicating the request can sleep
 *
 * Make the hcall, retrying while the hardware is busy. If we cannot yield
 * the thread, limit the number of retries to 10 here.
 */
int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
		  struct vio_pfo_op    *op,
		  u32                   may_sleep)
{
	int rc, retries = 10;
	struct vio_dev *viodev = nx_driver.viodev;

	atomic_inc(&(nx_ctx->stats->sync_ops));

	do {
		rc = vio_h_cop_sync(viodev, op);
	} while (rc == -EBUSY && !may_sleep && retries--);

	if (rc) {
		dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
			"hcall rc: %ld\n", rc, op->hcall_err);
		atomic_inc(&(nx_ctx->stats->errors));
		atomic_set(&(nx_ctx->stats->last_error), op->hcall_err);
		atomic_set(&(nx_ctx->stats->last_error_pid), current->pid);
	}

	return rc;
}
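/*
 * Illustrative sketch only, not part of the original driver: how a typical
 * submission path drives nx_hcall_sync(). The helper name is hypothetical;
 * the op and stats fields are assumed to have been set up by nx_ctx_init().
 */
static int __maybe_unused nx_submit_example(struct nx_crypto_ctx *nx_ctx)
{
	/* may_sleep == 1: process context, so keep retrying on -EBUSY */
	return nx_hcall_sync(nx_ctx, &nx_ctx->op, 1);
}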
/**
 * nx_build_sg_list - build an NX scatter list describing a single buffer
 *
 * @sg_head: pointer to the first scatter list element to build
 * @start_addr: pointer to the linear buffer
 * @len: length of the data at @start_addr
 * @sgmax: the largest number of scatter list elements we're allowed to create
 *
 * This function will start writing nx_sg elements at @sg_head and keep
 * writing them until all of the data from @start_addr is described or
 * until sgmax elements have been written. Scatter list elements will be
 * created such that none of the elements describes a buffer that crosses a 4K
 * boundary.
 */
struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
			       u8           *start_addr,
			       unsigned int *len,
			       u32           sgmax)
{
	unsigned int sg_len = 0;
	struct nx_sg *sg;
	u64 sg_addr = (u64)start_addr;
	u64 end_addr;

	/* determine the start and end for this address range - slightly
	 * different if this is in VMALLOC_REGION */
	if (is_vmalloc_addr(start_addr))
		sg_addr = page_to_phys(vmalloc_to_page(start_addr))
			  + offset_in_page(sg_addr);
	else
		sg_addr = __pa(sg_addr);

	end_addr = sg_addr + *len;

	/* each iteration will write one struct nx_sg element and add the
	 * length of data described by that element to sg_len. Once @len bytes
	 * have been described (or @sgmax elements have been written), the
	 * loop ends. min_t is used to ensure @end_addr falls on the same page
	 * as sg_addr; if not, we need to create another nx_sg element for the
	 * data on the next page.
	 *
	 * Also, when using vmalloc'ed data, every time a system page boundary
	 * is crossed the physical address needs to be re-calculated.
	 */
	for (sg = sg_head; sg_len < *len; sg++) {
		u64 next_page;

		sg->addr = sg_addr;
		sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
				end_addr);

		next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
		sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
		sg_len += sg->len;

		if (sg_addr >= next_page &&
		    is_vmalloc_addr(start_addr + sg_len)) {
			sg_addr = page_to_phys(vmalloc_to_page(
						start_addr + sg_len));
			end_addr = sg_addr + *len - sg_len;
		}

		if ((sg - sg_head) == sgmax) {
			pr_err("nx: scatter/gather list overflow, pid: %d\n",
			       current->pid);
			sg++;
			break;
		}
	}

	/* update to_process */
	*len = sg_len;

	/* return the moved sg_head pointer */
	return sg;
}
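/*
 * Illustrative sketch only: describing one linear buffer with
 * nx_build_sg_list(). The helper name and buffer are hypothetical. On
 * return, @len holds the number of bytes actually described and the
 * returned pointer marks one element past the last one written.
 */
static void __maybe_unused nx_sg_list_example(struct nx_crypto_ctx *nx_ctx,
					      u8 *buf, unsigned int len)
{
	struct nx_sg *end;

	/* no element written here will cross a 4K page boundary */
	end = nx_build_sg_list(nx_ctx->in_sg, buf, &len, nx_ctx->ap->sglen);

	/* a negative length tells phyp this is a scatterlist, not linear data */
	nx_ctx->op.inlen = (end - nx_ctx->in_sg) * sizeof(struct nx_sg) * -1;
}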
/**
 * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist
 *
 * @nx_dst: pointer to the first nx_sg element to write
 * @sglen: max number of nx_sg entries we're allowed to write
 * @sg_src: pointer to the source linux scatterlist to walk
 * @start: number of bytes to fast-forward past at the beginning of @sg_src
 * @src_len: number of bytes to walk in @sg_src
 */
struct nx_sg *nx_walk_and_build(struct nx_sg       *nx_dst,
				unsigned int        sglen,
				struct scatterlist *sg_src,
				unsigned int        start,
				unsigned int       *src_len)
{
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_dst;
	unsigned int n, offset = 0, len = *src_len;
	u8 *dst;

	/* we need to fast forward through @start bytes first */
	for (;;) {
		scatterwalk_start(&walk, sg_src);

		if (start < offset + sg_src->length)
			break;

		offset += sg_src->length;
		sg_src = sg_next(sg_src);
	}

	/* start - offset is the number of bytes to advance in the scatterlist
	 * element we're currently looking at */
	scatterwalk_advance(&walk, start - offset);

	while (len && (nx_sg - nx_dst) < sglen) {
		n = scatterwalk_clamp(&walk, len);
		if (!n) {
			/* In cases where we have a scatterlist chain,
			 * sg_next handles it properly */
			scatterwalk_start(&walk, sg_next(walk.sg));
			n = scatterwalk_clamp(&walk, len);
		}
		dst = scatterwalk_map(&walk);

		nx_sg = nx_build_sg_list(nx_sg, dst, &n,
					 sglen - (nx_sg - nx_dst));
		len -= n;

		scatterwalk_unmap(dst);
		scatterwalk_advance(&walk, n);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
	}
	/* update to_process */
	*src_len -= len;

	/* return the moved destination pointer */
	return nx_sg;
}
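/*
 * Illustrative sketch only: converting a linux scatterlist into NX elements
 * with nx_walk_and_build(). The helper name is hypothetical; the update to
 * @nbytes reports how much of @src was actually described.
 */
static struct nx_sg * __maybe_unused
nx_walk_example(struct nx_crypto_ctx *nx_ctx, struct scatterlist *src,
		unsigned int *nbytes)
{
	/* fast-forward past 0 bytes, then describe up to *nbytes of @src */
	return nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
				 src, 0, nbytes);
}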
/**
 * trim_sg_list - crop an sg list so the data it describes stays in bounds
 * @sg: sg list head
 * @end: sg list end
 * @delta: the number of bytes we need to crop in order to bound the list
 * @nbytes: length of data in the scatterlists; adjusted to match what is
 *          actually processed after cropping
 */
static long int trim_sg_list(struct nx_sg *sg,
			     struct nx_sg *end,
			     unsigned int delta,
			     unsigned int *nbytes)
{
	long int oplen;
	long int data_back;
	unsigned int is_delta = delta;

	while (delta && end > sg) {
		struct nx_sg *last = end - 1;

		if (last->len > delta) {
			last->len -= delta;
			delta = 0;
		} else {
			end--;
			delta -= last->len;
		}
	}

	/* There are cases where we need to crop the list in order to make it
	 * a block size multiple, but we also need to align the data. In order
	 * to do that we need to calculate how much we need to put back to be
	 * processed
	 */
	oplen = (sg - end) * sizeof(struct nx_sg);
	if (is_delta) {
		data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len;
		data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
		*nbytes -= data_back;
	}

	return oplen;
}
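/*
 * Worked example (illustrative, with assumed numbers): if the list built
 * for an AES op describes 100 bytes, the caller computes
 * delta = 100 - (100 & ~(AES_BLOCK_SIZE - 1)) = 4, and trim_sg_list()
 * crops those 4 bytes off the tail elements so the engine only sees a
 * block-size multiple, adjusting *nbytes to match what will actually be
 * processed. The negative return value is the element-array byte length
 * in the form phyp expects for op.inlen/op.outlen.
 */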
/**
 * nx_sha_build_sg_list - walk and build an sg list for the SHA modes,
 *			  using the right bounds and limits.
 * @nx_ctx: NX crypto context for the lists we're building
 * @nx_in_outsg: current sg list, in or out list
 * @op_len: current op_len to be used in order to build an sg list
 * @nbytes: number of bytes to be processed
 * @offset: buf offset
 * @mode: SHA256 or SHA512
 */
int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx,
			 struct nx_sg         *nx_in_outsg,
			 s64                  *op_len,
			 unsigned int         *nbytes,
			 u8                   *offset,
			 u32                   mode)
{
	unsigned int delta = 0;
	unsigned int total = *nbytes;
	struct nx_sg *nx_insg = nx_in_outsg;
	unsigned int max_sg_len;

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
	nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len);

	switch (mode) {
	case NX_DS_SHA256:
		if (*nbytes < total)
			delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1));
		break;
	case NX_DS_SHA512:
		if (*nbytes < total)
			delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1));
		break;
	default:
		return -EINVAL;
	}
	*op_len = trim_sg_list(nx_in_outsg, nx_insg, delta, nbytes);

	return 0;
}
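/*
 * Illustrative sketch only: how a SHA-256 update path might feed its buffer
 * through nx_sha_build_sg_list(). The helper name is hypothetical and error
 * handling is elided.
 */
static int __maybe_unused nx_sha256_sg_example(struct nx_crypto_ctx *nx_ctx,
					       u8 *data, unsigned int len)
{
	s64 op_len;
	int rc;

	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg, &op_len, &len,
				  data, NX_DS_SHA256);
	if (rc)
		return rc;

	/* op_len is already negative, as phyp expects for scatterlists */
	nx_ctx->op.inlen = op_len;
	return 0;
}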
/**
 * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
 *                     scatterlists based on them.
 *
 * @nx_ctx: NX crypto context for the lists we're building
 * @desc: the block cipher descriptor for the operation
 * @dst: destination scatterlist
 * @src: source scatterlist
 * @nbytes: length of data described in the scatterlists
 * @offset: number of bytes to fast-forward past at the beginning of
 *          the scatterlists.
 * @iv: destination for the iv data, if the algorithm requires it
 *
 * This is common code shared by all the AES algorithms. It uses the block
 * cipher walk routines to traverse input and output scatterlists, building
 * corresponding NX scatterlists.
 */
int nx_build_sg_lists(struct nx_crypto_ctx  *nx_ctx,
		      struct blkcipher_desc *desc,
		      struct scatterlist    *dst,
		      struct scatterlist    *src,
		      unsigned int          *nbytes,
		      unsigned int           offset,
		      u8                    *iv)
{
	unsigned int delta = 0;
	unsigned int total = *nbytes;
	struct nx_sg *nx_insg = nx_ctx->in_sg;
	struct nx_sg *nx_outsg = nx_ctx->out_sg;
	unsigned int max_sg_len;

	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
	max_sg_len = min_t(u64, max_sg_len,
			nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	if (iv)
		memcpy(iv, desc->info, AES_BLOCK_SIZE);

	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);

	nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst,
				     offset, nbytes);
	nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src,
				    offset, nbytes);

	if (*nbytes < total)
		delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1));

	/* these lengths should be negative, which will indicate to phyp that
	 * the input and output parameters are scatterlists, not linear
	 * buffers */
	nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
	nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes);

	return 0;
}
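/*
 * Illustrative sketch only: a CBC-style encrypt path building both NX lists
 * in one call. Names are hypothetical; on success, op.inlen and op.outlen
 * have already been set (negatively) by the trim_sg_list() calls above.
 */
static int __maybe_unused nx_cbc_sg_example(struct nx_crypto_ctx *nx_ctx,
					    struct blkcipher_desc *desc,
					    struct scatterlist *dst,
					    struct scatterlist *src,
					    unsigned int *nbytes, u8 *iv)
{
	int rc;

	rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, 0, iv);
	if (rc)
		return rc;

	/* the hcall can now be issued against nx_ctx->op */
	return nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
}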
/**
 * nx_ctx_init - initialize an nx_ctx's vio_pfo_op struct
 *
 * @nx_ctx: the nx context to initialize
 * @function: the function code for the op
 */
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
{
	spin_lock_init(&nx_ctx->lock);
	memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
	nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;

	nx_ctx->op.flags = function;
	nx_ctx->op.csbcpb = __pa(nx_ctx->csbcpb);
	nx_ctx->op.in = __pa(nx_ctx->in_sg);
	nx_ctx->op.out = __pa(nx_ctx->out_sg);

	if (nx_ctx->csbcpb_aead) {
		nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT;

		nx_ctx->op_aead.flags = function;
		nx_ctx->op_aead.csbcpb = __pa(nx_ctx->csbcpb_aead);
		nx_ctx->op_aead.in = __pa(nx_ctx->in_sg);
		nx_ctx->op_aead.out = __pa(nx_ctx->out_sg);
	}
}
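/*
 * Illustrative sketch only: what an algorithm-specific setkey/init path does
 * with nx_ctx_init(). HCOP_FC_AES is the function code the AES front-ends
 * pass in; the helper name here is hypothetical.
 */
static void __maybe_unused nx_ctx_init_example(struct nx_crypto_ctx *nx_ctx)
{
	/* zeroes the kernel memory region and stamps both op structs */
	nx_ctx_init(nx_ctx, HCOP_FC_AES);
}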
static void nx_of_update_status(struct device   *dev,
				struct property *p,
				struct nx_of    *props)
{
	if (!strncmp(p->value, "okay", p->length)) {
		props->status = NX_WAITING;
		props->flags |= NX_OF_FLAG_STATUS_SET;
	} else {
		dev_info(dev, "%s: status '%s' is not 'okay'\n", __func__,
			 (char *)p->value);
	}
}
static void nx_of_update_sglen(struct device   *dev,
			       struct property *p,
			       struct nx_of    *props)
{
	if (p->length != sizeof(props->max_sg_len)) {
		dev_err(dev, "%s: unexpected format for "
			"ibm,max-sg-len property\n", __func__);
		dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes "
			"long, expected %zd bytes\n", __func__,
			p->length, sizeof(props->max_sg_len));
		return;
	}

	props->max_sg_len = *(u32 *)p->value;
	props->flags |= NX_OF_FLAG_MAXSGLEN_SET;
}
static void nx_of_update_msc(struct device   *dev,
			     struct property *p,
			     struct nx_of    *props)
{
	struct msc_triplet *trip;
	struct max_sync_cop *msc;
	unsigned int bytes_so_far, i, lenp;

	msc = (struct max_sync_cop *)p->value;
	lenp = p->length;

	/* You can't tell if the data read in for this property is sane by its
	 * size alone. This is because there are sizes embedded in the data
	 * structure. The best we can do is check lengths as we parse and bail
	 * as soon as a length error is detected. */
	bytes_so_far = 0;

	while ((bytes_so_far + sizeof(struct max_sync_cop)) <= lenp) {
		bytes_so_far += sizeof(struct max_sync_cop);

		trip = msc->trip;
		for (i = 0;
		     ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) &&
		     i < msc->triplets;
		     i++) {
			if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) {
				dev_err(dev, "unknown function code/mode "
					"combo: %d/%d (ignored)\n", msc->fc,
					msc->mode);
				goto next_loop;
			}

			switch (trip->keybitlen) {
			case 128:
			case 160:
				props->ap[msc->fc][msc->mode][0].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][0].sglen =
					trip->sglen;
				break;
			case 192:
				props->ap[msc->fc][msc->mode][1].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][1].sglen =
					trip->sglen;
				break;
			case 256:
				if (msc->fc == NX_FC_AES) {
					props->ap[msc->fc][msc->mode][2].
						databytelen = trip->databytelen;
					props->ap[msc->fc][msc->mode][2].sglen =
						trip->sglen;
				} else if (msc->fc == NX_FC_AES_HMAC ||
					   msc->fc == NX_FC_SHA) {
					props->ap[msc->fc][msc->mode][1].
						databytelen = trip->databytelen;
					props->ap[msc->fc][msc->mode][1].sglen =
						trip->sglen;
				} else {
					dev_warn(dev, "unknown function "
						"code/key bit len combo"
						": (%u/256)\n", msc->fc);
				}
				break;
			case 512:
				props->ap[msc->fc][msc->mode][2].databytelen =
					trip->databytelen;
				props->ap[msc->fc][msc->mode][2].sglen =
					trip->sglen;
				break;
			default:
				dev_warn(dev, "unknown function code/key bit "
					"len combo: (%u/%u)\n", msc->fc,
					trip->keybitlen);
				break;
			}
next_loop:
			bytes_so_far += sizeof(struct msc_triplet);
			trip++;
		}

		msc = (struct max_sync_cop *)trip;
	}

	props->flags |= NX_OF_FLAG_MAXSYNCCOP_SET;
}
/**
 * nx_of_init - read openFirmware values from the device tree
 *
 * @dev: device handle
 * @props: pointer to struct to hold the properties values
 *
 * Called once at driver probe time, this function will read out the
 * openFirmware properties we use at runtime. If all the OF properties are
 * acceptable, when we exit this function props->flags will indicate that
 * we're ready to register our crypto algorithms.
 */
static void nx_of_init(struct device *dev, struct nx_of *props)
{
	struct device_node *base_node = dev->of_node;
	struct property *p;

	p = of_find_property(base_node, "status", NULL);
	if (!p)
		dev_info(dev, "%s: property 'status' not found\n", __func__);
	else
		nx_of_update_status(dev, p, props);

	p = of_find_property(base_node, "ibm,max-sg-len", NULL);
	if (!p)
		dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n",
			 __func__);
	else
		nx_of_update_sglen(dev, p, props);

	p = of_find_property(base_node, "ibm,max-sync-cop", NULL);
	if (!p)
		dev_info(dev, "%s: property 'ibm,max-sync-cop' not found\n",
			 __func__);
	else
		nx_of_update_msc(dev, p, props);
}
/**
 * nx_register_algs - register algorithms with the crypto API
 *
 * Called from nx_probe()
 *
 * If all OF properties are in an acceptable state, the driver flags will
 * indicate that we're ready and we'll create our debugfs files and register
 * our crypto algorithms.
 */
static int nx_register_algs(void)
{
	int rc = -1;

	if (nx_driver.of.flags != NX_OF_FLAG_MASK_READY)
		goto out;

	memset(&nx_driver.stats, 0, sizeof(struct nx_stats));

	rc = NX_DEBUGFS_INIT(&nx_driver);
	if (rc)
		goto out;

	nx_driver.of.status = NX_OKAY;

	rc = crypto_register_alg(&nx_ecb_aes_alg);
	if (rc)
		goto out;

	rc = crypto_register_alg(&nx_cbc_aes_alg);
	if (rc)
		goto out_unreg_ecb;

	rc = crypto_register_alg(&nx_ctr_aes_alg);
	if (rc)
		goto out_unreg_cbc;

	rc = crypto_register_alg(&nx_ctr3686_aes_alg);
	if (rc)
		goto out_unreg_ctr;

	rc = crypto_register_alg(&nx_gcm_aes_alg);
	if (rc)
		goto out_unreg_ctr3686;

	rc = crypto_register_alg(&nx_gcm4106_aes_alg);
	if (rc)
		goto out_unreg_gcm;

	rc = crypto_register_alg(&nx_ccm_aes_alg);
	if (rc)
		goto out_unreg_gcm4106;

	rc = crypto_register_alg(&nx_ccm4309_aes_alg);
	if (rc)
		goto out_unreg_ccm;

	rc = crypto_register_shash(&nx_shash_sha256_alg);
	if (rc)
		goto out_unreg_ccm4309;

	rc = crypto_register_shash(&nx_shash_sha512_alg);
	if (rc)
		goto out_unreg_s256;

	rc = crypto_register_shash(&nx_shash_aes_xcbc_alg);
	if (rc)
		goto out_unreg_s512;

	goto out;

out_unreg_s512:
	crypto_unregister_shash(&nx_shash_sha512_alg);
out_unreg_s256:
	crypto_unregister_shash(&nx_shash_sha256_alg);
out_unreg_ccm4309:
	crypto_unregister_alg(&nx_ccm4309_aes_alg);
out_unreg_ccm:
	crypto_unregister_alg(&nx_ccm_aes_alg);
out_unreg_gcm4106:
	crypto_unregister_alg(&nx_gcm4106_aes_alg);
out_unreg_gcm:
	crypto_unregister_alg(&nx_gcm_aes_alg);
out_unreg_ctr3686:
	crypto_unregister_alg(&nx_ctr3686_aes_alg);
out_unreg_ctr:
	crypto_unregister_alg(&nx_ctr_aes_alg);
out_unreg_cbc:
	crypto_unregister_alg(&nx_cbc_aes_alg);
out_unreg_ecb:
	crypto_unregister_alg(&nx_ecb_aes_alg);
out:
	return rc;
}
/**
 * nx_crypto_ctx_init - create and initialize a crypto api context
 *
 * @nx_ctx: the crypto api context
 * @fc: function code for the context
 * @mode: the function code specific mode for this context
 */
static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
{
	if (nx_driver.of.status != NX_OKAY) {
		pr_err("Attempt to initialize NX crypto context while device "
		       "is not available!\n");
		return -ENODEV;
	}

	/* we need an extra page for csbcpb_aead for these modes */
	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
		nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) +
				   sizeof(struct nx_csbcpb);
	else
		nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
				   sizeof(struct nx_csbcpb);

	nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
	if (!nx_ctx->kmem)
		return -ENOMEM;

	/* the csbcpb and scatterlists must be 4K aligned pages */
	nx_ctx->csbcpb = (struct nx_csbcpb *)(round_up((u64)nx_ctx->kmem,
						       (u64)NX_PAGE_SIZE));
	nx_ctx->in_sg = (struct nx_sg *)((u8 *)nx_ctx->csbcpb + NX_PAGE_SIZE);
	nx_ctx->out_sg = (struct nx_sg *)((u8 *)nx_ctx->in_sg + NX_PAGE_SIZE);

	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
		nx_ctx->csbcpb_aead =
			(struct nx_csbcpb *)((u8 *)nx_ctx->out_sg +
					     NX_PAGE_SIZE);

	/* give each context a pointer to global stats and their OF
	 * properties */
	nx_ctx->stats = &nx_driver.stats;
	memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode],
	       sizeof(struct alg_props) * 3);

	return 0;
}
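/*
 * Layout sketch (illustrative) of the kmem region carved up above:
 *
 *   kmem, rounded up to 4K -> +------------------+
 *                             |      csbcpb      | 1 page
 *                             +------------------+
 *                             |      in_sg       | 1 page
 *                             +------------------+
 *                             |      out_sg      | 1 page
 *                             +------------------+
 *                             |   csbcpb_aead    | GCM/CCM modes only
 *                             +------------------+
 */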
/* entry points from the crypto tfm initializers */
int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CCM);
}

int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_GCM);
}

int nx_crypto_ctx_aes_ctr_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CTR);
}

int nx_crypto_ctx_aes_cbc_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_CBC);
}

int nx_crypto_ctx_aes_ecb_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_ECB);
}

int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA);
}

int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm)
{
	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
				  NX_MODE_AES_XCBC_MAC);
}
/**
 * nx_crypto_ctx_exit - destroy a crypto api context
 *
 * @tfm: the crypto transform pointer for the context
 *
 * As crypto API contexts are destroyed, this exit hook is called to free the
 * memory associated with it.
 */
void nx_crypto_ctx_exit(struct crypto_tfm *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);

	kzfree(nx_ctx->kmem);
	nx_ctx->csbcpb = NULL;
	nx_ctx->csbcpb_aead = NULL;
	nx_ctx->in_sg = NULL;
	nx_ctx->out_sg = NULL;
}
static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id)
{
	dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n",
		viodev->name, viodev->resource_id);

	if (nx_driver.viodev) {
		dev_err(&viodev->dev, "%s: Attempt to register more than one "
			"instance of the hardware\n", __func__);
		return -EINVAL;
	}

	nx_driver.viodev = viodev;

	nx_of_init(&viodev->dev, &nx_driver.of);

	return nx_register_algs();
}
static int nx_remove(struct vio_dev *viodev)
{
	dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n",
		viodev->unit_address);

	if (nx_driver.of.status == NX_OKAY) {
		NX_DEBUGFS_FINI(&nx_driver);

		crypto_unregister_alg(&nx_ccm_aes_alg);
		crypto_unregister_alg(&nx_ccm4309_aes_alg);
		crypto_unregister_alg(&nx_gcm_aes_alg);
		crypto_unregister_alg(&nx_gcm4106_aes_alg);
		crypto_unregister_alg(&nx_ctr_aes_alg);
		crypto_unregister_alg(&nx_ctr3686_aes_alg);
		crypto_unregister_alg(&nx_cbc_aes_alg);
		crypto_unregister_alg(&nx_ecb_aes_alg);
		crypto_unregister_shash(&nx_shash_sha256_alg);
		crypto_unregister_shash(&nx_shash_sha512_alg);
		crypto_unregister_shash(&nx_shash_aes_xcbc_alg);
	}

	return 0;
}
/* module wide initialization/cleanup */
static int __init nx_init(void)
{
	return vio_register_driver(&nx_driver.viodriver);
}

static void __exit nx_fini(void)
{
	vio_unregister_driver(&nx_driver.viodriver);
}

static struct vio_device_id nx_crypto_driver_ids[] = {
	{ "ibm,sym-encryption-v1", "ibm,sym-encryption" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, nx_crypto_driver_ids);

/* driver state structure */
struct nx_crypto_driver nx_driver = {
	.viodriver = {
		.id_table = nx_crypto_driver_ids,
		.probe = nx_probe,
		.remove = nx_remove,
		.name  = NX_NAME,
	},
};

module_init(nx_init);
module_exit(nx_fini);

MODULE_AUTHOR("Kent Yoder <yoder1@us.ibm.com>");
MODULE_DESCRIPTION(NX_STRING);
MODULE_LICENSE("GPL");
MODULE_VERSION(NX_VERSION);