/*
 *  Copyright IBM Corp. 2001, 2012
 *  Author(s): Robert Burroughs
 *	       Eric Rossman (edrossma@us.ibm.com)
 *	       Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
 *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
56 MODULE_AUTHOR("IBM Corporation");
57 MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
58 "Copyright IBM Corp. 2001, 2012");
59 MODULE_LICENSE("GPL");
62 * zcrypt tracepoint functions
64 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req
);
65 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep
);
67 static int zcrypt_hwrng_seed
= 1;
68 module_param_named(hwrng_seed
, zcrypt_hwrng_seed
, int, S_IRUSR
|S_IRGRP
);
69 MODULE_PARM_DESC(hwrng_seed
, "Turn on/off hwrng auto seed, default is 1 (on).");
71 DEFINE_SPINLOCK(zcrypt_list_lock
);
72 LIST_HEAD(zcrypt_card_list
);
73 int zcrypt_device_count
;
75 static atomic_t zcrypt_open_count
= ATOMIC_INIT(0);
76 static atomic_t zcrypt_rescan_count
= ATOMIC_INIT(0);
78 atomic_t zcrypt_rescan_req
= ATOMIC_INIT(0);
79 EXPORT_SYMBOL(zcrypt_rescan_req
);
81 static LIST_HEAD(zcrypt_ops_list
);
83 /* Zcrypt related debug feature stuff. */
84 debug_info_t
*zcrypt_dbf_info
;
87 * Process a rescan of the transport layer.
89 * Returns 1, if the rescan has been processed, otherwise 0.
91 static inline int zcrypt_process_rescan(void)
93 if (atomic_read(&zcrypt_rescan_req
)) {
94 atomic_set(&zcrypt_rescan_req
, 0);
95 atomic_inc(&zcrypt_rescan_count
);
96 ap_bus_force_rescan();
97 ZCRYPT_DBF(DBF_INFO
, "rescan count=%07d",
98 atomic_inc_return(&zcrypt_rescan_count
));
104 void zcrypt_msgtype_register(struct zcrypt_ops
*zops
)
106 list_add_tail(&zops
->list
, &zcrypt_ops_list
);
109 void zcrypt_msgtype_unregister(struct zcrypt_ops
*zops
)
111 list_del_init(&zops
->list
);
114 struct zcrypt_ops
*zcrypt_msgtype(unsigned char *name
, int variant
)
116 struct zcrypt_ops
*zops
;
118 list_for_each_entry(zops
, &zcrypt_ops_list
, list
)
119 if ((zops
->variant
== variant
) &&
120 (!strncmp(zops
->name
, name
, sizeof(zops
->name
))))
124 EXPORT_SYMBOL(zcrypt_msgtype
);
127 * zcrypt_read (): Not supported beyond zcrypt 1.3.1.
129 * This function is not supported beyond zcrypt 1.3.1.
131 static ssize_t
zcrypt_read(struct file
*filp
, char __user
*buf
,
132 size_t count
, loff_t
*f_pos
)
138 * zcrypt_write(): Not allowed.
140 * Write is is not allowed
142 static ssize_t
zcrypt_write(struct file
*filp
, const char __user
*buf
,
143 size_t count
, loff_t
*f_pos
)
149 * zcrypt_open(): Count number of users.
151 * Device open function to count number of users.
153 static int zcrypt_open(struct inode
*inode
, struct file
*filp
)
155 atomic_inc(&zcrypt_open_count
);
156 return nonseekable_open(inode
, filp
);
160 * zcrypt_release(): Count number of users.
162 * Device close function to count number of users.
164 static int zcrypt_release(struct inode
*inode
, struct file
*filp
)
166 atomic_dec(&zcrypt_open_count
);
170 static inline struct zcrypt_queue
*zcrypt_pick_queue(struct zcrypt_card
*zc
,
171 struct zcrypt_queue
*zq
,
174 if (!zq
|| !try_module_get(zq
->queue
->ap_dev
.drv
->driver
.owner
))
176 zcrypt_queue_get(zq
);
177 get_device(&zq
->queue
->ap_dev
.device
);
178 atomic_add(weight
, &zc
->load
);
179 atomic_add(weight
, &zq
->load
);
184 static inline void zcrypt_drop_queue(struct zcrypt_card
*zc
,
185 struct zcrypt_queue
*zq
,
188 struct module
*mod
= zq
->queue
->ap_dev
.drv
->driver
.owner
;
191 atomic_sub(weight
, &zc
->load
);
192 atomic_sub(weight
, &zq
->load
);
193 put_device(&zq
->queue
->ap_dev
.device
);
194 zcrypt_queue_put(zq
);
198 static inline bool zcrypt_card_compare(struct zcrypt_card
*zc
,
199 struct zcrypt_card
*pref_zc
,
200 unsigned weight
, unsigned pref_weight
)
204 weight
+= atomic_read(&zc
->load
);
205 pref_weight
+= atomic_read(&pref_zc
->load
);
206 if (weight
== pref_weight
)
207 return atomic_read(&zc
->card
->total_request_count
) >
208 atomic_read(&pref_zc
->card
->total_request_count
);
209 return weight
> pref_weight
;
212 static inline bool zcrypt_queue_compare(struct zcrypt_queue
*zq
,
213 struct zcrypt_queue
*pref_zq
,
214 unsigned weight
, unsigned pref_weight
)
218 weight
+= atomic_read(&zq
->load
);
219 pref_weight
+= atomic_read(&pref_zq
->load
);
220 if (weight
== pref_weight
)
221 return &zq
->queue
->total_request_count
>
222 &pref_zq
->queue
->total_request_count
;
223 return weight
> pref_weight
;
229 static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo
*mex
)
231 struct zcrypt_card
*zc
, *pref_zc
;
232 struct zcrypt_queue
*zq
, *pref_zq
;
233 unsigned int weight
, pref_weight
;
234 unsigned int func_code
;
235 int qid
= 0, rc
= -ENODEV
;
237 trace_s390_zcrypt_req(mex
, TP_ICARSAMODEXPO
);
239 if (mex
->outputdatalength
< mex
->inputdatalength
) {
245 * As long as outputdatalength is big enough, we can set the
246 * outputdatalength equal to the inputdatalength, since that is the
247 * number of bytes we will copy in any case
249 mex
->outputdatalength
= mex
->inputdatalength
;
251 rc
= get_rsa_modex_fc(mex
, &func_code
);
257 spin_lock(&zcrypt_list_lock
);
258 for_each_zcrypt_card(zc
) {
259 /* Check for online accelarator and CCA cards */
260 if (!zc
->online
|| !(zc
->card
->functions
& 0x18000000))
262 /* Check for size limits */
263 if (zc
->min_mod_size
> mex
->inputdatalength
||
264 zc
->max_mod_size
< mex
->inputdatalength
)
266 /* get weight index of the card device */
267 weight
= zc
->speed_rating
[func_code
];
268 if (zcrypt_card_compare(zc
, pref_zc
, weight
, pref_weight
))
270 for_each_zcrypt_queue(zq
, zc
) {
271 /* check if device is online and eligible */
272 if (!zq
->online
|| !zq
->ops
->rsa_modexpo
)
274 if (zcrypt_queue_compare(zq
, pref_zq
,
275 weight
, pref_weight
))
279 pref_weight
= weight
;
282 pref_zq
= zcrypt_pick_queue(pref_zc
, pref_zq
, weight
);
283 spin_unlock(&zcrypt_list_lock
);
290 qid
= pref_zq
->queue
->qid
;
291 rc
= pref_zq
->ops
->rsa_modexpo(pref_zq
, mex
);
293 spin_lock(&zcrypt_list_lock
);
294 zcrypt_drop_queue(pref_zc
, pref_zq
, weight
);
295 spin_unlock(&zcrypt_list_lock
);
298 trace_s390_zcrypt_rep(mex
, func_code
, rc
,
299 AP_QID_CARD(qid
), AP_QID_QUEUE(qid
));
303 static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt
*crt
)
305 struct zcrypt_card
*zc
, *pref_zc
;
306 struct zcrypt_queue
*zq
, *pref_zq
;
307 unsigned int weight
, pref_weight
;
308 unsigned int func_code
;
309 int qid
= 0, rc
= -ENODEV
;
311 trace_s390_zcrypt_req(crt
, TP_ICARSACRT
);
313 if (crt
->outputdatalength
< crt
->inputdatalength
) {
319 * As long as outputdatalength is big enough, we can set the
320 * outputdatalength equal to the inputdatalength, since that is the
321 * number of bytes we will copy in any case
323 crt
->outputdatalength
= crt
->inputdatalength
;
325 rc
= get_rsa_crt_fc(crt
, &func_code
);
331 spin_lock(&zcrypt_list_lock
);
332 for_each_zcrypt_card(zc
) {
333 /* Check for online accelarator and CCA cards */
334 if (!zc
->online
|| !(zc
->card
->functions
& 0x18000000))
336 /* Check for size limits */
337 if (zc
->min_mod_size
> crt
->inputdatalength
||
338 zc
->max_mod_size
< crt
->inputdatalength
)
340 /* get weight index of the card device */
341 weight
= zc
->speed_rating
[func_code
];
342 if (zcrypt_card_compare(zc
, pref_zc
, weight
, pref_weight
))
344 for_each_zcrypt_queue(zq
, zc
) {
345 /* check if device is online and eligible */
346 if (!zq
->online
|| !zq
->ops
->rsa_modexpo_crt
)
348 if (zcrypt_queue_compare(zq
, pref_zq
,
349 weight
, pref_weight
))
353 pref_weight
= weight
;
356 pref_zq
= zcrypt_pick_queue(pref_zc
, pref_zq
, weight
);
357 spin_unlock(&zcrypt_list_lock
);
364 qid
= pref_zq
->queue
->qid
;
365 rc
= pref_zq
->ops
->rsa_modexpo_crt(pref_zq
, crt
);
367 spin_lock(&zcrypt_list_lock
);
368 zcrypt_drop_queue(pref_zc
, pref_zq
, weight
);
369 spin_unlock(&zcrypt_list_lock
);
372 trace_s390_zcrypt_rep(crt
, func_code
, rc
,
373 AP_QID_CARD(qid
), AP_QID_QUEUE(qid
));
377 static long zcrypt_send_cprb(struct ica_xcRB
*xcRB
)
379 struct zcrypt_card
*zc
, *pref_zc
;
380 struct zcrypt_queue
*zq
, *pref_zq
;
381 struct ap_message ap_msg
;
382 unsigned int weight
, pref_weight
;
383 unsigned int func_code
;
384 unsigned short *domain
;
385 int qid
= 0, rc
= -ENODEV
;
387 trace_s390_zcrypt_req(xcRB
, TB_ZSECSENDCPRB
);
389 rc
= get_cprb_fc(xcRB
, &ap_msg
, &func_code
, &domain
);
395 spin_lock(&zcrypt_list_lock
);
396 for_each_zcrypt_card(zc
) {
397 /* Check for online CCA cards */
398 if (!zc
->online
|| !(zc
->card
->functions
& 0x10000000))
400 /* Check for user selected CCA card */
401 if (xcRB
->user_defined
!= AUTOSELECT
&&
402 xcRB
->user_defined
!= zc
->card
->id
)
404 /* get weight index of the card device */
405 weight
= speed_idx_cca(func_code
) * zc
->speed_rating
[SECKEY
];
406 if (zcrypt_card_compare(zc
, pref_zc
, weight
, pref_weight
))
408 for_each_zcrypt_queue(zq
, zc
) {
409 /* check if device is online and eligible */
411 !zq
->ops
->send_cprb
||
412 ((*domain
!= (unsigned short) AUTOSELECT
) &&
413 (*domain
!= AP_QID_QUEUE(zq
->queue
->qid
))))
415 if (zcrypt_queue_compare(zq
, pref_zq
,
416 weight
, pref_weight
))
420 pref_weight
= weight
;
423 pref_zq
= zcrypt_pick_queue(pref_zc
, pref_zq
, weight
);
424 spin_unlock(&zcrypt_list_lock
);
431 /* in case of auto select, provide the correct domain */
432 qid
= pref_zq
->queue
->qid
;
433 if (*domain
== (unsigned short) AUTOSELECT
)
434 *domain
= AP_QID_QUEUE(qid
);
436 rc
= pref_zq
->ops
->send_cprb(pref_zq
, xcRB
, &ap_msg
);
438 spin_lock(&zcrypt_list_lock
);
439 zcrypt_drop_queue(pref_zc
, pref_zq
, weight
);
440 spin_unlock(&zcrypt_list_lock
);
443 trace_s390_zcrypt_rep(xcRB
, func_code
, rc
,
444 AP_QID_CARD(qid
), AP_QID_QUEUE(qid
));
448 static bool is_desired_ep11_card(unsigned int dev_id
,
449 unsigned short target_num
,
450 struct ep11_target_dev
*targets
)
452 while (target_num
-- > 0) {
453 if (dev_id
== targets
->ap_id
)
460 static bool is_desired_ep11_queue(unsigned int dev_qid
,
461 unsigned short target_num
,
462 struct ep11_target_dev
*targets
)
464 while (target_num
-- > 0) {
465 if (AP_MKQID(targets
->ap_id
, targets
->dom_id
) == dev_qid
)
472 static long zcrypt_send_ep11_cprb(struct ep11_urb
*xcrb
)
474 struct zcrypt_card
*zc
, *pref_zc
;
475 struct zcrypt_queue
*zq
, *pref_zq
;
476 struct ep11_target_dev
*targets
;
477 unsigned short target_num
;
478 unsigned int weight
, pref_weight
;
479 unsigned int func_code
;
480 struct ap_message ap_msg
;
481 int qid
= 0, rc
= -ENODEV
;
483 trace_s390_zcrypt_req(xcrb
, TP_ZSENDEP11CPRB
);
485 target_num
= (unsigned short) xcrb
->targets_num
;
487 /* empty list indicates autoselect (all available targets) */
489 if (target_num
!= 0) {
490 struct ep11_target_dev __user
*uptr
;
492 targets
= kcalloc(target_num
, sizeof(*targets
), GFP_KERNEL
);
498 uptr
= (struct ep11_target_dev __force __user
*) xcrb
->targets
;
499 if (copy_from_user(targets
, uptr
,
500 target_num
* sizeof(*targets
))) {
506 rc
= get_ep11cprb_fc(xcrb
, &ap_msg
, &func_code
);
512 spin_lock(&zcrypt_list_lock
);
513 for_each_zcrypt_card(zc
) {
514 /* Check for online EP11 cards */
515 if (!zc
->online
|| !(zc
->card
->functions
& 0x04000000))
517 /* Check for user selected EP11 card */
519 !is_desired_ep11_card(zc
->card
->id
, target_num
, targets
))
521 /* get weight index of the card device */
522 weight
= speed_idx_ep11(func_code
) * zc
->speed_rating
[SECKEY
];
523 if (zcrypt_card_compare(zc
, pref_zc
, weight
, pref_weight
))
525 for_each_zcrypt_queue(zq
, zc
) {
526 /* check if device is online and eligible */
528 !zq
->ops
->send_ep11_cprb
||
530 !is_desired_ep11_queue(zq
->queue
->qid
,
531 target_num
, targets
)))
533 if (zcrypt_queue_compare(zq
, pref_zq
,
534 weight
, pref_weight
))
538 pref_weight
= weight
;
541 pref_zq
= zcrypt_pick_queue(pref_zc
, pref_zq
, weight
);
542 spin_unlock(&zcrypt_list_lock
);
549 qid
= pref_zq
->queue
->qid
;
550 rc
= pref_zq
->ops
->send_ep11_cprb(pref_zq
, xcrb
, &ap_msg
);
552 spin_lock(&zcrypt_list_lock
);
553 zcrypt_drop_queue(pref_zc
, pref_zq
, weight
);
554 spin_unlock(&zcrypt_list_lock
);
559 trace_s390_zcrypt_rep(xcrb
, func_code
, rc
,
560 AP_QID_CARD(qid
), AP_QID_QUEUE(qid
));
564 static long zcrypt_rng(char *buffer
)
566 struct zcrypt_card
*zc
, *pref_zc
;
567 struct zcrypt_queue
*zq
, *pref_zq
;
568 unsigned int weight
, pref_weight
;
569 unsigned int func_code
;
570 struct ap_message ap_msg
;
572 int qid
= 0, rc
= -ENODEV
;
574 trace_s390_zcrypt_req(buffer
, TP_HWRNGCPRB
);
576 rc
= get_rng_fc(&ap_msg
, &func_code
, &domain
);
582 spin_lock(&zcrypt_list_lock
);
583 for_each_zcrypt_card(zc
) {
584 /* Check for online CCA cards */
585 if (!zc
->online
|| !(zc
->card
->functions
& 0x10000000))
587 /* get weight index of the card device */
588 weight
= zc
->speed_rating
[func_code
];
589 if (zcrypt_card_compare(zc
, pref_zc
, weight
, pref_weight
))
591 for_each_zcrypt_queue(zq
, zc
) {
592 /* check if device is online and eligible */
593 if (!zq
->online
|| !zq
->ops
->rng
)
595 if (zcrypt_queue_compare(zq
, pref_zq
,
596 weight
, pref_weight
))
600 pref_weight
= weight
;
603 pref_zq
= zcrypt_pick_queue(pref_zc
, pref_zq
, weight
);
604 spin_unlock(&zcrypt_list_lock
);
609 qid
= pref_zq
->queue
->qid
;
610 rc
= pref_zq
->ops
->rng(pref_zq
, buffer
, &ap_msg
);
612 spin_lock(&zcrypt_list_lock
);
613 zcrypt_drop_queue(pref_zc
, pref_zq
, weight
);
614 spin_unlock(&zcrypt_list_lock
);
617 trace_s390_zcrypt_rep(buffer
, func_code
, rc
,
618 AP_QID_CARD(qid
), AP_QID_QUEUE(qid
));
622 static void zcrypt_device_status_mask(struct zcrypt_device_matrix
*matrix
)
624 struct zcrypt_card
*zc
;
625 struct zcrypt_queue
*zq
;
626 struct zcrypt_device_status
*stat
;
628 memset(matrix
, 0, sizeof(*matrix
));
629 spin_lock(&zcrypt_list_lock
);
630 for_each_zcrypt_card(zc
) {
631 for_each_zcrypt_queue(zq
, zc
) {
632 stat
= matrix
->device
;
633 stat
+= AP_QID_CARD(zq
->queue
->qid
) * MAX_ZDEV_DOMAINS
;
634 stat
+= AP_QID_QUEUE(zq
->queue
->qid
);
635 stat
->hwtype
= zc
->card
->ap_dev
.device_type
;
636 stat
->functions
= zc
->card
->functions
>> 26;
637 stat
->qid
= zq
->queue
->qid
;
638 stat
->online
= zq
->online
? 0x01 : 0x00;
641 spin_unlock(&zcrypt_list_lock
);
643 EXPORT_SYMBOL(zcrypt_device_status_mask
);
645 static void zcrypt_status_mask(char status
[AP_DEVICES
])
647 struct zcrypt_card
*zc
;
648 struct zcrypt_queue
*zq
;
650 memset(status
, 0, sizeof(char) * AP_DEVICES
);
651 spin_lock(&zcrypt_list_lock
);
652 for_each_zcrypt_card(zc
) {
653 for_each_zcrypt_queue(zq
, zc
) {
654 if (AP_QID_QUEUE(zq
->queue
->qid
) != ap_domain_index
)
656 status
[AP_QID_CARD(zq
->queue
->qid
)] =
657 zc
->online
? zc
->user_space_type
: 0x0d;
660 spin_unlock(&zcrypt_list_lock
);
663 static void zcrypt_qdepth_mask(char qdepth
[AP_DEVICES
])
665 struct zcrypt_card
*zc
;
666 struct zcrypt_queue
*zq
;
668 memset(qdepth
, 0, sizeof(char) * AP_DEVICES
);
669 spin_lock(&zcrypt_list_lock
);
671 for_each_zcrypt_card(zc
) {
672 for_each_zcrypt_queue(zq
, zc
) {
673 if (AP_QID_QUEUE(zq
->queue
->qid
) != ap_domain_index
)
675 spin_lock(&zq
->queue
->lock
);
676 qdepth
[AP_QID_CARD(zq
->queue
->qid
)] =
677 zq
->queue
->pendingq_count
+
678 zq
->queue
->requestq_count
;
679 spin_unlock(&zq
->queue
->lock
);
683 spin_unlock(&zcrypt_list_lock
);
686 static void zcrypt_perdev_reqcnt(int reqcnt
[AP_DEVICES
])
688 struct zcrypt_card
*zc
;
689 struct zcrypt_queue
*zq
;
691 memset(reqcnt
, 0, sizeof(int) * AP_DEVICES
);
692 spin_lock(&zcrypt_list_lock
);
694 for_each_zcrypt_card(zc
) {
695 for_each_zcrypt_queue(zq
, zc
) {
696 if (AP_QID_QUEUE(zq
->queue
->qid
) != ap_domain_index
)
698 spin_lock(&zq
->queue
->lock
);
699 reqcnt
[AP_QID_CARD(zq
->queue
->qid
)] =
700 zq
->queue
->total_request_count
;
701 spin_unlock(&zq
->queue
->lock
);
705 spin_unlock(&zcrypt_list_lock
);
708 static int zcrypt_pendingq_count(void)
710 struct zcrypt_card
*zc
;
711 struct zcrypt_queue
*zq
;
715 spin_lock(&zcrypt_list_lock
);
717 for_each_zcrypt_card(zc
) {
718 for_each_zcrypt_queue(zq
, zc
) {
719 if (AP_QID_QUEUE(zq
->queue
->qid
) != ap_domain_index
)
721 spin_lock(&zq
->queue
->lock
);
722 pendingq_count
+= zq
->queue
->pendingq_count
;
723 spin_unlock(&zq
->queue
->lock
);
727 spin_unlock(&zcrypt_list_lock
);
728 return pendingq_count
;
731 static int zcrypt_requestq_count(void)
733 struct zcrypt_card
*zc
;
734 struct zcrypt_queue
*zq
;
738 spin_lock(&zcrypt_list_lock
);
740 for_each_zcrypt_card(zc
) {
741 for_each_zcrypt_queue(zq
, zc
) {
742 if (AP_QID_QUEUE(zq
->queue
->qid
) != ap_domain_index
)
744 spin_lock(&zq
->queue
->lock
);
745 requestq_count
+= zq
->queue
->requestq_count
;
746 spin_unlock(&zq
->queue
->lock
);
750 spin_unlock(&zcrypt_list_lock
);
751 return requestq_count
;
754 static int zcrypt_count_type(int type
)
756 struct zcrypt_card
*zc
;
757 struct zcrypt_queue
*zq
;
761 spin_lock(&zcrypt_list_lock
);
762 for_each_zcrypt_card(zc
) {
763 if (zc
->card
->id
!= type
)
765 for_each_zcrypt_queue(zq
, zc
) {
766 if (AP_QID_QUEUE(zq
->queue
->qid
) != ap_domain_index
)
771 spin_unlock(&zcrypt_list_lock
);
776 * zcrypt_ica_status(): Old, depracted combi status call.
778 * Old, deprecated combi status call.
780 static long zcrypt_ica_status(struct file
*filp
, unsigned long arg
)
782 struct ica_z90_status
*pstat
;
785 pstat
= kzalloc(sizeof(*pstat
), GFP_KERNEL
);
788 pstat
->totalcount
= zcrypt_device_count
;
789 pstat
->leedslitecount
= zcrypt_count_type(ZCRYPT_PCICA
);
790 pstat
->leeds2count
= zcrypt_count_type(ZCRYPT_PCICC
);
791 pstat
->requestqWaitCount
= zcrypt_requestq_count();
792 pstat
->pendingqWaitCount
= zcrypt_pendingq_count();
793 pstat
->totalOpenCount
= atomic_read(&zcrypt_open_count
);
794 pstat
->cryptoDomain
= ap_domain_index
;
795 zcrypt_status_mask(pstat
->status
);
796 zcrypt_qdepth_mask(pstat
->qdepth
);
798 if (copy_to_user((void __user
*) arg
, pstat
, sizeof(*pstat
)))
804 static long zcrypt_unlocked_ioctl(struct file
*filp
, unsigned int cmd
,
810 case ICARSAMODEXPO
: {
811 struct ica_rsa_modexpo __user
*umex
= (void __user
*) arg
;
812 struct ica_rsa_modexpo mex
;
813 if (copy_from_user(&mex
, umex
, sizeof(mex
)))
816 rc
= zcrypt_rsa_modexpo(&mex
);
817 } while (rc
== -EAGAIN
);
818 /* on failure: retry once again after a requested rescan */
819 if ((rc
== -ENODEV
) && (zcrypt_process_rescan()))
821 rc
= zcrypt_rsa_modexpo(&mex
);
822 } while (rc
== -EAGAIN
);
825 return put_user(mex
.outputdatalength
, &umex
->outputdatalength
);
828 struct ica_rsa_modexpo_crt __user
*ucrt
= (void __user
*) arg
;
829 struct ica_rsa_modexpo_crt crt
;
830 if (copy_from_user(&crt
, ucrt
, sizeof(crt
)))
833 rc
= zcrypt_rsa_crt(&crt
);
834 } while (rc
== -EAGAIN
);
835 /* on failure: retry once again after a requested rescan */
836 if ((rc
== -ENODEV
) && (zcrypt_process_rescan()))
838 rc
= zcrypt_rsa_crt(&crt
);
839 } while (rc
== -EAGAIN
);
842 return put_user(crt
.outputdatalength
, &ucrt
->outputdatalength
);
845 struct ica_xcRB __user
*uxcRB
= (void __user
*) arg
;
846 struct ica_xcRB xcRB
;
847 if (copy_from_user(&xcRB
, uxcRB
, sizeof(xcRB
)))
850 rc
= zcrypt_send_cprb(&xcRB
);
851 } while (rc
== -EAGAIN
);
852 /* on failure: retry once again after a requested rescan */
853 if ((rc
== -ENODEV
) && (zcrypt_process_rescan()))
855 rc
= zcrypt_send_cprb(&xcRB
);
856 } while (rc
== -EAGAIN
);
857 if (copy_to_user(uxcRB
, &xcRB
, sizeof(xcRB
)))
861 case ZSENDEP11CPRB
: {
862 struct ep11_urb __user
*uxcrb
= (void __user
*)arg
;
863 struct ep11_urb xcrb
;
864 if (copy_from_user(&xcrb
, uxcrb
, sizeof(xcrb
)))
867 rc
= zcrypt_send_ep11_cprb(&xcrb
);
868 } while (rc
== -EAGAIN
);
869 /* on failure: retry once again after a requested rescan */
870 if ((rc
== -ENODEV
) && (zcrypt_process_rescan()))
872 rc
= zcrypt_send_ep11_cprb(&xcrb
);
873 } while (rc
== -EAGAIN
);
874 if (copy_to_user(uxcrb
, &xcrb
, sizeof(xcrb
)))
878 case ZDEVICESTATUS
: {
879 struct zcrypt_device_matrix
*device_status
;
881 device_status
= kzalloc(sizeof(struct zcrypt_device_matrix
),
886 zcrypt_device_status_mask(device_status
);
888 if (copy_to_user((char __user
*) arg
, device_status
,
889 sizeof(struct zcrypt_device_matrix
))) {
890 kfree(device_status
);
894 kfree(device_status
);
897 case Z90STAT_STATUS_MASK
: {
898 char status
[AP_DEVICES
];
899 zcrypt_status_mask(status
);
900 if (copy_to_user((char __user
*) arg
, status
,
901 sizeof(char) * AP_DEVICES
))
905 case Z90STAT_QDEPTH_MASK
: {
906 char qdepth
[AP_DEVICES
];
907 zcrypt_qdepth_mask(qdepth
);
908 if (copy_to_user((char __user
*) arg
, qdepth
,
909 sizeof(char) * AP_DEVICES
))
913 case Z90STAT_PERDEV_REQCNT
: {
914 int reqcnt
[AP_DEVICES
];
915 zcrypt_perdev_reqcnt(reqcnt
);
916 if (copy_to_user((int __user
*) arg
, reqcnt
,
917 sizeof(int) * AP_DEVICES
))
921 case Z90STAT_REQUESTQ_COUNT
:
922 return put_user(zcrypt_requestq_count(), (int __user
*) arg
);
923 case Z90STAT_PENDINGQ_COUNT
:
924 return put_user(zcrypt_pendingq_count(), (int __user
*) arg
);
925 case Z90STAT_TOTALOPEN_COUNT
:
926 return put_user(atomic_read(&zcrypt_open_count
),
928 case Z90STAT_DOMAIN_INDEX
:
929 return put_user(ap_domain_index
, (int __user
*) arg
);
931 * Deprecated ioctls. Don't add another device count ioctl,
932 * you can count them yourself in the user space with the
933 * output of the Z90STAT_STATUS_MASK ioctl.
936 return zcrypt_ica_status(filp
, arg
);
937 case Z90STAT_TOTALCOUNT
:
938 return put_user(zcrypt_device_count
, (int __user
*) arg
);
939 case Z90STAT_PCICACOUNT
:
940 return put_user(zcrypt_count_type(ZCRYPT_PCICA
),
942 case Z90STAT_PCICCCOUNT
:
943 return put_user(zcrypt_count_type(ZCRYPT_PCICC
),
945 case Z90STAT_PCIXCCMCL2COUNT
:
946 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2
),
948 case Z90STAT_PCIXCCMCL3COUNT
:
949 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3
),
951 case Z90STAT_PCIXCCCOUNT
:
952 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2
) +
953 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3
),
955 case Z90STAT_CEX2CCOUNT
:
956 return put_user(zcrypt_count_type(ZCRYPT_CEX2C
),
958 case Z90STAT_CEX2ACOUNT
:
959 return put_user(zcrypt_count_type(ZCRYPT_CEX2A
),
962 /* unknown ioctl number */
969 * ioctl32 conversion routines
971 struct compat_ica_rsa_modexpo
{
972 compat_uptr_t inputdata
;
973 unsigned int inputdatalength
;
974 compat_uptr_t outputdata
;
975 unsigned int outputdatalength
;
977 compat_uptr_t n_modulus
;
980 static long trans_modexpo32(struct file
*filp
, unsigned int cmd
,
983 struct compat_ica_rsa_modexpo __user
*umex32
= compat_ptr(arg
);
984 struct compat_ica_rsa_modexpo mex32
;
985 struct ica_rsa_modexpo mex64
;
988 if (copy_from_user(&mex32
, umex32
, sizeof(mex32
)))
990 mex64
.inputdata
= compat_ptr(mex32
.inputdata
);
991 mex64
.inputdatalength
= mex32
.inputdatalength
;
992 mex64
.outputdata
= compat_ptr(mex32
.outputdata
);
993 mex64
.outputdatalength
= mex32
.outputdatalength
;
994 mex64
.b_key
= compat_ptr(mex32
.b_key
);
995 mex64
.n_modulus
= compat_ptr(mex32
.n_modulus
);
997 rc
= zcrypt_rsa_modexpo(&mex64
);
998 } while (rc
== -EAGAIN
);
999 /* on failure: retry once again after a requested rescan */
1000 if ((rc
== -ENODEV
) && (zcrypt_process_rescan()))
1002 rc
= zcrypt_rsa_modexpo(&mex64
);
1003 } while (rc
== -EAGAIN
);
1006 return put_user(mex64
.outputdatalength
,
1007 &umex32
->outputdatalength
);
1010 struct compat_ica_rsa_modexpo_crt
{
1011 compat_uptr_t inputdata
;
1012 unsigned int inputdatalength
;
1013 compat_uptr_t outputdata
;
1014 unsigned int outputdatalength
;
1015 compat_uptr_t bp_key
;
1016 compat_uptr_t bq_key
;
1017 compat_uptr_t np_prime
;
1018 compat_uptr_t nq_prime
;
1019 compat_uptr_t u_mult_inv
;
1022 static long trans_modexpo_crt32(struct file
*filp
, unsigned int cmd
,
1025 struct compat_ica_rsa_modexpo_crt __user
*ucrt32
= compat_ptr(arg
);
1026 struct compat_ica_rsa_modexpo_crt crt32
;
1027 struct ica_rsa_modexpo_crt crt64
;
1030 if (copy_from_user(&crt32
, ucrt32
, sizeof(crt32
)))
1032 crt64
.inputdata
= compat_ptr(crt32
.inputdata
);
1033 crt64
.inputdatalength
= crt32
.inputdatalength
;
1034 crt64
.outputdata
= compat_ptr(crt32
.outputdata
);
1035 crt64
.outputdatalength
= crt32
.outputdatalength
;
1036 crt64
.bp_key
= compat_ptr(crt32
.bp_key
);
1037 crt64
.bq_key
= compat_ptr(crt32
.bq_key
);
1038 crt64
.np_prime
= compat_ptr(crt32
.np_prime
);
1039 crt64
.nq_prime
= compat_ptr(crt32
.nq_prime
);
1040 crt64
.u_mult_inv
= compat_ptr(crt32
.u_mult_inv
);
1042 rc
= zcrypt_rsa_crt(&crt64
);
1043 } while (rc
== -EAGAIN
);
1044 /* on failure: retry once again after a requested rescan */
1045 if ((rc
== -ENODEV
) && (zcrypt_process_rescan()))
1047 rc
= zcrypt_rsa_crt(&crt64
);
1048 } while (rc
== -EAGAIN
);
1051 return put_user(crt64
.outputdatalength
,
1052 &ucrt32
->outputdatalength
);
1055 struct compat_ica_xcRB
{
1056 unsigned short agent_ID
;
1057 unsigned int user_defined
;
1058 unsigned short request_ID
;
1059 unsigned int request_control_blk_length
;
1060 unsigned char padding1
[16 - sizeof (compat_uptr_t
)];
1061 compat_uptr_t request_control_blk_addr
;
1062 unsigned int request_data_length
;
1063 char padding2
[16 - sizeof (compat_uptr_t
)];
1064 compat_uptr_t request_data_address
;
1065 unsigned int reply_control_blk_length
;
1066 char padding3
[16 - sizeof (compat_uptr_t
)];
1067 compat_uptr_t reply_control_blk_addr
;
1068 unsigned int reply_data_length
;
1069 char padding4
[16 - sizeof (compat_uptr_t
)];
1070 compat_uptr_t reply_data_addr
;
1071 unsigned short priority_window
;
1072 unsigned int status
;
1073 } __attribute__((packed
));
1075 static long trans_xcRB32(struct file
*filp
, unsigned int cmd
,
1078 struct compat_ica_xcRB __user
*uxcRB32
= compat_ptr(arg
);
1079 struct compat_ica_xcRB xcRB32
;
1080 struct ica_xcRB xcRB64
;
1083 if (copy_from_user(&xcRB32
, uxcRB32
, sizeof(xcRB32
)))
1085 xcRB64
.agent_ID
= xcRB32
.agent_ID
;
1086 xcRB64
.user_defined
= xcRB32
.user_defined
;
1087 xcRB64
.request_ID
= xcRB32
.request_ID
;
1088 xcRB64
.request_control_blk_length
=
1089 xcRB32
.request_control_blk_length
;
1090 xcRB64
.request_control_blk_addr
=
1091 compat_ptr(xcRB32
.request_control_blk_addr
);
1092 xcRB64
.request_data_length
=
1093 xcRB32
.request_data_length
;
1094 xcRB64
.request_data_address
=
1095 compat_ptr(xcRB32
.request_data_address
);
1096 xcRB64
.reply_control_blk_length
=
1097 xcRB32
.reply_control_blk_length
;
1098 xcRB64
.reply_control_blk_addr
=
1099 compat_ptr(xcRB32
.reply_control_blk_addr
);
1100 xcRB64
.reply_data_length
= xcRB32
.reply_data_length
;
1101 xcRB64
.reply_data_addr
=
1102 compat_ptr(xcRB32
.reply_data_addr
);
1103 xcRB64
.priority_window
= xcRB32
.priority_window
;
1104 xcRB64
.status
= xcRB32
.status
;
1106 rc
= zcrypt_send_cprb(&xcRB64
);
1107 } while (rc
== -EAGAIN
);
1108 /* on failure: retry once again after a requested rescan */
1109 if ((rc
== -ENODEV
) && (zcrypt_process_rescan()))
1111 rc
= zcrypt_send_cprb(&xcRB64
);
1112 } while (rc
== -EAGAIN
);
1113 xcRB32
.reply_control_blk_length
= xcRB64
.reply_control_blk_length
;
1114 xcRB32
.reply_data_length
= xcRB64
.reply_data_length
;
1115 xcRB32
.status
= xcRB64
.status
;
1116 if (copy_to_user(uxcRB32
, &xcRB32
, sizeof(xcRB32
)))
1121 static long zcrypt_compat_ioctl(struct file
*filp
, unsigned int cmd
,
1124 if (cmd
== ICARSAMODEXPO
)
1125 return trans_modexpo32(filp
, cmd
, arg
);
1126 if (cmd
== ICARSACRT
)
1127 return trans_modexpo_crt32(filp
, cmd
, arg
);
1128 if (cmd
== ZSECSENDCPRB
)
1129 return trans_xcRB32(filp
, cmd
, arg
);
1130 return zcrypt_unlocked_ioctl(filp
, cmd
, arg
);
1135 * Misc device file operations.
1137 static const struct file_operations zcrypt_fops
= {
1138 .owner
= THIS_MODULE
,
1139 .read
= zcrypt_read
,
1140 .write
= zcrypt_write
,
1141 .unlocked_ioctl
= zcrypt_unlocked_ioctl
,
1142 #ifdef CONFIG_COMPAT
1143 .compat_ioctl
= zcrypt_compat_ioctl
,
1145 .open
= zcrypt_open
,
1146 .release
= zcrypt_release
,
1147 .llseek
= no_llseek
,
1153 static struct miscdevice zcrypt_misc_device
= {
1154 .minor
= MISC_DYNAMIC_MINOR
,
1156 .fops
= &zcrypt_fops
,
/*
 * Deprecated /proc entry support.
 */
static struct proc_dir_entry *zcrypt_entry;
/*
 * Print len bytes as single hex nibbles followed by a space.
 */
static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		seq_printf(m, "%01x", (unsigned int) addr[i]);
	seq_putc(m, ' ');
}
/*
 * Print one row of hex output: groups of 16 bytes plus any remainder,
 * terminated by a newline.
 */
static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len)
{
	unsigned int inl, c, cx;

	seq_printf(m, "	   ");
	inl = 0;
	for (c = 0; c < (len / 16); c++) {
		sprintcl(m, addr+inl, 16);
		inl += 16;
	}
	cx = len % 16;
	if (cx) {
		sprintcl(m, addr+inl, cx);
		inl += cx;
	}
	seq_putc(m, '\n');
}
/*
 * Print a titled hex dump: rows of 64 bytes plus any remainder.
 */
static void sprinthx(unsigned char *title, struct seq_file *m,
		     unsigned char *addr, unsigned int len)
{
	unsigned int inl, r, rx;

	seq_printf(m, "\n%s\n", title);
	inl = 0;
	for (r = 0; r < (len / 64); r++) {
		sprintrw(m, addr+inl, 64);
		inl += 64;
	}
	rx = len % 64;
	if (rx) {
		sprintrw(m, addr+inl, rx);
		inl += rx;
	}
	seq_putc(m, '\n');
}
1210 static void sprinthx4(unsigned char *title
, struct seq_file
*m
,
1211 unsigned int *array
, unsigned int len
)
1213 seq_printf(m
, "\n%s\n", title
);
1214 seq_hex_dump(m
, " ", DUMP_PREFIX_NONE
, 32, 4, array
, len
, false);
1218 static int zcrypt_proc_show(struct seq_file
*m
, void *v
)
1220 char workarea
[sizeof(int) * AP_DEVICES
];
1222 seq_printf(m
, "\nzcrypt version: %d.%d.%d\n",
1223 ZCRYPT_VERSION
, ZCRYPT_RELEASE
, ZCRYPT_VARIANT
);
1224 seq_printf(m
, "Cryptographic domain: %d\n", ap_domain_index
);
1225 seq_printf(m
, "Total device count: %d\n", zcrypt_device_count
);
1226 seq_printf(m
, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA
));
1227 seq_printf(m
, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC
));
1228 seq_printf(m
, "PCIXCC MCL2 count: %d\n",
1229 zcrypt_count_type(ZCRYPT_PCIXCC_MCL2
));
1230 seq_printf(m
, "PCIXCC MCL3 count: %d\n",
1231 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3
));
1232 seq_printf(m
, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C
));
1233 seq_printf(m
, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A
));
1234 seq_printf(m
, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C
));
1235 seq_printf(m
, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A
));
1236 seq_printf(m
, "requestq count: %d\n", zcrypt_requestq_count());
1237 seq_printf(m
, "pendingq count: %d\n", zcrypt_pendingq_count());
1238 seq_printf(m
, "Total open handles: %d\n\n",
1239 atomic_read(&zcrypt_open_count
));
1240 zcrypt_status_mask(workarea
);
1241 sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
1242 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
1243 m
, workarea
, AP_DEVICES
);
1244 zcrypt_qdepth_mask(workarea
);
1245 sprinthx("Waiting work element counts", m
, workarea
, AP_DEVICES
);
1246 zcrypt_perdev_reqcnt((int *) workarea
);
1247 sprinthx4("Per-device successfully completed request counts",
1248 m
, (unsigned int *) workarea
, AP_DEVICES
);
/*
 * zcrypt_proc_open() - open handler for /proc/driver/z90crypt.
 *
 * Hooks the seq_file single_open() machinery up to zcrypt_proc_show().
 */
static int zcrypt_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, zcrypt_proc_show, NULL);
}
/*
 * zcrypt_disable_card() - take a card offline and flush its queue.
 * @index: card slot index as counted by zcrypt_proc_write()
 *
 * Walks all cards/queues under zcrypt_list_lock and, for queues in the
 * configured AP domain, marks them offline and flushes pending work.
 *
 * NOTE(review): @index is not referenced in the loop body visible here;
 * the function appears to act on every queue of ap_domain_index —
 * confirm the intended per-card semantics against the caller.
 */
static void zcrypt_disable_card(int index)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			/* only touch queues of the configured domain */
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			zq->online = 0;
			ap_flush_queue(zq->queue);
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
/*
 * zcrypt_enable_card() - bring a card back online.
 * @index: card slot index as counted by zcrypt_proc_write()
 *
 * Mirror of zcrypt_disable_card(): under zcrypt_list_lock, queues of
 * the configured AP domain are marked online and flushed.
 *
 * NOTE(review): as in zcrypt_disable_card(), @index is unused in the
 * visible body — verify against the proc write parser.
 */
static void zcrypt_enable_card(int index)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			/* only touch queues of the configured domain */
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			zq->online = 1;
			ap_flush_queue(zq->queue);
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
1291 static ssize_t
zcrypt_proc_write(struct file
*file
, const char __user
*buffer
,
1292 size_t count
, loff_t
*pos
)
1294 unsigned char *lbuf
, *ptr
;
1301 #define LBUFSIZE 1200UL
1302 lbuf
= kmalloc(LBUFSIZE
, GFP_KERNEL
);
1306 local_count
= min(LBUFSIZE
- 1, count
);
1307 if (copy_from_user(lbuf
, buffer
, local_count
) != 0) {
1311 lbuf
[local_count
] = '\0';
1313 ptr
= strstr(lbuf
, "Online devices");
1316 ptr
= strstr(ptr
, "\n");
1321 if (strstr(ptr
, "Waiting work element counts") == NULL
)
1324 for (j
= 0; j
< 64 && *ptr
; ptr
++) {
1326 * '0' for no device, '1' for PCICA, '2' for PCICC,
1327 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
1328 * '5' for CEX2C and '6' for CEX2A'
1329 * '7' for CEX3C and '8' for CEX3A
1331 if (*ptr
>= '0' && *ptr
<= '8')
1333 else if (*ptr
== 'd' || *ptr
== 'D')
1334 zcrypt_disable_card(j
++);
1335 else if (*ptr
== 'e' || *ptr
== 'E')
1336 zcrypt_enable_card(j
++);
1337 else if (*ptr
!= ' ' && *ptr
!= '\t')
1345 static const struct file_operations zcrypt_proc_fops
= {
1346 .owner
= THIS_MODULE
,
1347 .open
= zcrypt_proc_open
,
1349 .llseek
= seq_lseek
,
1350 .release
= single_release
,
1351 .write
= zcrypt_proc_write
,
/* Number of hwrng users; registration happens on the 0 -> 1 transition
 * (guarded by zcrypt_rng_mutex). */
static int zcrypt_rng_device_count;
/* One page of random data fetched from the cards, consumed word-wise. */
static u32 *zcrypt_rng_buffer;
/* Index of the next unread u32 in zcrypt_rng_buffer; 0 means "refill". */
static int zcrypt_rng_buffer_index;
/* Serializes hwrng register/unregister and the buffer's lifetime. */
static DEFINE_MUTEX(zcrypt_rng_mutex);
/*
 * zcrypt_rng_data_read() - hwrng callback delivering one 32-bit word.
 * @rng:  registered hwrng device (unused)
 * @data: where to store the random word
 *
 * Refills zcrypt_rng_buffer from the crypto cards when it is empty,
 * then hands out one buffered word per call.  Returns the number of
 * bytes stored (sizeof *data) or -EIO if no card could deliver data.
 */
static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		/* rc is the number of random bytes obtained */
		zcrypt_rng_buffer_index = rc / sizeof *data;
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof *data;
}
/*
 * hwrng device backed by the crypto cards' hardware RNG.
 * NOTE(review): .name/.quality fields were lost in this view and are
 * restored from the driver's conventions — confirm against upstream.
 */
static struct hwrng zcrypt_rng_dev = {
	.name		= "zcrypt",
	.data_read	= zcrypt_rng_data_read,
	.quality	= 990,	/* per-mille entropy estimate */
};
/*
 * zcrypt_rng_device_add() - register the zcrypt hwrng for one more user.
 *
 * The first caller allocates the random-data page and registers the
 * hwrng device; subsequent callers only bump the reference count.
 * Returns 0 on success, -ENOMEM if the page cannot be allocated, or
 * the hwrng_register() error code.
 */
int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		/* module parameter: allow opting out of seeding */
		if (!zcrypt_hwrng_seed)
			zcrypt_rng_dev.quality = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else
		zcrypt_rng_device_count++;
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	zcrypt_rng_device_count = 0;
	free_page((unsigned long) zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}
/*
 * zcrypt_rng_device_remove() - drop one hwrng user.
 *
 * The last caller unregisters the hwrng device and frees the random
 * data page.  Must balance a successful zcrypt_rng_device_add().
 */
void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long) zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}
1427 int __init
zcrypt_debug_init(void)
1429 zcrypt_dbf_info
= debug_register("zcrypt", 1, 1,
1430 DBF_MAX_SPRINTF_ARGS
* sizeof(long));
1431 debug_register_view(zcrypt_dbf_info
, &debug_sprintf_view
);
1432 debug_set_level(zcrypt_dbf_info
, DBF_ERR
);
/* Tear down the zcrypt s390 debug feature area. */
void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}
/**
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	atomic_set(&zcrypt_rescan_req, 0);

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out;

	/* Set up the proc file system */
	zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL,
				   &zcrypt_proc_fops);
	if (!zcrypt_entry) {
		rc = -ENOMEM;
		goto out_misc;
	}

	/* register the supported message types with the API layer */
	zcrypt_msgtype6_init();
	zcrypt_msgtype50_init();
	return 0;

out_misc:
	/* unwind in reverse order of acquisition */
	misc_deregister(&zcrypt_misc_device);
out:
	return rc;
}
/**
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code.
 */
void __exit zcrypt_api_exit(void)
{
	/* release resources in reverse order of zcrypt_api_init() */
	remove_proc_entry("driver/z90crypt", NULL);
	misc_deregister(&zcrypt_misc_device);
	zcrypt_msgtype6_exit();
	zcrypt_msgtype50_exit();
	zcrypt_debug_exit();
}
/* Wire the init/exit routines into the module loader. */
module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);