/*
 * drivers/s390/crypto/z90main.c
 * (from mirror_ubuntu-eoan-kernel.git; patch: "s390: CEX2A crt message length")
 */
1 /*
2 * linux/drivers/s390/crypto/z90main.c
3 *
4 * z90crypt 1.3.3
5 *
6 * Copyright (C) 2001, 2005 IBM Corporation
7 * Author(s): Robert Burroughs (burrough@us.ibm.com)
8 * Eric Rossman (edrossma@us.ibm.com)
9 *
10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 */
26
27 #include <asm/uaccess.h> // copy_(from|to)_user
28 #include <linux/compat.h>
29 #include <linux/compiler.h>
30 #include <linux/delay.h> // mdelay
31 #include <linux/init.h>
32 #include <linux/interrupt.h> // for tasklets
33 #include <linux/miscdevice.h>
34 #include <linux/module.h>
35 #include <linux/moduleparam.h>
36 #include <linux/proc_fs.h>
37 #include <linux/syscalls.h>
38 #include "z90crypt.h"
39 #include "z90common.h"
40
41 /**
42 * Defaults that may be modified.
43 */
44
45 /**
46 * You can specify a different minor at compile time.
47 */
48 #ifndef Z90CRYPT_MINOR
49 #define Z90CRYPT_MINOR MISC_DYNAMIC_MINOR
50 #endif
51
52 /**
53 * You can specify a different domain at compile time or on the insmod
54 * command line.
55 */
56 #ifndef DOMAIN_INDEX
57 #define DOMAIN_INDEX -1
58 #endif
59
60 /**
61 * This is the name under which the device is registered in /proc/modules.
62 */
63 #define REG_NAME "z90crypt"
64
65 /**
66 * Cleanup should run every CLEANUPTIME seconds and should clean up requests
67 * older than CLEANUPTIME seconds in the past.
68 */
69 #ifndef CLEANUPTIME
70 #define CLEANUPTIME 15
71 #endif
72
73 /**
74 * Config should run every CONFIGTIME seconds
75 */
76 #ifndef CONFIGTIME
77 #define CONFIGTIME 30
78 #endif
79
80 /**
81 * The first execution of the config task should take place
82 * immediately after initialization
83 */
84 #ifndef INITIAL_CONFIGTIME
85 #define INITIAL_CONFIGTIME 1
86 #endif
87
88 /**
89 * Reader should run every READERTIME milliseconds
90 * With the 100Hz patch for s390, z90crypt can lock the system solid while
91 * under heavy load. We'll try to avoid that.
92 */
93 #ifndef READERTIME
94 #if HZ > 1000
95 #define READERTIME 2
96 #else
97 #define READERTIME 10
98 #endif
99 #endif
100
101 /**
102 * turn long device array index into device pointer
103 */
104 #define LONG2DEVPTR(ndx) (z90crypt.device_p[(ndx)])
105
106 /**
107 * turn short device array index into long device array index
108 */
109 #define SHRT2LONG(ndx) (z90crypt.overall_device_x.device_index[(ndx)])
110
111 /**
112 * turn short device array index into device pointer
113 */
114 #define SHRT2DEVPTR(ndx) LONG2DEVPTR(SHRT2LONG(ndx))
115
116 /**
117 * Status for a work-element
118 */
119 #define STAT_DEFAULT 0x00 // request has not been processed
120
121 #define STAT_ROUTED 0x80 // bit 7: requests get routed to specific device
122 // else, device is determined each write
123 #define STAT_FAILED 0x40 // bit 6: this bit is set if the request failed
124 // before being sent to the hardware.
125 #define STAT_WRITTEN 0x30 // bits 5-4: work to be done, not sent to device
126 // 0x20 // UNUSED state
127 #define STAT_READPEND 0x10 // bits 5-4: work done, we're returning data now
128 #define STAT_NOWORK 0x00 // bits off: no work on any queue
129 #define STAT_RDWRMASK 0x30 // mask for bits 5-4
130
131 /**
132 * Macros to check the status RDWRMASK
133 */
134 #define CHK_RDWRMASK(statbyte) ((statbyte) & STAT_RDWRMASK)
135 #define SET_RDWRMASK(statbyte, newval) \
136 {(statbyte) &= ~STAT_RDWRMASK; (statbyte) |= newval;}
137
138 /**
139 * Audit Trail. Progress of a Work element
140 * audit[0]: Unless noted otherwise, these bits are all set by the process
141 */
142 #define FP_COPYFROM 0x80 // Caller's buffer has been copied to work element
143 #define FP_BUFFREQ 0x40 // Low Level buffer requested
144 #define FP_BUFFGOT 0x20 // Low Level buffer obtained
145 #define FP_SENT 0x10 // Work element sent to a crypto device
146 // (may be set by process or by reader task)
147 #define FP_PENDING 0x08 // Work element placed on pending queue
148 // (may be set by process or by reader task)
149 #define FP_REQUEST 0x04 // Work element placed on request queue
150 #define FP_ASLEEP 0x02 // Work element about to sleep
151 #define FP_AWAKE 0x01 // Work element has been awakened
152
153 /**
154 * audit[1]: These bits are set by the reader task and/or the cleanup task
155 */
156 #define FP_NOTPENDING 0x80 // Work element removed from pending queue
157 #define FP_AWAKENING 0x40 // Caller about to be awakened
158 #define FP_TIMEDOUT 0x20 // Caller timed out
159 #define FP_RESPSIZESET 0x10 // Response size copied to work element
160 #define FP_RESPADDRCOPIED 0x08 // Response address copied to work element
161 #define FP_RESPBUFFCOPIED 0x04 // Response buffer copied to work element
162 #define FP_REMREQUEST 0x02 // Work element removed from request queue
163 #define FP_SIGNALED 0x01 // Work element was awakened by a signal
164
165 /**
166 * audit[2]: unused
167 */
168
169 /**
170 * state of the file handle in private_data.status
171 */
172 #define STAT_OPEN 0
173 #define STAT_CLOSED 1
174
175 /**
176 * PID() expands to the process ID of the current process
177 */
178 #define PID() (current->pid)
179
180 /**
181 * Selected Constants. The number of APs and the number of devices
182 */
183 #ifndef Z90CRYPT_NUM_APS
184 #define Z90CRYPT_NUM_APS 64
185 #endif
186 #ifndef Z90CRYPT_NUM_DEVS
187 #define Z90CRYPT_NUM_DEVS Z90CRYPT_NUM_APS
188 #endif
189
190 /**
191 * Buffer size for receiving responses. The maximum Response Size
192 * is actually the maximum request size, since in an error condition
193 * the request itself may be returned unchanged.
194 */
195 #define MAX_RESPONSE_SIZE 0x0000077C
196
197 /**
198 * A count and status-byte mask
199 */
200 struct status {
201 int st_count; // # of enabled devices
202 int disabled_count; // # of disabled devices
203 int user_disabled_count; // # of devices disabled via proc fs
204 unsigned char st_mask[Z90CRYPT_NUM_APS]; // current status mask
205 };
206
207 /**
208 * The array of device indexes is a mechanism for fast indexing into
209 * a long (and sparse) array. For instance, if APs 3, 9 and 47 are
210 * installed, z90CDeviceIndex[0] is 3, z90CDeviceIndex[1] is 9, and
211 * z90CDeviceIndex[2] is 47.
212 */
213 struct device_x {
214 int device_index[Z90CRYPT_NUM_DEVS];
215 };
216
217 /**
218 * All devices are arranged in a single array: 64 APs
219 */
220 struct device {
221 int dev_type; // PCICA, PCICC, PCIXCC_MCL2,
222 // PCIXCC_MCL3, CEX2C, CEX2A
223 enum devstat dev_stat; // current device status
224 int dev_self_x; // Index in array
225 int disabled; // Set when device is in error
226 int user_disabled; // Set when device is disabled by user
227 int dev_q_depth; // q depth
228 unsigned char * dev_resp_p; // Response buffer address
229 int dev_resp_l; // Response Buffer length
230 int dev_caller_count; // Number of callers
231 int dev_total_req_cnt; // # requests for device since load
232 struct list_head dev_caller_list; // List of callers
233 };
234
235 /**
236 * There's a struct status and a struct device_x for each device type.
237 */
238 struct hdware_block {
239 struct status hdware_mask;
240 struct status type_mask[Z90CRYPT_NUM_TYPES];
241 struct device_x type_x_addr[Z90CRYPT_NUM_TYPES];
242 unsigned char device_type_array[Z90CRYPT_NUM_APS];
243 };
244
245 /**
246 * z90crypt is the topmost data structure in the hierarchy.
247 */
248 struct z90crypt {
249 int max_count; // Nr of possible crypto devices
250 struct status mask;
251 int q_depth_array[Z90CRYPT_NUM_DEVS];
252 int dev_type_array[Z90CRYPT_NUM_DEVS];
253 struct device_x overall_device_x; // array device indexes
254 struct device * device_p[Z90CRYPT_NUM_DEVS];
255 int terminating;
256 int domain_established;// TRUE: domain has been found
257 int cdx; // Crypto Domain Index
258 int len; // Length of this data structure
259 struct hdware_block *hdware_info;
260 };
261
262 /**
263 * An array of these structures is pointed to from dev_caller
264 * The length of the array depends on the device type. For APs,
265 * there are 8.
266 *
267 * The caller buffer is allocated to the user at OPEN. At WRITE,
268 * it contains the request; at READ, the response. The function
269 * send_to_crypto_device converts the request to device-dependent
270 * form and use the caller's OPEN-allocated buffer for the response.
271 *
272 * For the contents of caller_dev_dep_req and caller_dev_dep_req_p
273 * because that points to it, see the discussion in z90hardware.c.
274 * Search for "extended request message block".
275 */
276 struct caller {
277 int caller_buf_l; // length of original request
278 unsigned char * caller_buf_p; // Original request on WRITE
279 int caller_dev_dep_req_l; // len device dependent request
280 unsigned char * caller_dev_dep_req_p; // Device dependent form
281 unsigned char caller_id[8]; // caller-supplied message id
282 struct list_head caller_liste;
283 unsigned char caller_dev_dep_req[MAX_RESPONSE_SIZE];
284 };
285
286 /**
287 * Function prototypes from z90hardware.c
288 */
289 enum hdstat query_online(int deviceNr, int cdx, int resetNr, int *q_depth,
290 int *dev_type);
291 enum devstat reset_device(int deviceNr, int cdx, int resetNr);
292 enum devstat send_to_AP(int dev_nr, int cdx, int msg_len, unsigned char *msg_ext);
293 enum devstat receive_from_AP(int dev_nr, int cdx, int resplen,
294 unsigned char *resp, unsigned char *psmid);
295 int convert_request(unsigned char *buffer, int func, unsigned short function,
296 int cdx, int dev_type, int *msg_l_p, unsigned char *msg_p);
297 int convert_response(unsigned char *response, unsigned char *buffer,
298 int *respbufflen_p, unsigned char *resp_buff);
299
300 /**
301 * Low level function prototypes
302 */
303 static int create_z90crypt(int *cdx_p);
304 static int refresh_z90crypt(int *cdx_p);
305 static int find_crypto_devices(struct status *deviceMask);
306 static int create_crypto_device(int index);
307 static int destroy_crypto_device(int index);
308 static void destroy_z90crypt(void);
309 static int refresh_index_array(struct status *status_str,
310 struct device_x *index_array);
311 static int probe_device_type(struct device *devPtr);
312 static int probe_PCIXCC_type(struct device *devPtr);
313
314 /**
315 * proc fs definitions
316 */
317 static struct proc_dir_entry *z90crypt_entry;
318
319 /**
320 * data structures
321 */
322
323 /**
324 * work_element.opener points back to this structure
325 */
326 struct priv_data {
327 pid_t opener_pid;
328 unsigned char status; // 0: open 1: closed
329 };
330
331 /**
332 * A work element is allocated for each request
333 */
334 struct work_element {
335 struct priv_data *priv_data;
336 pid_t pid;
337 int devindex; // index of device processing this w_e
338 // (If request did not specify device,
339 // -1 until placed onto a queue)
340 int devtype;
341 struct list_head liste; // used for requestq and pendingq
342 char buffer[128]; // local copy of user request
343 int buff_size; // size of the buffer for the request
344 char resp_buff[RESPBUFFSIZE];
345 int resp_buff_size;
346 char __user * resp_addr; // address of response in user space
347 unsigned int funccode; // function code of request
348 wait_queue_head_t waitq;
349 unsigned long requestsent; // time at which the request was sent
350 atomic_t alarmrung; // wake-up signal
351 unsigned char caller_id[8]; // pid + counter, for this w_e
352 unsigned char status[1]; // bits to mark status of the request
353 unsigned char audit[3]; // record of work element's progress
354 unsigned char * requestptr; // address of request buffer
355 int retcode; // return code of request
356 };
357
358 /**
359 * High level function prototypes
360 */
361 static int z90crypt_open(struct inode *, struct file *);
362 static int z90crypt_release(struct inode *, struct file *);
363 static ssize_t z90crypt_read(struct file *, char __user *, size_t, loff_t *);
364 static ssize_t z90crypt_write(struct file *, const char __user *,
365 size_t, loff_t *);
366 static long z90crypt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
367 static long z90crypt_compat_ioctl(struct file *, unsigned int, unsigned long);
368
369 static void z90crypt_reader_task(unsigned long);
370 static void z90crypt_schedule_reader_task(unsigned long);
371 static void z90crypt_config_task(unsigned long);
372 static void z90crypt_cleanup_task(unsigned long);
373
374 static int z90crypt_status(char *, char **, off_t, int, int *, void *);
375 static int z90crypt_status_write(struct file *, const char __user *,
376 unsigned long, void *);
377
378 /**
379 * Storage allocated at initialization and used throughout the life of
380 * this insmod
381 */
382 static int domain = DOMAIN_INDEX;
383 static struct z90crypt z90crypt;
384 static int quiesce_z90crypt;
385 static spinlock_t queuespinlock;
386 static struct list_head request_list;
387 static int requestq_count;
388 static struct list_head pending_list;
389 static int pendingq_count;
390
391 static struct tasklet_struct reader_tasklet;
392 static struct timer_list reader_timer;
393 static struct timer_list config_timer;
394 static struct timer_list cleanup_timer;
395 static atomic_t total_open;
396 static atomic_t z90crypt_step;
397
398 static struct file_operations z90crypt_fops = {
399 .owner = THIS_MODULE,
400 .read = z90crypt_read,
401 .write = z90crypt_write,
402 .unlocked_ioctl = z90crypt_unlocked_ioctl,
403 #ifdef CONFIG_COMPAT
404 .compat_ioctl = z90crypt_compat_ioctl,
405 #endif
406 .open = z90crypt_open,
407 .release = z90crypt_release
408 };
409
410 static struct miscdevice z90crypt_misc_device = {
411 .minor = Z90CRYPT_MINOR,
412 .name = DEV_NAME,
413 .fops = &z90crypt_fops,
414 .devfs_name = DEV_NAME
415 };
416
417 /**
418 * Documentation values.
419 */
420 MODULE_AUTHOR("zSeries Linux Crypto Team: Robert H. Burroughs, Eric D. Rossman"
421 "and Jochen Roehrig");
422 MODULE_DESCRIPTION("zSeries Linux Cryptographic Coprocessor device driver, "
423 "Copyright 2001, 2005 IBM Corporation");
424 MODULE_LICENSE("GPL");
425 module_param(domain, int, 0);
426 MODULE_PARM_DESC(domain, "domain index for device");
427
428 #ifdef CONFIG_COMPAT
429 /**
430 * ioctl32 conversion routines
431 */
432 struct ica_rsa_modexpo_32 { // For 32-bit callers
433 compat_uptr_t inputdata;
434 unsigned int inputdatalength;
435 compat_uptr_t outputdata;
436 unsigned int outputdatalength;
437 compat_uptr_t b_key;
438 compat_uptr_t n_modulus;
439 };
440
441 static long
442 trans_modexpo32(struct file *filp, unsigned int cmd, unsigned long arg)
443 {
444 struct ica_rsa_modexpo_32 __user *mex32u = compat_ptr(arg);
445 struct ica_rsa_modexpo_32 mex32k;
446 struct ica_rsa_modexpo __user *mex64;
447 long ret = 0;
448 unsigned int i;
449
450 if (!access_ok(VERIFY_WRITE, mex32u, sizeof(struct ica_rsa_modexpo_32)))
451 return -EFAULT;
452 mex64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo));
453 if (!access_ok(VERIFY_WRITE, mex64, sizeof(struct ica_rsa_modexpo)))
454 return -EFAULT;
455 if (copy_from_user(&mex32k, mex32u, sizeof(struct ica_rsa_modexpo_32)))
456 return -EFAULT;
457 if (__put_user(compat_ptr(mex32k.inputdata), &mex64->inputdata) ||
458 __put_user(mex32k.inputdatalength, &mex64->inputdatalength) ||
459 __put_user(compat_ptr(mex32k.outputdata), &mex64->outputdata) ||
460 __put_user(mex32k.outputdatalength, &mex64->outputdatalength) ||
461 __put_user(compat_ptr(mex32k.b_key), &mex64->b_key) ||
462 __put_user(compat_ptr(mex32k.n_modulus), &mex64->n_modulus))
463 return -EFAULT;
464 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)mex64);
465 if (!ret)
466 if (__get_user(i, &mex64->outputdatalength) ||
467 __put_user(i, &mex32u->outputdatalength))
468 ret = -EFAULT;
469 return ret;
470 }
471
472 struct ica_rsa_modexpo_crt_32 { // For 32-bit callers
473 compat_uptr_t inputdata;
474 unsigned int inputdatalength;
475 compat_uptr_t outputdata;
476 unsigned int outputdatalength;
477 compat_uptr_t bp_key;
478 compat_uptr_t bq_key;
479 compat_uptr_t np_prime;
480 compat_uptr_t nq_prime;
481 compat_uptr_t u_mult_inv;
482 };
483
484 static long
485 trans_modexpo_crt32(struct file *filp, unsigned int cmd, unsigned long arg)
486 {
487 struct ica_rsa_modexpo_crt_32 __user *crt32u = compat_ptr(arg);
488 struct ica_rsa_modexpo_crt_32 crt32k;
489 struct ica_rsa_modexpo_crt __user *crt64;
490 long ret = 0;
491 unsigned int i;
492
493 if (!access_ok(VERIFY_WRITE, crt32u,
494 sizeof(struct ica_rsa_modexpo_crt_32)))
495 return -EFAULT;
496 crt64 = compat_alloc_user_space(sizeof(struct ica_rsa_modexpo_crt));
497 if (!access_ok(VERIFY_WRITE, crt64, sizeof(struct ica_rsa_modexpo_crt)))
498 return -EFAULT;
499 if (copy_from_user(&crt32k, crt32u,
500 sizeof(struct ica_rsa_modexpo_crt_32)))
501 return -EFAULT;
502 if (__put_user(compat_ptr(crt32k.inputdata), &crt64->inputdata) ||
503 __put_user(crt32k.inputdatalength, &crt64->inputdatalength) ||
504 __put_user(compat_ptr(crt32k.outputdata), &crt64->outputdata) ||
505 __put_user(crt32k.outputdatalength, &crt64->outputdatalength) ||
506 __put_user(compat_ptr(crt32k.bp_key), &crt64->bp_key) ||
507 __put_user(compat_ptr(crt32k.bq_key), &crt64->bq_key) ||
508 __put_user(compat_ptr(crt32k.np_prime), &crt64->np_prime) ||
509 __put_user(compat_ptr(crt32k.nq_prime), &crt64->nq_prime) ||
510 __put_user(compat_ptr(crt32k.u_mult_inv), &crt64->u_mult_inv))
511 return -EFAULT;
512 ret = z90crypt_unlocked_ioctl(filp, cmd, (unsigned long)crt64);
513 if (!ret)
514 if (__get_user(i, &crt64->outputdatalength) ||
515 __put_user(i, &crt32u->outputdatalength))
516 ret = -EFAULT;
517 return ret;
518 }
519
520 static long
521 z90crypt_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
522 {
523 switch (cmd) {
524 case ICAZ90STATUS:
525 case Z90QUIESCE:
526 case Z90STAT_TOTALCOUNT:
527 case Z90STAT_PCICACOUNT:
528 case Z90STAT_PCICCCOUNT:
529 case Z90STAT_PCIXCCCOUNT:
530 case Z90STAT_PCIXCCMCL2COUNT:
531 case Z90STAT_PCIXCCMCL3COUNT:
532 case Z90STAT_CEX2CCOUNT:
533 case Z90STAT_REQUESTQ_COUNT:
534 case Z90STAT_PENDINGQ_COUNT:
535 case Z90STAT_TOTALOPEN_COUNT:
536 case Z90STAT_DOMAIN_INDEX:
537 case Z90STAT_STATUS_MASK:
538 case Z90STAT_QDEPTH_MASK:
539 case Z90STAT_PERDEV_REQCNT:
540 return z90crypt_unlocked_ioctl(filp, cmd, arg);
541 case ICARSAMODEXPO:
542 return trans_modexpo32(filp, cmd, arg);
543 case ICARSACRT:
544 return trans_modexpo_crt32(filp, cmd, arg);
545 default:
546 return -ENOIOCTLCMD;
547 }
548 }
549 #endif
550
551 /**
552 * The module initialization code.
553 */
554 static int __init
555 z90crypt_init_module(void)
556 {
557 int result, nresult;
558 struct proc_dir_entry *entry;
559
560 PDEBUG("PID %d\n", PID());
561
562 if ((domain < -1) || (domain > 15)) {
563 PRINTKW("Invalid param: domain = %d. Not loading.\n", domain);
564 return -EINVAL;
565 }
566
567 /* Register as misc device with given minor (or get a dynamic one). */
568 result = misc_register(&z90crypt_misc_device);
569 if (result < 0) {
570 PRINTKW(KERN_ERR "misc_register (minor %d) failed with %d\n",
571 z90crypt_misc_device.minor, result);
572 return result;
573 }
574
575 PDEBUG("Registered " DEV_NAME " with result %d\n", result);
576
577 result = create_z90crypt(&domain);
578 if (result != 0) {
579 PRINTKW("create_z90crypt (domain index %d) failed with %d.\n",
580 domain, result);
581 result = -ENOMEM;
582 goto init_module_cleanup;
583 }
584
585 if (result == 0) {
586 PRINTKN("Version %d.%d.%d loaded, built on %s %s\n",
587 z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT,
588 __DATE__, __TIME__);
589 PDEBUG("create_z90crypt (domain index %d) successful.\n",
590 domain);
591 } else
592 PRINTK("No devices at startup\n");
593
594 /* Initialize globals. */
595 spin_lock_init(&queuespinlock);
596
597 INIT_LIST_HEAD(&pending_list);
598 pendingq_count = 0;
599
600 INIT_LIST_HEAD(&request_list);
601 requestq_count = 0;
602
603 quiesce_z90crypt = 0;
604
605 atomic_set(&total_open, 0);
606 atomic_set(&z90crypt_step, 0);
607
608 /* Set up the cleanup task. */
609 init_timer(&cleanup_timer);
610 cleanup_timer.function = z90crypt_cleanup_task;
611 cleanup_timer.data = 0;
612 cleanup_timer.expires = jiffies + (CLEANUPTIME * HZ);
613 add_timer(&cleanup_timer);
614
615 /* Set up the proc file system */
616 entry = create_proc_entry("driver/z90crypt", 0644, 0);
617 if (entry) {
618 entry->nlink = 1;
619 entry->data = 0;
620 entry->read_proc = z90crypt_status;
621 entry->write_proc = z90crypt_status_write;
622 }
623 else
624 PRINTK("Couldn't create z90crypt proc entry\n");
625 z90crypt_entry = entry;
626
627 /* Set up the configuration task. */
628 init_timer(&config_timer);
629 config_timer.function = z90crypt_config_task;
630 config_timer.data = 0;
631 config_timer.expires = jiffies + (INITIAL_CONFIGTIME * HZ);
632 add_timer(&config_timer);
633
634 /* Set up the reader task */
635 tasklet_init(&reader_tasklet, z90crypt_reader_task, 0);
636 init_timer(&reader_timer);
637 reader_timer.function = z90crypt_schedule_reader_task;
638 reader_timer.data = 0;
639 reader_timer.expires = jiffies + (READERTIME * HZ / 1000);
640 add_timer(&reader_timer);
641
642 return 0; // success
643
644 init_module_cleanup:
645 if ((nresult = misc_deregister(&z90crypt_misc_device)))
646 PRINTK("misc_deregister failed with %d.\n", nresult);
647 else
648 PDEBUG("misc_deregister successful.\n");
649
650 return result; // failure
651 }
652
653 /**
654 * The module termination code
655 */
656 static void __exit
657 z90crypt_cleanup_module(void)
658 {
659 int nresult;
660
661 PDEBUG("PID %d\n", PID());
662
663 remove_proc_entry("driver/z90crypt", 0);
664
665 if ((nresult = misc_deregister(&z90crypt_misc_device)))
666 PRINTK("misc_deregister failed with %d.\n", nresult);
667 else
668 PDEBUG("misc_deregister successful.\n");
669
670 /* Remove the tasks */
671 tasklet_kill(&reader_tasklet);
672 del_timer(&reader_timer);
673 del_timer(&config_timer);
674 del_timer(&cleanup_timer);
675
676 destroy_z90crypt();
677
678 PRINTKN("Unloaded.\n");
679 }
680
681 /**
682 * Functions running under a process id
683 *
684 * The I/O functions:
685 * z90crypt_open
686 * z90crypt_release
687 * z90crypt_read
688 * z90crypt_write
689 * z90crypt_unlocked_ioctl
690 * z90crypt_status
691 * z90crypt_status_write
692 * disable_card
693 * enable_card
694 *
695 * Helper functions:
696 * z90crypt_rsa
697 * z90crypt_prepare
698 * z90crypt_send
699 * z90crypt_process_results
700 *
701 */
702 static int
703 z90crypt_open(struct inode *inode, struct file *filp)
704 {
705 struct priv_data *private_data_p;
706
707 if (quiesce_z90crypt)
708 return -EQUIESCE;
709
710 private_data_p = kmalloc(sizeof(struct priv_data), GFP_KERNEL);
711 if (!private_data_p) {
712 PRINTK("Memory allocate failed\n");
713 return -ENOMEM;
714 }
715
716 memset((void *)private_data_p, 0, sizeof(struct priv_data));
717 private_data_p->status = STAT_OPEN;
718 private_data_p->opener_pid = PID();
719 filp->private_data = private_data_p;
720 atomic_inc(&total_open);
721
722 return 0;
723 }
724
725 static int
726 z90crypt_release(struct inode *inode, struct file *filp)
727 {
728 struct priv_data *private_data_p = filp->private_data;
729
730 PDEBUG("PID %d (filp %p)\n", PID(), filp);
731
732 private_data_p->status = STAT_CLOSED;
733 memset(private_data_p, 0, sizeof(struct priv_data));
734 kfree(private_data_p);
735 atomic_dec(&total_open);
736
737 return 0;
738 }
739
740 /*
741 * there are two read functions, of which compile options will choose one
742 * without USE_GET_RANDOM_BYTES
743 * => read() always returns -EPERM;
744 * otherwise
745 * => read() uses get_random_bytes() kernel function
746 */
747 #ifndef USE_GET_RANDOM_BYTES
748 /**
749 * z90crypt_read will not be supported beyond z90crypt 1.3.1
750 */
751 static ssize_t
752 z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
753 {
754 PDEBUG("filp %p (PID %d)\n", filp, PID());
755 return -EPERM;
756 }
757 #else // we want to use get_random_bytes
758 /**
759 * read() just returns a string of random bytes. Since we have no way
760 * to generate these cryptographically, we just execute get_random_bytes
761 * for the length specified.
762 */
763 #include <linux/random.h>
764 static ssize_t
765 z90crypt_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
766 {
767 unsigned char *temp_buff;
768
769 PDEBUG("filp %p (PID %d)\n", filp, PID());
770
771 if (quiesce_z90crypt)
772 return -EQUIESCE;
773 if (count < 0) {
774 PRINTK("Requested random byte count negative: %ld\n", count);
775 return -EINVAL;
776 }
777 if (count > RESPBUFFSIZE) {
778 PDEBUG("count[%d] > RESPBUFFSIZE", count);
779 return -EINVAL;
780 }
781 if (count == 0)
782 return 0;
783 temp_buff = kmalloc(RESPBUFFSIZE, GFP_KERNEL);
784 if (!temp_buff) {
785 PRINTK("Memory allocate failed\n");
786 return -ENOMEM;
787 }
788 get_random_bytes(temp_buff, count);
789
790 if (copy_to_user(buf, temp_buff, count) != 0) {
791 kfree(temp_buff);
792 return -EFAULT;
793 }
794 kfree(temp_buff);
795 return count;
796 }
797 #endif
798
799 /**
800 * Write is is not allowed
801 */
802 static ssize_t
803 z90crypt_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos)
804 {
805 PDEBUG("filp %p (PID %d)\n", filp, PID());
806 return -EPERM;
807 }
808
809 /**
810 * New status functions
811 */
812 static inline int
813 get_status_totalcount(void)
814 {
815 return z90crypt.hdware_info->hdware_mask.st_count;
816 }
817
818 static inline int
819 get_status_PCICAcount(void)
820 {
821 return z90crypt.hdware_info->type_mask[PCICA].st_count;
822 }
823
824 static inline int
825 get_status_PCICCcount(void)
826 {
827 return z90crypt.hdware_info->type_mask[PCICC].st_count;
828 }
829
830 static inline int
831 get_status_PCIXCCcount(void)
832 {
833 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count +
834 z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
835 }
836
837 static inline int
838 get_status_PCIXCCMCL2count(void)
839 {
840 return z90crypt.hdware_info->type_mask[PCIXCC_MCL2].st_count;
841 }
842
843 static inline int
844 get_status_PCIXCCMCL3count(void)
845 {
846 return z90crypt.hdware_info->type_mask[PCIXCC_MCL3].st_count;
847 }
848
849 static inline int
850 get_status_CEX2Ccount(void)
851 {
852 return z90crypt.hdware_info->type_mask[CEX2C].st_count;
853 }
854
855 static inline int
856 get_status_CEX2Acount(void)
857 {
858 return z90crypt.hdware_info->type_mask[CEX2A].st_count;
859 }
860
861 static inline int
862 get_status_requestq_count(void)
863 {
864 return requestq_count;
865 }
866
867 static inline int
868 get_status_pendingq_count(void)
869 {
870 return pendingq_count;
871 }
872
873 static inline int
874 get_status_totalopen_count(void)
875 {
876 return atomic_read(&total_open);
877 }
878
879 static inline int
880 get_status_domain_index(void)
881 {
882 return z90crypt.cdx;
883 }
884
885 static inline unsigned char *
886 get_status_status_mask(unsigned char status[Z90CRYPT_NUM_APS])
887 {
888 int i, ix;
889
890 memcpy(status, z90crypt.hdware_info->device_type_array,
891 Z90CRYPT_NUM_APS);
892
893 for (i = 0; i < get_status_totalcount(); i++) {
894 ix = SHRT2LONG(i);
895 if (LONG2DEVPTR(ix)->user_disabled)
896 status[ix] = 0x0d;
897 }
898
899 return status;
900 }
901
902 static inline unsigned char *
903 get_status_qdepth_mask(unsigned char qdepth[Z90CRYPT_NUM_APS])
904 {
905 int i, ix;
906
907 memset(qdepth, 0, Z90CRYPT_NUM_APS);
908
909 for (i = 0; i < get_status_totalcount(); i++) {
910 ix = SHRT2LONG(i);
911 qdepth[ix] = LONG2DEVPTR(ix)->dev_caller_count;
912 }
913
914 return qdepth;
915 }
916
917 static inline unsigned int *
918 get_status_perdevice_reqcnt(unsigned int reqcnt[Z90CRYPT_NUM_APS])
919 {
920 int i, ix;
921
922 memset(reqcnt, 0, Z90CRYPT_NUM_APS * sizeof(int));
923
924 for (i = 0; i < get_status_totalcount(); i++) {
925 ix = SHRT2LONG(i);
926 reqcnt[ix] = LONG2DEVPTR(ix)->dev_total_req_cnt;
927 }
928
929 return reqcnt;
930 }
931
932 static inline void
933 init_work_element(struct work_element *we_p,
934 struct priv_data *priv_data, pid_t pid)
935 {
936 int step;
937
938 we_p->requestptr = (unsigned char *)we_p + sizeof(struct work_element);
939 /* Come up with a unique id for this caller. */
940 step = atomic_inc_return(&z90crypt_step);
941 memcpy(we_p->caller_id+0, (void *) &pid, sizeof(pid));
942 memcpy(we_p->caller_id+4, (void *) &step, sizeof(step));
943 we_p->pid = pid;
944 we_p->priv_data = priv_data;
945 we_p->status[0] = STAT_DEFAULT;
946 we_p->audit[0] = 0x00;
947 we_p->audit[1] = 0x00;
948 we_p->audit[2] = 0x00;
949 we_p->resp_buff_size = 0;
950 we_p->retcode = 0;
951 we_p->devindex = -1;
952 we_p->devtype = -1;
953 atomic_set(&we_p->alarmrung, 0);
954 init_waitqueue_head(&we_p->waitq);
955 INIT_LIST_HEAD(&(we_p->liste));
956 }
957
958 static inline int
959 allocate_work_element(struct work_element **we_pp,
960 struct priv_data *priv_data_p, pid_t pid)
961 {
962 struct work_element *we_p;
963
964 we_p = (struct work_element *) get_zeroed_page(GFP_KERNEL);
965 if (!we_p)
966 return -ENOMEM;
967 init_work_element(we_p, priv_data_p, pid);
968 *we_pp = we_p;
969 return 0;
970 }
971
972 static inline void
973 remove_device(struct device *device_p)
974 {
975 if (!device_p || (device_p->disabled != 0))
976 return;
977 device_p->disabled = 1;
978 z90crypt.hdware_info->type_mask[device_p->dev_type].disabled_count++;
979 z90crypt.hdware_info->hdware_mask.disabled_count++;
980 }
981
982 /**
983 * Bitlength limits for each card
984 *
985 * There are new MCLs which allow more bitlengths. See the table for details.
986 * The MCL must be applied and the newer bitlengths enabled for these to work.
987 *
988 * Card Type Old limit New limit
989 * PCICA ??-2048 same (the lower limit is less than 128 bit...)
990 * PCICC 512-1024 512-2048
991 * PCIXCC_MCL2 512-2048 ----- (applying any GA LIC will make an MCL3 card)
992 * PCIXCC_MCL3 ----- 128-2048
993 * CEX2C 512-2048 128-2048
994 * CEX2A ??-2048 same (the lower limit is less than 128 bit...)
995 *
996 * ext_bitlens (extended bitlengths) is a global, since you should not apply an
997 * MCL to just one card in a machine. We assume, at first, that all cards have
998 * these capabilities.
999 */
1000 int ext_bitlens = 1; // This is global
1001 #define PCIXCC_MIN_MOD_SIZE 16 // 128 bits
1002 #define OLD_PCIXCC_MIN_MOD_SIZE 64 // 512 bits
1003 #define PCICC_MIN_MOD_SIZE 64 // 512 bits
1004 #define OLD_PCICC_MAX_MOD_SIZE 128 // 1024 bits
1005 #define MAX_MOD_SIZE 256 // 2048 bits
1006
/**
 * Pick a device type able to handle a request of 'bytelength' bytes.
 *
 * On entry *dev_type_p is either a specific type or ANYDEV.  On success
 * returns 0 with *dev_type_p set to the chosen type; returns -1 when no
 * enabled device of a suitable type exists (or the requested type is
 * invalid).  For ANYDEV, fast types (PCICA/PCIXCC_MCL3/CEX2C/CEX2A) are
 * preferred and load-balanced round-robin via the static 'count';
 * PCIXCC_MCL2 and PCICC are fallbacks subject to the bitlength limits
 * documented above (see the table near ext_bitlens).
 */
static inline int
select_device_type(int *dev_type_p, int bytelength)
{
	static int count = 0;	// round-robin cursor, persists across calls
	int PCICA_avail, PCIXCC_MCL3_avail, CEX2C_avail, CEX2A_avail,
	    index_to_use;
	struct status *stat;
	if ((*dev_type_p != PCICC) && (*dev_type_p != PCICA) &&
	    (*dev_type_p != PCIXCC_MCL2) && (*dev_type_p != PCIXCC_MCL3) &&
	    (*dev_type_p != CEX2C) && (*dev_type_p != CEX2A) &&
	    (*dev_type_p != ANYDEV))
		return -1;
	if (*dev_type_p != ANYDEV) {
		/* Caller asked for a specific type: just check that at
		 * least one device of it is present and not disabled. */
		stat = &z90crypt.hdware_info->type_mask[*dev_type_p];
		if (stat->st_count >
		    (stat->disabled_count + stat->user_disabled_count))
			return 0;
		return -1;
	}

	/**
	 * Assumption: PCICA, PCIXCC_MCL3, CEX2C, and CEX2A are all similar in
	 * speed.
	 *
	 * PCICA and CEX2A do NOT co-exist, so it would be either one or the
	 * other present.
	 */
	stat = &z90crypt.hdware_info->type_mask[PCICA];
	PCICA_avail = stat->st_count -
			(stat->disabled_count + stat->user_disabled_count);
	stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL3];
	PCIXCC_MCL3_avail = stat->st_count -
			(stat->disabled_count + stat->user_disabled_count);
	stat = &z90crypt.hdware_info->type_mask[CEX2C];
	CEX2C_avail = stat->st_count -
			(stat->disabled_count + stat->user_disabled_count);
	stat = &z90crypt.hdware_info->type_mask[CEX2A];
	CEX2A_avail = stat->st_count -
			(stat->disabled_count + stat->user_disabled_count);
	if (PCICA_avail || PCIXCC_MCL3_avail || CEX2C_avail || CEX2A_avail) {
		/**
		 * bitlength is a factor, PCICA or CEX2A are the most capable,
		 * even with the new MCL for PCIXCC.
		 */
		if ((bytelength < PCIXCC_MIN_MOD_SIZE) ||
		    (!ext_bitlens && (bytelength < OLD_PCIXCC_MIN_MOD_SIZE))) {
			/* Too short for the coprocessors: only the
			 * accelerators (PCICA or CEX2A) can take it. */
			if (PCICA_avail) {
				*dev_type_p = PCICA;
				return 0;
			}
			if (CEX2A_avail) {
				*dev_type_p = CEX2A;
				return 0;
			}
			return -1;
		}

		/* Round-robin across the pooled counts of all fast types. */
		index_to_use = count % (PCICA_avail + PCIXCC_MCL3_avail +
					CEX2C_avail + CEX2A_avail);
		if (index_to_use < PCICA_avail)
			*dev_type_p = PCICA;
		else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail))
			*dev_type_p = PCIXCC_MCL3;
		else if (index_to_use < (PCICA_avail + PCIXCC_MCL3_avail +
					 CEX2C_avail))
			*dev_type_p = CEX2C;
		else
			*dev_type_p = CEX2A;
		count++;
		return 0;
	}

	/* Less than OLD_PCIXCC_MIN_MOD_SIZE cannot go to a PCIXCC_MCL2 */
	if (bytelength < OLD_PCIXCC_MIN_MOD_SIZE)
		return -1;
	stat = &z90crypt.hdware_info->type_mask[PCIXCC_MCL2];
	if (stat->st_count >
	    (stat->disabled_count + stat->user_disabled_count)) {
		*dev_type_p = PCIXCC_MCL2;
		return 0;
	}

	/**
	 * Less than PCICC_MIN_MOD_SIZE or more than OLD_PCICC_MAX_MOD_SIZE
	 * (if we don't have the MCL applied and the newer bitlengths enabled)
	 * cannot go to a PCICC
	 */
	if ((bytelength < PCICC_MIN_MOD_SIZE) ||
	    (!ext_bitlens && (bytelength > OLD_PCICC_MAX_MOD_SIZE))) {
		return -1;
	}
	stat = &z90crypt.hdware_info->type_mask[PCICC];
	if (stat->st_count >
	    (stat->disabled_count + stat->user_disabled_count)) {
		*dev_type_p = PCICC;
		return 0;
	}

	return -1;
}
1107
1108 /**
1109 * Try the selected number, then the selected type (can be ANYDEV)
1110 */
static inline int
select_device(int *dev_type_p, int *device_nr_p, int bytelength)
{
	int i, indx, devTp, low_count, low_indx;
	struct device_x *index_p;
	struct device *dev_ptr;

	PDEBUG("device type = %d, index = %d\n", *dev_type_p, *device_nr_p);
	/* First honor an explicitly requested device number, if usable. */
	if ((*device_nr_p >= 0) && (*device_nr_p < Z90CRYPT_NUM_DEVS)) {
		PDEBUG("trying index = %d\n", *device_nr_p);
		dev_ptr = z90crypt.device_p[*device_nr_p];

		if (dev_ptr &&
		    (dev_ptr->dev_stat != DEV_GONE) &&
		    (dev_ptr->disabled == 0) &&
		    (dev_ptr->user_disabled == 0)) {
			PDEBUG("selected by number, index = %d\n",
			       *device_nr_p);
			*dev_type_p = dev_ptr->dev_type;
			return *device_nr_p;
		}
	}
	/* The requested number was unusable; fall back to type selection. */
	*device_nr_p = -1;
	PDEBUG("trying type = %d\n", *dev_type_p);
	devTp = *dev_type_p;
	if (select_device_type(&devTp, bytelength) == -1) {
		PDEBUG("failed to select by type\n");
		return -1;
	}
	PDEBUG("selected type = %d\n", devTp);
	/* Among all usable devices of the chosen type, pick the one with
	 * the fewest outstanding callers (least loaded). */
	index_p = &z90crypt.hdware_info->type_x_addr[devTp];
	low_count = 0x0000FFFF;	// larger than any real dev_caller_count
	low_indx = -1;
	for (i = 0; i < z90crypt.hdware_info->type_mask[devTp].st_count; i++) {
		indx = index_p->device_index[i];
		dev_ptr = z90crypt.device_p[indx];
		if (dev_ptr &&
		    (dev_ptr->dev_stat != DEV_GONE) &&
		    (dev_ptr->disabled == 0) &&
		    (dev_ptr->user_disabled == 0) &&
		    (devTp == dev_ptr->dev_type) &&
		    (low_count > dev_ptr->dev_caller_count)) {
			low_count = dev_ptr->dev_caller_count;
			low_indx = indx;
		}
	}
	/* Returns the chosen index, or -1 if none was usable. */
	*device_nr_p = low_indx;
	return low_indx;
}
1160
/**
 * Select a device for we_p and hand its device-dependent request to the
 * adjunct processor via send_to_AP.
 *
 * Returns 0 on success (caller is queued on the device's caller list),
 * or one of the SEN_* codes: SEN_RETRY (transient, try again),
 * SEN_NOT_AVAIL (no device), SEN_QUEUE_FULL, SEN_USER_ERROR,
 * SEN_FATAL_ERROR (also sets z90crypt.terminating on send exception).
 */
static inline int
send_to_crypto_device(struct work_element *we_p)
{
	struct caller *caller_p;
	struct device *device_p;
	int dev_nr;
	int bytelen = ((struct ica_rsa_modexpo *)we_p->buffer)->inputdatalength;

	if (!we_p->requestptr)
		return SEN_FATAL_ERROR;
	caller_p = (struct caller *)we_p->requestptr;
	dev_nr = we_p->devindex;
	if (select_device(&we_p->devtype, &dev_nr, bytelen) == -1) {
		/* Retry only makes sense if hardware exists at all. */
		if (z90crypt.hdware_info->hdware_mask.st_count != 0)
			return SEN_RETRY;
		else
			return SEN_NOT_AVAIL;
	}
	we_p->devindex = dev_nr;
	device_p = z90crypt.device_p[dev_nr];
	if (!device_p)
		return SEN_NOT_AVAIL;
	/* Device table may have changed since selection; re-verify type. */
	if (device_p->dev_type != we_p->devtype)
		return SEN_RETRY;
	if (device_p->dev_caller_count >= device_p->dev_q_depth)
		return SEN_QUEUE_FULL;
	PDEBUG("device number prior to send: %d\n", dev_nr);
	/* Map the hardware-level result onto our SEN_* codes. */
	switch (send_to_AP(dev_nr, z90crypt.cdx,
			   caller_p->caller_dev_dep_req_l,
			   caller_p->caller_dev_dep_req_p)) {
	case DEV_SEN_EXCEPTION:
		PRINTKC("Exception during send to device %d\n", dev_nr);
		z90crypt.terminating = 1;
		return SEN_FATAL_ERROR;
	case DEV_GONE:
		PRINTK("Device %d not available\n", dev_nr);
		remove_device(device_p);
		return SEN_NOT_AVAIL;
	case DEV_EMPTY:
		return SEN_NOT_AVAIL;
	case DEV_NO_WORK:
		return SEN_FATAL_ERROR;
	case DEV_BAD_MESSAGE:
		return SEN_USER_ERROR;
	case DEV_QUEUE_FULL:
		return SEN_QUEUE_FULL;
	default:
	case DEV_ONLINE:
		break;
	}
	/* Success: track the outstanding request on this device. */
	list_add_tail(&(caller_p->caller_liste), &(device_p->dev_caller_list));
	device_p->dev_caller_count++;
	return 0;
}
1215
1216 /**
1217 * Send puts the user's work on one of two queues:
1218 * the pending queue if the send was successful
1219 * the request queue if the send failed because device full or busy
1220 */
static inline int
z90crypt_send(struct work_element *we_p, const char *buf)
{
	int rv;

	PDEBUG("PID %d\n", PID());

	/* A work element may only carry one in-flight request. */
	if (CHK_RDWRMASK(we_p->status[0]) != STAT_NOWORK) {
		PDEBUG("PID %d tried to send more work but has outstanding "
		       "work.\n", PID());
		return -EWORKPEND;
	}
	we_p->devindex = -1; // Reset device number
	spin_lock_irq(&queuespinlock);
	rv = send_to_crypto_device(we_p);
	switch (rv) {
	case 0:
		/* Sent: park on the pending queue until a reply arrives. */
		we_p->requestsent = jiffies;
		we_p->audit[0] |= FP_SENT;
		list_add_tail(&we_p->liste, &pending_list);
		++pendingq_count;
		we_p->audit[0] |= FP_PENDING;
		break;
	case SEN_BUSY:
	case SEN_QUEUE_FULL:
		/* Device busy/full: queue the request for a later resend
		 * and report success to the caller (rv = 0). */
		rv = 0;
		we_p->devindex = -1; // any device will do
		we_p->requestsent = jiffies;
		list_add_tail(&we_p->liste, &request_list);
		++requestq_count;
		we_p->audit[0] |= FP_REQUEST;
		break;
	case SEN_RETRY:
		rv = -ERESTARTSYS;
		break;
	case SEN_NOT_AVAIL:
		PRINTK("*** No devices available.\n");
		rv = we_p->retcode = -ENODEV;
		we_p->status[0] |= STAT_FAILED;
		break;
	case REC_OPERAND_INV:
	case REC_OPERAND_SIZE:
	case REC_EVEN_MOD:
	case REC_INVALID_PAD:
		rv = we_p->retcode = -EINVAL;
		we_p->status[0] |= STAT_FAILED;
		break;
	default:
		we_p->retcode = rv;
		we_p->status[0] |= STAT_FAILED;
		break;
	}
	if (rv != -ERESTARTSYS)
		SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
	spin_unlock_irq(&queuespinlock);
	if (rv == 0)
		tasklet_schedule(&reader_tasklet);
	return rv;
}
1280
1281 /**
1282 * process_results copies the user's work from kernel space.
1283 */
static inline int
z90crypt_process_results(struct work_element *we_p, char __user *buf)
{
	int rv;

	PDEBUG("we_p %p (PID %d)\n", we_p, PID());

	LONG2DEVPTR(we_p->devindex)->dev_total_req_cnt++;
	SET_RDWRMASK(we_p->status[0], STAT_READPEND);

	rv = 0;
	if (!we_p->buffer) {
		PRINTK("we_p %p PID %d in STAT_READPEND: buffer NULL.\n",
		       we_p, PID());
		rv = -ENOBUFF;
	}

	/* Copy the (updated) request structure back to the user. */
	if (!rv)
		if ((rv = copy_to_user(buf, we_p->buffer, we_p->buff_size))) {
			/* rv here is the count of uncopied bytes. */
			PDEBUG("copy_to_user failed: rv = %d\n", rv);
			rv = -EFAULT;
		}

	if (!rv)
		rv = we_p->retcode;
	/* On success, also copy the response data to the user's
	 * result buffer (resp_addr), if there is any. */
	if (!rv)
		if (we_p->resp_buff_size
		    && copy_to_user(we_p->resp_addr, we_p->resp_buff,
				    we_p->resp_buff_size))
			rv = -EFAULT;

	/* The element is free for new work regardless of the outcome. */
	SET_RDWRMASK(we_p->status[0], STAT_NOWORK);
	return rv;
}
1318
1319 static unsigned char NULL_psmid[8] =
1320 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
1321
1322 /**
1323 * Used in device configuration functions
1324 */
1325 #define MAX_RESET 90
1326
1327 /**
1328 * This is used only for PCICC support
1329 */
/**
 * Return 1 if buffer holds a PKCS#1 block-type-1 padded message
 * (0x00 0x01, at least eight 0xFF pad bytes, 0x00, then data),
 * 0 otherwise.  This is used only for PCICC support.
 */
static inline int
is_PKCS11_padded(unsigned char *buffer, int length)
{
	int i;
	/* Guard the unconditional 2-byte header read: callers only
	 * guarantee length >= 1, so length 0 or 1 would read past
	 * the message. */
	if (length < 2)
		return 0;
	if ((buffer[0] != 0x00) || (buffer[1] != 0x01))
		return 0;
	/* Scan over the 0xFF padding run. */
	for (i = 2; i < length; i++)
		if (buffer[i] != 0xFF)
			break;
	/* Need >= 8 pad bytes (i >= 10) and at least one data byte. */
	if ((i < 10) || (i == length))
		return 0;
	if (buffer[i] != 0x00)
		return 0;
	return 1;
}
1345
1346 /**
1347 * This is used only for PCICC support
1348 */
/**
 * Return 1 if buffer holds a PKCS#1 block-type-2 padded message
 * (0x00 0x02, at least eight nonzero random pad bytes, 0x00, then
 * data), 0 otherwise.  This is used only for PCICC support.
 */
static inline int
is_PKCS12_padded(unsigned char *buffer, int length)
{
	int i;
	/* Guard the unconditional 2-byte header read: callers only
	 * guarantee length >= 1, so length 0 or 1 would read past
	 * the message. */
	if (length < 2)
		return 0;
	if ((buffer[0] != 0x00) || (buffer[1] != 0x02))
		return 0;
	/* Scan for the 0x00 separator that ends the random padding. */
	for (i = 2; i < length; i++)
		if (buffer[i] == 0x00)
			break;
	/* Need >= 8 pad bytes (i >= 10) and at least one data byte. */
	if ((i < 10) || (i == length))
		return 0;
	if (buffer[i] != 0x00)
		return 0;
	return 1;
}
1364
1365 /**
1366 * builds struct caller and converts message from generic format to
1367 * device-dependent format
1368 * func is ICARSAMODEXPO or ICARSACRT
1369 * function is PCI_FUNC_KEY_ENCRYPT or PCI_FUNC_KEY_DECRYPT
1370 */
static inline int
build_caller(struct work_element *we_p, short function)
{
	int rv;
	struct caller *caller_p = (struct caller *)we_p->requestptr;

	if ((we_p->devtype != PCICC) && (we_p->devtype != PCICA) &&
	    (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
	    (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A))
		return SEN_NOT_AVAIL;

	memcpy(caller_p->caller_id, we_p->caller_id,
	       sizeof(caller_p->caller_id));
	/* Device-dependent request lives in the caller struct; the
	 * length starts at the buffer capacity and is adjusted by
	 * convert_request to the actual message length. */
	caller_p->caller_dev_dep_req_p = caller_p->caller_dev_dep_req;
	caller_p->caller_dev_dep_req_l = MAX_RESPONSE_SIZE;
	caller_p->caller_buf_p = we_p->buffer;
	INIT_LIST_HEAD(&(caller_p->caller_liste));

	rv = convert_request(we_p->buffer, we_p->funccode, function,
			     z90crypt.cdx, we_p->devtype,
			     &caller_p->caller_dev_dep_req_l,
			     caller_p->caller_dev_dep_req_p);
	if (rv) {
		if (rv == SEN_NOT_AVAIL)
			PDEBUG("request can't be processed on hdwr avail\n");
		else
			PRINTK("Error from convert_request: %d\n", rv);
	}
	else
		/* Stamp the caller id (psmid) into the message header so
		 * the reply can be matched back to this caller. */
		memcpy(&(caller_p->caller_dev_dep_req_p[4]), we_p->caller_id,8);
	return rv;
}
1403
/**
 * Undo build_caller: unlink the caller from its device's caller list
 * (if it was ever linked) and clear its id.  Tolerates a NULL caller
 * and a never-initialized list head (both next and prev checked).
 */
static inline void
unbuild_caller(struct device *device_p, struct caller *caller_p)
{
	if (!caller_p)
		return;
	if (caller_p->caller_liste.next && caller_p->caller_liste.prev)
		if (!list_empty(&caller_p->caller_liste)) {
			list_del_init(&caller_p->caller_liste);
			device_p->dev_caller_count--;
		}
	memset(caller_p->caller_id, 0, sizeof(caller_p->caller_id));
}
1416
/**
 * Validate the user's modexpo / modexpo_crt request held in we_p->buffer,
 * select a device type for it, copy the input data into the work element's
 * scratch area, choose the device function (encrypt/decrypt variant), and
 * build the device-dependent request via build_caller.
 *
 * Returns 0 on success or a SEN_*/REC_* code describing the failure.
 */
static inline int
get_crypto_request_buffer(struct work_element *we_p)
{
	struct ica_rsa_modexpo *mex_p;
	struct ica_rsa_modexpo_crt *crt_p;
	unsigned char *temp_buffer;
	short function;
	int rv;

	/* Both views alias the same buffer; funccode decides which is
	 * valid.  The leading fields used below are common to both. */
	mex_p = (struct ica_rsa_modexpo *) we_p->buffer;
	crt_p = (struct ica_rsa_modexpo_crt *) we_p->buffer;

	PDEBUG("device type input = %d\n", we_p->devtype);

	if (z90crypt.terminating)
		return REC_NO_RESPONSE;
	if (memcmp(we_p->caller_id, NULL_psmid, 8) == 0) {
		PRINTK("psmid zeroes\n");
		return SEN_FATAL_ERROR;
	}
	if (!we_p->buffer) {
		PRINTK("buffer pointer NULL\n");
		return SEN_USER_ERROR;
	}
	if (!we_p->requestptr) {
		PRINTK("caller pointer NULL\n");
		return SEN_USER_ERROR;
	}

	if ((we_p->devtype != PCICA) && (we_p->devtype != PCICC) &&
	    (we_p->devtype != PCIXCC_MCL2) && (we_p->devtype != PCIXCC_MCL3) &&
	    (we_p->devtype != CEX2C) && (we_p->devtype != CEX2A) &&
	    (we_p->devtype != ANYDEV)) {
		PRINTK("invalid device type\n");
		return SEN_USER_ERROR;
	}

	if ((mex_p->inputdatalength < 1) ||
	    (mex_p->inputdatalength > MAX_MOD_SIZE)) {
		PRINTK("inputdatalength[%d] is not valid\n",
		       mex_p->inputdatalength);
		return SEN_USER_ERROR;
	}

	if (mex_p->outputdatalength < mex_p->inputdatalength) {
		PRINTK("outputdatalength[%d] < inputdatalength[%d]\n",
		       mex_p->outputdatalength, mex_p->inputdatalength);
		return SEN_USER_ERROR;
	}

	if (!mex_p->inputdata || !mex_p->outputdata) {
		PRINTK("inputdata[%p] or outputdata[%p] is NULL\n",
		       mex_p->outputdata, mex_p->inputdata);
		return SEN_USER_ERROR;
	}

	/**
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex_p->outputdatalength = mex_p->inputdatalength;

	rv = 0;
	switch (we_p->funccode) {
	case ICARSAMODEXPO:
		if (!mex_p->b_key || !mex_p->n_modulus)
			rv = SEN_USER_ERROR;
		break;
	case ICARSACRT:
		/* CRT splits the modulus: the byte length must be even. */
		if (!IS_EVEN(crt_p->inputdatalength)) {
			PRINTK("inputdatalength[%d] is odd, CRT form\n",
			       crt_p->inputdatalength);
			rv = SEN_USER_ERROR;
			break;
		}
		if (!crt_p->bp_key ||
		    !crt_p->bq_key ||
		    !crt_p->np_prime ||
		    !crt_p->nq_prime ||
		    !crt_p->u_mult_inv) {
			PRINTK("CRT form, bad data: %p/%p/%p/%p/%p\n",
			       crt_p->bp_key, crt_p->bq_key,
			       crt_p->np_prime, crt_p->nq_prime,
			       crt_p->u_mult_inv);
			rv = SEN_USER_ERROR;
		}
		break;
	default:
		PRINTK("bad func = %d\n", we_p->funccode);
		rv = SEN_USER_ERROR;
		break;
	}
	if (rv != 0)
		return rv;

	if (select_device_type(&we_p->devtype, mex_p->inputdatalength) < 0)
		return SEN_NOT_AVAIL;

	/* Scratch area directly follows the work element + caller structs
	 * in the same page allocation. */
	temp_buffer = (unsigned char *)we_p + sizeof(struct work_element) +
		      sizeof(struct caller);
	if (copy_from_user(temp_buffer, mex_p->inputdata,
			   mex_p->inputdatalength) != 0)
		return SEN_RELEASED;

	function = PCI_FUNC_KEY_ENCRYPT;
	switch (we_p->devtype) {
	/* PCICA and CEX2A do everything with a simple RSA mod-expo operation */
	case PCICA:
	case CEX2A:
		function = PCI_FUNC_KEY_ENCRYPT;
		break;
	/**
	 * PCIXCC_MCL2 does all Mod-Expo form with a simple RSA mod-expo
	 * operation, and all CRT forms with a PKCS-1.2 format decrypt.
	 * PCIXCC_MCL3 and CEX2C do all Mod-Expo and CRT forms with a simple RSA
	 * mod-expo operation
	 */
	case PCIXCC_MCL2:
		if (we_p->funccode == ICARSAMODEXPO)
			function = PCI_FUNC_KEY_ENCRYPT;
		else
			function = PCI_FUNC_KEY_DECRYPT;
		break;
	case PCIXCC_MCL3:
	case CEX2C:
		if (we_p->funccode == ICARSAMODEXPO)
			function = PCI_FUNC_KEY_ENCRYPT;
		else
			function = PCI_FUNC_KEY_DECRYPT;
		break;
	/**
	 * PCICC does everything as a PKCS-1.2 format request
	 */
	case PCICC:
		/* PCICC cannot handle input that is is PKCS#1.1 padded */
		if (is_PKCS11_padded(temp_buffer, mex_p->inputdatalength)) {
			return SEN_NOT_AVAIL;
		}
		if (we_p->funccode == ICARSAMODEXPO) {
			if (is_PKCS12_padded(temp_buffer,
					     mex_p->inputdatalength))
				function = PCI_FUNC_KEY_ENCRYPT;
			else
				function = PCI_FUNC_KEY_DECRYPT;
		} else
			/* all CRT forms are decrypts */
			function = PCI_FUNC_KEY_DECRYPT;
		break;
	}
	PDEBUG("function: %04x\n", function);
	rv = build_caller(we_p, function);
	PDEBUG("rv from build_caller = %d\n", rv);
	return rv;
}
1572
/**
 * Copy the user's request structure into the work element and build the
 * device-dependent request, translating internal SEN_*/REC_* codes into
 * errno values for the ioctl path.  funccode is ICARSAMODEXPO or ICARSACRT.
 */
static inline int
z90crypt_prepare(struct work_element *we_p, unsigned int funccode,
		 const char __user *buffer)
{
	int rv;

	we_p->devindex = -1;
	if (funccode == ICARSAMODEXPO)
		we_p->buff_size = sizeof(struct ica_rsa_modexpo);
	else
		we_p->buff_size = sizeof(struct ica_rsa_modexpo_crt);

	if (copy_from_user(we_p->buffer, buffer, we_p->buff_size))
		return -EFAULT;

	we_p->audit[0] |= FP_COPYFROM;
	SET_RDWRMASK(we_p->status[0], STAT_WRITTEN);
	we_p->funccode = funccode;
	we_p->devtype = -1;	// pick any device type
	we_p->audit[0] |= FP_BUFFREQ;
	rv = get_crypto_request_buffer(we_p);
	switch (rv) {
	case 0:
		we_p->audit[0] |= FP_BUFFGOT;
		break;
	case SEN_USER_ERROR:
		rv = -EINVAL;
		break;
	case SEN_QUEUE_FULL:
		rv = 0;		// queued for later; not an error to the user
		break;
	case SEN_RELEASED:
		rv = -EFAULT;
		break;
	case REC_NO_RESPONSE:
		rv = -ENODEV;
		break;
	case SEN_NOT_AVAIL:
	case EGETBUFF:
		rv = -EGETBUFF;
		break;
	default:
		PRINTK("rv = %d\n", rv);
		rv = -EGETBUFF;
		break;
	}
	if (CHK_RDWRMASK(we_p->status[0]) == STAT_WRITTEN)
		SET_RDWRMASK(we_p->status[0], STAT_DEFAULT);
	return rv;
}
1623
/**
 * Remove we_p from whichever queue (request or pending) it sits on,
 * fixing up the corresponding count.  Safe to call when the element is
 * on neither list.  Takes queuespinlock.
 */
static inline void
purge_work_element(struct work_element *we_p)
{
	struct list_head *lptr;

	spin_lock_irq(&queuespinlock);
	list_for_each(lptr, &request_list) {
		if (lptr == &we_p->liste) {
			list_del_init(lptr);
			requestq_count--;
			break;
		}
	}
	list_for_each(lptr, &pending_list) {
		if (lptr == &we_p->liste) {
			list_del_init(lptr);
			pendingq_count--;
			break;
		}
	}
	spin_unlock_irq(&queuespinlock);
}
1646
1647 /**
1648 * Build the request and send it.
1649 */
static inline int
z90crypt_rsa(struct priv_data *private_data_p, pid_t pid,
	     unsigned int cmd, unsigned long arg)
{
	struct work_element *we_p;
	int rv;

	if ((rv = allocate_work_element(&we_p, private_data_p, pid))) {
		PDEBUG("PID %d: allocate_work_element returned ENOMEM\n", pid);
		return rv;
	}
	if ((rv = z90crypt_prepare(we_p, cmd, (const char __user *)arg)))
		PDEBUG("PID %d: rv = %d from z90crypt_prepare\n", pid, rv);
	if (!rv)
		if ((rv = z90crypt_send(we_p, (const char *)arg)))
			PDEBUG("PID %d: rv %d from z90crypt_send.\n", pid, rv);
	if (!rv) {
		/* Block until the reader tasklet signals completion. */
		we_p->audit[0] |= FP_ASLEEP;
		wait_event(we_p->waitq, atomic_read(&we_p->alarmrung));
		we_p->audit[0] |= FP_AWAKE;
		rv = we_p->retcode;
	}
	if (!rv)
		rv = z90crypt_process_results(we_p, (char __user *)arg);

	if ((we_p->status[0] & STAT_FAILED)) {
		switch (rv) {
		/**
		 * EINVAL *after* receive is almost always a padding error or
		 * length error issued by a coprocessor (not an accelerator).
		 * We convert this return value to -EGETBUFF which should
		 * trigger a fallback to software.
		 */
		case -EINVAL:
			if ((we_p->devtype != PCICA) &&
			    (we_p->devtype != CEX2A))
				rv = -EGETBUFF;
			break;
		case -ETIMEOUT:
			if (z90crypt.mask.st_count > 0)
				rv = -ERESTARTSYS;	// retry with another
			else
				rv = -ENODEV;		// no cards left
			/* fall through to clean up request queue */
		case -ERESTARTSYS:
		case -ERELEASED:
			/* Only a still-queued (STAT_WRITTEN) element needs
			 * to be pulled off the queues. */
			switch (CHK_RDWRMASK(we_p->status[0])) {
			case STAT_WRITTEN:
				purge_work_element(we_p);
				break;
			case STAT_READPEND:
			case STAT_NOWORK:
			default:
				break;
			}
			break;
		default:
			we_p->status[0] ^= STAT_FAILED;
			break;
		}
	}
	/* we_p (and its trailing caller/scratch space) is one page. */
	free_page((long)we_p);
	return rv;
}
1714
1715 /**
1716 * This function is a little long, but it's really just one large switch
1717 * statement.
1718 */
/**
 * Main ioctl entry point: dispatches crypto requests (ICARSAMODEXPO /
 * ICARSACRT), the Z90STAT_* status queries, two deprecated status calls,
 * and Z90QUIESCE.  Returns 0 or a negative errno.
 */
static long
z90crypt_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct priv_data *private_data_p = filp->private_data;
	unsigned char *status;
	unsigned char *qdepth;
	unsigned int *reqcnt;
	struct ica_z90_status *pstat;
	int ret, i, loopLim, tempstat;
	static int deprecated_msg_count1 = 0;
	static int deprecated_msg_count2 = 0;

	PDEBUG("filp %p (PID %d), cmd 0x%08X\n", filp, PID(), cmd);
	PDEBUG("cmd 0x%08X: dir %s, size 0x%04X, type 0x%02X, nr 0x%02X\n",
	       cmd,
	       !_IOC_DIR(cmd) ? "NO"
	       : ((_IOC_DIR(cmd) == (_IOC_READ|_IOC_WRITE)) ? "RW"
	       : ((_IOC_DIR(cmd) == _IOC_READ) ? "RD"
	       : "WR")),
	       _IOC_SIZE(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd));

	if (_IOC_TYPE(cmd) != Z90_IOCTL_MAGIC) {
		PRINTK("cmd 0x%08X contains bad magic\n", cmd);
		return -ENOTTY;
	}

	ret = 0;
	switch (cmd) {
	case ICARSAMODEXPO:
	case ICARSACRT:
		if (quiesce_z90crypt) {
			ret = -EQUIESCE;
			break;
		}
		ret = -ENODEV; // Default if no devices
		/* Retry the request once per usable device: -ERESTARTSYS
		 * from z90crypt_rsa means "try another card". */
		loopLim = z90crypt.hdware_info->hdware_mask.st_count -
			(z90crypt.hdware_info->hdware_mask.disabled_count +
			 z90crypt.hdware_info->hdware_mask.user_disabled_count);
		for (i = 0; i < loopLim; i++) {
			ret = z90crypt_rsa(private_data_p, PID(), cmd, arg);
			if (ret != -ERESTARTSYS)
				break;
		}
		if (ret == -ERESTARTSYS)
			ret = -ENODEV;
		break;

	case Z90STAT_TOTALCOUNT:
		tempstat = get_status_totalcount();
		if (copy_to_user((int __user *)arg, &tempstat,sizeof(int)) != 0)
			ret = -EFAULT;
		break;

	case Z90STAT_PCICACOUNT:
		tempstat = get_status_PCICAcount();
		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
			ret = -EFAULT;
		break;

	case Z90STAT_PCICCCOUNT:
		tempstat = get_status_PCICCcount();
		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
			ret = -EFAULT;
		break;

	case Z90STAT_PCIXCCMCL2COUNT:
		tempstat = get_status_PCIXCCMCL2count();
		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
			ret = -EFAULT;
		break;

	case Z90STAT_PCIXCCMCL3COUNT:
		tempstat = get_status_PCIXCCMCL3count();
		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
			ret = -EFAULT;
		break;

	case Z90STAT_CEX2CCOUNT:
		tempstat = get_status_CEX2Ccount();
		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
			ret = -EFAULT;
		break;

	case Z90STAT_CEX2ACOUNT:
		tempstat = get_status_CEX2Acount();
		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
			ret = -EFAULT;
		break;

	case Z90STAT_REQUESTQ_COUNT:
		tempstat = get_status_requestq_count();
		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
			ret = -EFAULT;
		break;

	case Z90STAT_PENDINGQ_COUNT:
		tempstat = get_status_pendingq_count();
		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
			ret = -EFAULT;
		break;

	case Z90STAT_TOTALOPEN_COUNT:
		tempstat = get_status_totalopen_count();
		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
			ret = -EFAULT;
		break;

	case Z90STAT_DOMAIN_INDEX:
		tempstat = get_status_domain_index();
		if (copy_to_user((int __user *)arg, &tempstat, sizeof(int)) != 0)
			ret = -EFAULT;
		break;

	case Z90STAT_STATUS_MASK:
		/* One status byte per possible AP. */
		status = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
		if (!status) {
			PRINTK("kmalloc for status failed!\n");
			ret = -ENOMEM;
			break;
		}
		get_status_status_mask(status);
		if (copy_to_user((char __user *) arg, status, Z90CRYPT_NUM_APS)
									!= 0)
			ret = -EFAULT;
		kfree(status);
		break;

	case Z90STAT_QDEPTH_MASK:
		qdepth = kmalloc(Z90CRYPT_NUM_APS, GFP_KERNEL);
		if (!qdepth) {
			PRINTK("kmalloc for qdepth failed!\n");
			ret = -ENOMEM;
			break;
		}
		get_status_qdepth_mask(qdepth);
		if (copy_to_user((char __user *) arg, qdepth, Z90CRYPT_NUM_APS) != 0)
			ret = -EFAULT;
		kfree(qdepth);
		break;

	case Z90STAT_PERDEV_REQCNT:
		reqcnt = kmalloc(sizeof(int) * Z90CRYPT_NUM_APS, GFP_KERNEL);
		if (!reqcnt) {
			PRINTK("kmalloc for reqcnt failed!\n");
			ret = -ENOMEM;
			break;
		}
		get_status_perdevice_reqcnt(reqcnt);
		if (copy_to_user((char __user *) arg, reqcnt,
				 Z90CRYPT_NUM_APS * sizeof(int)) != 0)
			ret = -EFAULT;
		kfree(reqcnt);
		break;

	/* THIS IS DEPRECATED.  USE THE NEW STATUS CALLS */
	case ICAZ90STATUS:
		if (deprecated_msg_count1 < 20) {
			PRINTK("deprecated call to ioctl (ICAZ90STATUS)!\n");
			deprecated_msg_count1++;
			if (deprecated_msg_count1 == 20)
				PRINTK("No longer issuing messages related to "
				       "deprecated call to ICAZ90STATUS.\n");
		}

		pstat = kmalloc(sizeof(struct ica_z90_status), GFP_KERNEL);
		if (!pstat) {
			PRINTK("kmalloc for pstat failed!\n");
			ret = -ENOMEM;
			break;
		}

		pstat->totalcount	 = get_status_totalcount();
		pstat->leedslitecount	 = get_status_PCICAcount();
		pstat->leeds2count	 = get_status_PCICCcount();
		pstat->requestqWaitCount = get_status_requestq_count();
		pstat->pendingqWaitCount = get_status_pendingq_count();
		pstat->totalOpenCount	 = get_status_totalopen_count();
		pstat->cryptoDomain	 = get_status_domain_index();
		get_status_status_mask(pstat->status);
		get_status_qdepth_mask(pstat->qdepth);

		if (copy_to_user((struct ica_z90_status __user *) arg, pstat,
				 sizeof(struct ica_z90_status)) != 0)
			ret = -EFAULT;
		kfree(pstat);
		break;

	/* THIS IS DEPRECATED.  USE THE NEW STATUS CALLS */
	case Z90STAT_PCIXCCCOUNT:
		if (deprecated_msg_count2 < 20) {
			PRINTK("deprecated ioctl (Z90STAT_PCIXCCCOUNT)!\n");
			deprecated_msg_count2++;
			if (deprecated_msg_count2 == 20)
				PRINTK("No longer issuing messages about depre"
				       "cated ioctl Z90STAT_PCIXCCCOUNT.\n");
		}

		tempstat = get_status_PCIXCCcount();
		/* NOTE(review): cast lacks the __user annotation the other
		 * cases carry — sparse-only issue, behavior is identical. */
		if (copy_to_user((int *)arg, &tempstat, sizeof(int)) != 0)
			ret = -EFAULT;
		break;

	case Z90QUIESCE:
		/* Root only: stop accepting new crypto requests. */
		if (current->euid != 0) {
			PRINTK("QUIESCE fails: euid %d\n",
			       current->euid);
			ret = -EACCES;
		} else {
			PRINTK("QUIESCE device from PID %d\n", PID());
			quiesce_z90crypt = 1;
		}
		break;

	default:
		/* user passed an invalid IOCTL number */
		PDEBUG("cmd 0x%08X contains invalid ioctl code\n", cmd);
		ret = -ENOTTY;
		break;
	}

	return ret;
}
1941
/**
 * Render 'len' bytes as single hex digits ("%01x" each) followed by
 * one blank into outaddr; return the number of characters written.
 */
static inline int
sprintcl(unsigned char *outaddr, unsigned char *addr, unsigned int len)
{
	unsigned int idx;
	int written = 0;

	for (idx = 0; idx < len; idx++)
		written += sprintf(outaddr + written, "%01x",
				   (unsigned int) addr[idx]);
	written += sprintf(outaddr + written, " ");
	return written;
}
1954
/**
 * Render one row of hex output: a leading blank, then the bytes in
 * groups of 16 via sprintcl, then a newline.  Returns the number of
 * characters written.
 */
static inline int
sprintrw(unsigned char *outaddr, unsigned char *addr, unsigned int len)
{
	unsigned int full_groups, leftover, g;
	int written, consumed;

	written = sprintf(outaddr, " ");
	consumed = 0;
	full_groups = len / 16;
	leftover = len % 16;
	for (g = 0; g < full_groups; g++) {
		written += sprintcl(outaddr + written, addr + consumed, 16);
		consumed += 16;
	}
	if (leftover) {
		written += sprintcl(outaddr + written, addr + consumed,
				    leftover);
		consumed += leftover;
	}
	written += sprintf(outaddr + written, "\n");
	return written;
}
1977
/**
 * Render a titled hex dump: "\n<title>\n" followed by the bytes in rows
 * of 64 via sprintrw, then a trailing newline.  Returns the number of
 * characters written.
 */
static inline int
sprinthx(unsigned char *title, unsigned char *outaddr,
	 unsigned char *addr, unsigned int len)
{
	unsigned int full_rows, leftover, r;
	int written, consumed;

	written = sprintf(outaddr, "\n%s\n", title);
	consumed = 0;
	full_rows = len / 64;
	leftover = len % 64;
	for (r = 0; r < full_rows; r++) {
		written += sprintrw(outaddr + written, addr + consumed, 64);
		consumed += 64;
	}
	if (leftover) {
		written += sprintrw(outaddr + written, addr + consumed,
				    leftover);
		consumed += leftover;
	}
	written += sprintf(outaddr + written, "\n");
	return written;
}
2000
/**
 * Render a titled dump of 32-bit words: "\n<title>\n", then each word as
 * "%08X " with a leading blank every 8 words and a newline after every
 * 8th word, then a trailing newline.  Returns characters written.
 */
static inline int
sprinthx4(unsigned char *title, unsigned char *outaddr,
	  unsigned int *array, unsigned int len)
{
	unsigned int w;
	int written;

	written = sprintf(outaddr, "\n%s\n", title);
	for (w = 0; w < len; w++) {
		if ((w % 8) == 0)
			written += sprintf(outaddr + written, " ");
		written += sprintf(outaddr + written, "%08X ", array[w]);
		if ((w % 8) == 7)
			written += sprintf(outaddr + written, "\n");
	}
	written += sprintf(outaddr + written, "\n");
	return written;
}
2021
/**
 * /proc read handler: format driver version, per-type device counts,
 * queue depths, and the three per-AP masks into resp_buff (one page;
 * the second half is used as scratch for the mask getters).
 */
static int
z90crypt_status(char *resp_buff, char **start, off_t offset,
		int count, int *eof, void *data)
{
	unsigned char *workarea;
	int len;

	/* resp_buff is a page. Use the right half for a work area */
	workarea = resp_buff+2000;
	len = 0;
	len += sprintf(resp_buff+len, "\nz90crypt version: %d.%d.%d\n",
		z90crypt_VERSION, z90crypt_RELEASE, z90crypt_VARIANT);
	len += sprintf(resp_buff+len, "Cryptographic domain: %d\n",
		get_status_domain_index());
	len += sprintf(resp_buff+len, "Total device count: %d\n",
		get_status_totalcount());
	len += sprintf(resp_buff+len, "PCICA count: %d\n",
		get_status_PCICAcount());
	len += sprintf(resp_buff+len, "PCICC count: %d\n",
		get_status_PCICCcount());
	len += sprintf(resp_buff+len, "PCIXCC MCL2 count: %d\n",
		get_status_PCIXCCMCL2count());
	len += sprintf(resp_buff+len, "PCIXCC MCL3 count: %d\n",
		get_status_PCIXCCMCL3count());
	len += sprintf(resp_buff+len, "CEX2C count: %d\n",
		get_status_CEX2Ccount());
	len += sprintf(resp_buff+len, "CEX2A count: %d\n",
		get_status_CEX2Acount());
	len += sprintf(resp_buff+len, "requestq count: %d\n",
		get_status_requestq_count());
	len += sprintf(resp_buff+len, "pendingq count: %d\n",
		get_status_pendingq_count());
	len += sprintf(resp_buff+len, "Total open handles: %d\n\n",
		get_status_totalopen_count());
	/* The same "Online devices" layout is parsed back by
	 * z90crypt_status_write to enable/disable cards. */
	len += sprinthx(
		"Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
		"4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A",
		resp_buff+len,
		get_status_status_mask(workarea),
		Z90CRYPT_NUM_APS);
	len += sprinthx("Waiting work element counts",
		resp_buff+len,
		get_status_qdepth_mask(workarea),
		Z90CRYPT_NUM_APS);
	len += sprinthx4(
		"Per-device successfully completed request counts",
		resp_buff+len,
		get_status_perdevice_reqcnt((unsigned int *)workarea),
		Z90CRYPT_NUM_APS);
	*eof = 1;
	memset(workarea, 0, Z90CRYPT_NUM_APS * sizeof(unsigned int));
	return len;
}
2075
2076 static inline void
2077 disable_card(int card_index)
2078 {
2079 struct device *devp;
2080
2081 devp = LONG2DEVPTR(card_index);
2082 if (!devp || devp->user_disabled)
2083 return;
2084 devp->user_disabled = 1;
2085 z90crypt.hdware_info->hdware_mask.user_disabled_count++;
2086 if (devp->dev_type == -1)
2087 return;
2088 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count++;
2089 }
2090
2091 static inline void
2092 enable_card(int card_index)
2093 {
2094 struct device *devp;
2095
2096 devp = LONG2DEVPTR(card_index);
2097 if (!devp || !devp->user_disabled)
2098 return;
2099 devp->user_disabled = 0;
2100 z90crypt.hdware_info->hdware_mask.user_disabled_count--;
2101 if (devp->dev_type == -1)
2102 return;
2103 z90crypt.hdware_info->type_mask[devp->dev_type].user_disabled_count--;
2104 }
2105
2106 static int
2107 z90crypt_status_write(struct file *file, const char __user *buffer,
2108 unsigned long count, void *data)
2109 {
2110 int j, eol;
2111 unsigned char *lbuf, *ptr;
2112 unsigned int local_count;
2113
2114 #define LBUFSIZE 1200
2115 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
2116 if (!lbuf) {
2117 PRINTK("kmalloc failed!\n");
2118 return 0;
2119 }
2120
2121 if (count <= 0)
2122 return 0;
2123
2124 local_count = UMIN((unsigned int)count, LBUFSIZE-1);
2125
2126 if (copy_from_user(lbuf, buffer, local_count) != 0) {
2127 kfree(lbuf);
2128 return -EFAULT;
2129 }
2130
2131 lbuf[local_count] = '\0';
2132
2133 ptr = strstr(lbuf, "Online devices");
2134 if (ptr == 0) {
2135 PRINTK("Unable to parse data (missing \"Online devices\")\n");
2136 kfree(lbuf);
2137 return count;
2138 }
2139
2140 ptr = strstr(ptr, "\n");
2141 if (ptr == 0) {
2142 PRINTK("Unable to parse data (missing newline after \"Online devices\")\n");
2143 kfree(lbuf);
2144 return count;
2145 }
2146 ptr++;
2147
2148 if (strstr(ptr, "Waiting work element counts") == NULL) {
2149 PRINTK("Unable to parse data (missing \"Waiting work element counts\")\n");
2150 kfree(lbuf);
2151 return count;
2152 }
2153
2154 j = 0;
2155 eol = 0;
2156 while ((j < 64) && (*ptr != '\0')) {
2157 switch (*ptr) {
2158 case '\t':
2159 case ' ':
2160 break;
2161 case '\n':
2162 default:
2163 eol = 1;
2164 break;
2165 case '0': // no device
2166 case '1': // PCICA
2167 case '2': // PCICC
2168 case '3': // PCIXCC_MCL2
2169 case '4': // PCIXCC_MCL3
2170 case '5': // CEX2C
2171 case '6': // CEX2A
2172 j++;
2173 break;
2174 case 'd':
2175 case 'D':
2176 disable_card(j);
2177 j++;
2178 break;
2179 case 'e':
2180 case 'E':
2181 enable_card(j);
2182 j++;
2183 break;
2184 }
2185 if (eol)
2186 break;
2187 ptr++;
2188 }
2189
2190 kfree(lbuf);
2191 return count;
2192 }
2193
2194 /**
2195 * Functions that run under a timer, with no process id
2196 *
2197 * The task functions:
2198 * z90crypt_reader_task
2199 * helper_send_work
2200 * helper_handle_work_element
2201 * helper_receive_rc
2202 * z90crypt_config_task
2203 * z90crypt_cleanup_task
2204 *
2205 * Helper functions:
2206 * z90crypt_schedule_reader_timer
2207 * z90crypt_schedule_reader_task
2208 * z90crypt_schedule_config_task
2209 * z90crypt_schedule_cleanup_task
2210 */
/**
 * Dequeue one completed request from the device at 'index'.
 *
 * On success (return 0): psmid holds the response's PSMID, *buff_len_p and
 * buff hold the converted response, and *dest_p_p is set to the user-space
 * output address taken from the caller's original ioctl message.
 * Non-zero returns are REC_* codes; REC_NO_RESPONSE additionally removes
 * the device from availability below.
 */
static inline int
receive_from_crypto_device(int index, unsigned char *psmid, int *buff_len_p,
			   unsigned char *buff, unsigned char __user **dest_p_p)
{
	int dv, rv;
	struct device *dev_ptr;
	struct caller *caller_p;
	struct ica_rsa_modexpo *icaMsg_p;
	struct list_head *ptr, *tptr;

	/* Start with the null PSMID in case nothing is dequeued. */
	memcpy(psmid, NULL_psmid, sizeof(NULL_psmid));

	if (z90crypt.terminating)
		return REC_FATAL_ERROR;

	caller_p = 0;
	dev_ptr = z90crypt.device_p[index];
	rv = 0;
	/* do { } while (0): single pass with break-on-error unwinding. */
	do {
		if (!dev_ptr || dev_ptr->disabled) {
			rv = REC_NO_WORK; // a disabled device can't return work
			break;
		}
		if (dev_ptr->dev_self_x != index) {
			/* Self-index mismatch means the table is corrupt. */
			PRINTKC("Corrupt dev ptr\n");
			z90crypt.terminating = 1;
			rv = REC_FATAL_ERROR;
			break;
		}
		if (!dev_ptr->dev_resp_l || !dev_ptr->dev_resp_p) {
			/* No response buffer: treat like a receive exception. */
			dv = DEV_REC_EXCEPTION;
			PRINTK("dev_resp_l = %d, dev_resp_p = %p\n",
			       dev_ptr->dev_resp_l, dev_ptr->dev_resp_p);
		} else {
			PDEBUG("Dequeue called for device %d\n", index);
			dv = receive_from_AP(index, z90crypt.cdx,
					     dev_ptr->dev_resp_l,
					     dev_ptr->dev_resp_p, psmid);
		}
		/* Map the low-level device status to a REC_* return code. */
		switch (dv) {
		case DEV_REC_EXCEPTION:
			rv = REC_FATAL_ERROR;
			z90crypt.terminating = 1;
			PRINTKC("Exception in receive from device %d\n",
				index);
			break;
		case DEV_ONLINE:
			rv = 0;
			break;
		case DEV_EMPTY:
			rv = REC_EMPTY;
			break;
		case DEV_NO_WORK:
			rv = REC_NO_WORK;
			break;
		case DEV_BAD_MESSAGE:
		case DEV_GONE:
		case REC_HARDWAR_ERR:
		default:
			rv = REC_NO_RESPONSE;
			break;
		}
		if (rv)
			break;
		if (dev_ptr->dev_caller_count <= 0) {
			/* Response arrived but no caller is waiting on it. */
			rv = REC_USER_GONE;
			break;
		}

		/* Find and unlink the caller whose PSMID matches. */
		list_for_each_safe(ptr, tptr, &dev_ptr->dev_caller_list) {
			caller_p = list_entry(ptr, struct caller, caller_liste);
			if (!memcmp(caller_p->caller_id, psmid,
				    sizeof(caller_p->caller_id))) {
				if (!list_empty(&caller_p->caller_liste)) {
					list_del_init(ptr);
					dev_ptr->dev_caller_count--;
					break;
				}
			}
			caller_p = 0;	/* not a match; keep searching */
		}
		if (!caller_p) {
			PRINTKW("Unable to locate PSMID %02X%02X%02X%02X%02X"
				"%02X%02X%02X in device list\n",
				psmid[0], psmid[1], psmid[2], psmid[3],
				psmid[4], psmid[5], psmid[6], psmid[7]);
			rv = REC_USER_GONE;
			break;
		}

		PDEBUG("caller_p after successful receive: %p\n", caller_p);
		/* Translate the raw device response into the caller's format. */
		rv = convert_response(dev_ptr->dev_resp_p,
				      caller_p->caller_buf_p, buff_len_p, buff);
		switch (rv) {
		case REC_USE_PCICA:
			break;
		case REC_OPERAND_INV:
		case REC_OPERAND_SIZE:
		case REC_EVEN_MOD:
		case REC_INVALID_PAD:
			/* Caller-supplied-data problems; not device faults. */
			PDEBUG("device %d: 'user error' %d\n", index, rv);
			break;
		case WRONG_DEVICE_TYPE:
		case REC_HARDWAR_ERR:
		case REC_BAD_MESSAGE:
			PRINTKW("device %d: hardware error %d\n", index, rv);
			rv = REC_NO_RESPONSE;
			break;
		default:
			PDEBUG("device %d: rv = %d\n", index, rv);
			break;
		}
	} while (0);

	switch (rv) {
	case 0:
		PDEBUG("Successful receive from device %d\n", index);
		/* Hand back the user-space destination from the request. */
		icaMsg_p = (struct ica_rsa_modexpo *)caller_p->caller_buf_p;
		*dest_p_p = icaMsg_p->outputdata;
		if (*buff_len_p == 0)
			PRINTK("Zero *buff_len_p\n");
		break;
	case REC_NO_RESPONSE:
		PRINTKW("Removing device %d from availability\n", index);
		remove_device(dev_ptr);
		break;
	}

	/* Release the caller's tracking structure in every outcome. */
	if (caller_p)
		unbuild_caller(dev_ptr, caller_p);

	return rv;
}
2344
/**
 * Take the oldest element off the global request_list and try to send it to
 * the device at (short) index.  On a successful send the element moves to
 * pending_list; on any failure the element is failed and its waiter woken.
 * Caller must hold queuespinlock (all callers in this file do).
 */
static inline void
helper_send_work(int index)
{
	struct work_element *rq_p;
	int rv;

	if (list_empty(&request_list))
		return;
	requestq_count--;
	rq_p = list_entry(request_list.next, struct work_element, liste);
	list_del_init(&rq_p->liste);
	rq_p->audit[1] |= FP_REMREQUEST;
	if (rq_p->devtype == SHRT2DEVPTR(index)->dev_type) {
		/* Device type matches the request; try to enqueue it. */
		rq_p->devindex = SHRT2LONG(index);
		rv = send_to_crypto_device(rq_p);
		if (rv == 0) {
			rq_p->requestsent = jiffies;
			rq_p->audit[0] |= FP_SENT;
			list_add_tail(&rq_p->liste, &pending_list);
			++pendingq_count;
			rq_p->audit[0] |= FP_PENDING;
		} else {
			switch (rv) {
			case REC_OPERAND_INV:
			case REC_OPERAND_SIZE:
			case REC_EVEN_MOD:
			case REC_INVALID_PAD:
				/* Bad caller data. */
				rq_p->retcode = -EINVAL;
				break;
			case SEN_NOT_AVAIL:
			case SEN_RETRY:
			case REC_NO_RESPONSE:
			default:
				/*
				 * With more than one device the syscall can
				 * be restarted and retried elsewhere.
				 */
				if (z90crypt.mask.st_count > 1)
					rq_p->retcode =
						-ERESTARTSYS;
				else
					rq_p->retcode = -ENODEV;
				break;
			}
			rq_p->status[0] |= STAT_FAILED;
			rq_p->audit[1] |= FP_AWAKENING;
			atomic_set(&rq_p->alarmrung, 1);
			wake_up(&rq_p->waitq);
		}
	} else {
		/* Wrong device type for this request; fail it the same way. */
		if (z90crypt.mask.st_count > 1)
			rq_p->retcode = -ERESTARTSYS;
		else
			rq_p->retcode = -ENODEV;
		rq_p->status[0] |= STAT_FAILED;
		rq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&rq_p->alarmrung, 1);
		wake_up(&rq_p->waitq);
	}
}
2401
/**
 * Match a dequeued response (psmid) against pending_list, fill in the work
 * element's result or failure code from rc, and wake the waiting caller.
 * rc is one of the REC_* codes produced by receive_from_crypto_device().
 */
static inline void
helper_handle_work_element(int index, unsigned char psmid[8], int rc,
			   int buff_len, unsigned char *buff,
			   unsigned char __user *resp_addr)
{
	struct work_element *pq_p;
	struct list_head *lptr, *tptr;

	/* Locate the pending element whose caller_id matches the PSMID. */
	pq_p = 0;
	list_for_each_safe(lptr, tptr, &pending_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		if (!memcmp(pq_p->caller_id, psmid, sizeof(pq_p->caller_id))) {
			list_del_init(lptr);
			pendingq_count--;
			pq_p->audit[1] |= FP_NOTPENDING;
			break;
		}
		pq_p = 0;	/* not a match */
	}

	if (!pq_p) {
		PRINTK("device %d has work but no caller exists on pending Q\n",
		       SHRT2LONG(index));
		return;
	}

	switch (rc) {
	case 0:
		/* Success: stash the response for the reading process. */
		pq_p->resp_buff_size = buff_len;
		pq_p->audit[1] |= FP_RESPSIZESET;
		if (buff_len) {
			pq_p->resp_addr = resp_addr;
			pq_p->audit[1] |= FP_RESPADDRCOPIED;
			memcpy(pq_p->resp_buff, buff, buff_len);
			pq_p->audit[1] |= FP_RESPBUFFCOPIED;
		}
		break;
	case REC_OPERAND_INV:
	case REC_OPERAND_SIZE:
	case REC_EVEN_MOD:
	case REC_INVALID_PAD:
		/* Caller-data errors map to -EINVAL. */
		PDEBUG("-EINVAL after application error %d\n", rc);
		pq_p->retcode = -EINVAL;
		pq_p->status[0] |= STAT_FAILED;
		break;
	case REC_USE_PCICA:
		/* Must be retried on a PCICA; restart the syscall. */
		pq_p->retcode = -ERESTARTSYS;
		pq_p->status[0] |= STAT_FAILED;
		break;
	case REC_NO_RESPONSE:
	default:
		if (z90crypt.mask.st_count > 1)
			pq_p->retcode = -ERESTARTSYS;
		else
			pq_p->retcode = -ENODEV;
		pq_p->status[0] |= STAT_FAILED;
		break;
	}
	/* Wake unless the element was already failed with -ERELEASED. */
	if ((pq_p->status[0] != STAT_FAILED) || (pq_p->retcode != -ERELEASED)) {
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
	}
}
2466
2467 /**
2468 * return TRUE if the work element should be removed from the queue
2469 */
2470 static inline int
2471 helper_receive_rc(int index, int *rc_p)
2472 {
2473 switch (*rc_p) {
2474 case 0:
2475 case REC_OPERAND_INV:
2476 case REC_OPERAND_SIZE:
2477 case REC_EVEN_MOD:
2478 case REC_INVALID_PAD:
2479 case REC_USE_PCICA:
2480 break;
2481
2482 case REC_BUSY:
2483 case REC_NO_WORK:
2484 case REC_EMPTY:
2485 case REC_RETRY_DEV:
2486 case REC_FATAL_ERROR:
2487 return 0;
2488
2489 case REC_NO_RESPONSE:
2490 break;
2491
2492 default:
2493 PRINTK("rc %d, device %d converted to REC_NO_RESPONSE\n",
2494 *rc_p, SHRT2LONG(index));
2495 *rc_p = REC_NO_RESPONSE;
2496 break;
2497 }
2498 return 1;
2499 }
2500
2501 static inline void
2502 z90crypt_schedule_reader_timer(void)
2503 {
2504 if (timer_pending(&reader_timer))
2505 return;
2506 if (mod_timer(&reader_timer, jiffies+(READERTIME*HZ/1000)) != 0)
2507 PRINTK("Timer pending while modifying reader timer\n");
2508 }
2509
/**
 * Reader tasklet body: poll every online device for completed work,
 * dispatch results to waiting callers, and refill device queues from
 * request_list.  Re-arms the reader timer if work remains queued.
 */
static void
z90crypt_reader_task(unsigned long ptr)
{
	int workavail, index, rc, buff_len;
	unsigned char psmid[8];
	unsigned char __user *resp_addr;
	/* static: too large for the tasklet's stack; single-threaded use */
	static unsigned char buff[1024];

	/**
	 * we use workavail = 2 to ensure 2 passes with nothing dequeued before
	 * exiting the loop. If (pendingq_count+requestq_count) == 0 after the
	 * loop, there is no work remaining on the queues.
	 */
	resp_addr = 0;
	workavail = 2;
	buff_len = 0;
	while (workavail) {
		workavail--;
		rc = 0;
		spin_lock_irq(&queuespinlock);
		memset(buff, 0x00, sizeof(buff));

		/* Dequeue once from each device in round robin. */
		for (index = 0; index < z90crypt.mask.st_count; index++) {
			PDEBUG("About to receive.\n");
			rc = receive_from_crypto_device(SHRT2LONG(index),
							psmid,
							&buff_len,
							buff,
							&resp_addr);
			PDEBUG("Dequeued: rc = %d.\n", rc);

			if (helper_receive_rc(index, &rc)) {
				if (rc != REC_NO_RESPONSE) {
					/* Freed a queue slot; send more work
					 * and reset the idle countdown. */
					helper_send_work(index);
					workavail = 2;
				}

				helper_handle_work_element(index, psmid, rc,
							   buff_len, buff,
							   resp_addr);
			}

			if (rc == REC_FATAL_ERROR)
				PRINTKW("REC_FATAL_ERROR from device %d!\n",
					SHRT2LONG(index));
		}
		spin_unlock_irq(&queuespinlock);
	}

	/* Still work queued: poll again after READERTIME ms. */
	if (pendingq_count + requestq_count)
		z90crypt_schedule_reader_timer();
}
2563
2564 static inline void
2565 z90crypt_schedule_config_task(unsigned int expiration)
2566 {
2567 if (timer_pending(&config_timer))
2568 return;
2569 if (mod_timer(&config_timer, jiffies+(expiration*HZ)) != 0)
2570 PRINTK("Timer pending while modifying config timer\n");
2571 }
2572
2573 static void
2574 z90crypt_config_task(unsigned long ptr)
2575 {
2576 int rc;
2577
2578 PDEBUG("jiffies %ld\n", jiffies);
2579
2580 if ((rc = refresh_z90crypt(&z90crypt.cdx)))
2581 PRINTK("Error %d detected in refresh_z90crypt.\n", rc);
2582 /* If return was fatal, don't bother reconfiguring */
2583 if ((rc != TSQ_FATAL_ERROR) && (rc != RSQ_FATAL_ERROR))
2584 z90crypt_schedule_config_task(CONFIGTIME);
2585 }
2586
2587 static inline void
2588 z90crypt_schedule_cleanup_task(void)
2589 {
2590 if (timer_pending(&cleanup_timer))
2591 return;
2592 if (mod_timer(&cleanup_timer, jiffies+(CLEANUPTIME*HZ)) != 0)
2593 PRINTK("Timer pending while modifying cleanup timer\n");
2594 }
2595
/**
 * Fail every element on both pending_list and request_list with -ENODEV
 * and wake all waiters.  Used when no devices remain online.
 * Caller must hold queuespinlock (z90crypt_cleanup_task does).
 */
static inline void
helper_drain_queues(void)
{
	struct work_element *pq_p;
	struct list_head *lptr, *tptr;

	/* Pending elements also hold a caller struct that must be released. */
	list_for_each_safe(lptr, tptr, &pending_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		pq_p->retcode = -ENODEV;
		pq_p->status[0] |= STAT_FAILED;
		unbuild_caller(LONG2DEVPTR(pq_p->devindex),
			       (struct caller *)pq_p->requestptr);
		list_del_init(lptr);
		pendingq_count--;
		pq_p->audit[1] |= FP_NOTPENDING;
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
	}

	/* Requests never sent have no caller struct; just fail and wake. */
	list_for_each_safe(lptr, tptr, &request_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		pq_p->retcode = -ENODEV;
		pq_p->status[0] |= STAT_FAILED;
		list_del_init(lptr);
		requestq_count--;
		pq_p->audit[1] |= FP_REMREQUEST;
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
	}
}
2628
/**
 * Expire work elements older than CLEANUPTIME seconds with -ETIMEOUT
 * (a driver-local code -- note it is not the standard ETIMEDOUT) and wake
 * their waiters.  Request-queue entries are only purged when nothing is
 * pending, since otherwise they may still get serviced.
 * Caller must hold queuespinlock (z90crypt_cleanup_task does).
 */
static inline void
helper_timeout_requests(void)
{
	struct work_element *pq_p;
	struct list_head *lptr, *tptr;
	long timelimit;

	timelimit = jiffies - (CLEANUPTIME * HZ);
	/* The list is in strict chronological order */
	list_for_each_safe(lptr, tptr, &pending_list) {
		pq_p = list_entry(lptr, struct work_element, liste);
		if (pq_p->requestsent >= timelimit)
			break;	/* everything after this is newer */
		PRINTKW("Purging(PQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
			((struct caller *)pq_p->requestptr)->caller_id[0],
			((struct caller *)pq_p->requestptr)->caller_id[1],
			((struct caller *)pq_p->requestptr)->caller_id[2],
			((struct caller *)pq_p->requestptr)->caller_id[3],
			((struct caller *)pq_p->requestptr)->caller_id[4],
			((struct caller *)pq_p->requestptr)->caller_id[5],
			((struct caller *)pq_p->requestptr)->caller_id[6],
			((struct caller *)pq_p->requestptr)->caller_id[7]);
		pq_p->retcode = -ETIMEOUT;
		pq_p->status[0] |= STAT_FAILED;
		/* get this off any caller queue it may be on */
		unbuild_caller(LONG2DEVPTR(pq_p->devindex),
			       (struct caller *) pq_p->requestptr);
		list_del_init(lptr);
		pendingq_count--;
		pq_p->audit[1] |= FP_TIMEDOUT;
		pq_p->audit[1] |= FP_NOTPENDING;
		pq_p->audit[1] |= FP_AWAKENING;
		atomic_set(&pq_p->alarmrung, 1);
		wake_up(&pq_p->waitq);
	}

	/**
	 * If pending count is zero, items left on the request queue may
	 * never be processed.
	 */
	if (pendingq_count <= 0) {
		list_for_each_safe(lptr, tptr, &request_list) {
			pq_p = list_entry(lptr, struct work_element, liste);
			if (pq_p->requestsent >= timelimit)
				break;
			PRINTKW("Purging(RQ) PSMID %02X%02X%02X%02X%02X%02X%02X%02X\n",
				((struct caller *)pq_p->requestptr)->caller_id[0],
				((struct caller *)pq_p->requestptr)->caller_id[1],
				((struct caller *)pq_p->requestptr)->caller_id[2],
				((struct caller *)pq_p->requestptr)->caller_id[3],
				((struct caller *)pq_p->requestptr)->caller_id[4],
				((struct caller *)pq_p->requestptr)->caller_id[5],
				((struct caller *)pq_p->requestptr)->caller_id[6],
				((struct caller *)pq_p->requestptr)->caller_id[7]);
			pq_p->retcode = -ETIMEOUT;
			pq_p->status[0] |= STAT_FAILED;
			list_del_init(lptr);
			requestq_count--;
			pq_p->audit[1] |= FP_TIMEDOUT;
			pq_p->audit[1] |= FP_REMREQUEST;
			pq_p->audit[1] |= FP_AWAKENING;
			atomic_set(&pq_p->alarmrung, 1);
			wake_up(&pq_p->waitq);
		}
	}
}
2695
2696 static void
2697 z90crypt_cleanup_task(unsigned long ptr)
2698 {
2699 PDEBUG("jiffies %ld\n", jiffies);
2700 spin_lock_irq(&queuespinlock);
2701 if (z90crypt.mask.st_count <= 0) // no devices!
2702 helper_drain_queues();
2703 else
2704 helper_timeout_requests();
2705 spin_unlock_irq(&queuespinlock);
2706 z90crypt_schedule_cleanup_task();
2707 }
2708
/**
 * Reader timer callback: defer the actual dequeue work to the reader
 * tasklet so it runs in softirq context.
 */
static void
z90crypt_schedule_reader_task(unsigned long ptr)
{
	tasklet_schedule(&reader_tasklet);
}
2714
2715 /**
2716 * Lowlevel Functions:
2717 *
2718 * create_z90crypt: creates and initializes basic data structures
2719 * refresh_z90crypt: re-initializes basic data structures
2720 * find_crypto_devices: returns a count and mask of hardware status
2721 * create_crypto_device: builds the descriptor for a device
2722 * destroy_crypto_device: unallocates the descriptor for a device
2723 * destroy_z90crypt: drains all work, unallocates structs
2724 */
2725
2726 /**
2727 * build the z90crypt root structure using the given domain index
2728 */
2729 static int
2730 create_z90crypt(int *cdx_p)
2731 {
2732 struct hdware_block *hdware_blk_p;
2733
2734 memset(&z90crypt, 0x00, sizeof(struct z90crypt));
2735 z90crypt.domain_established = 0;
2736 z90crypt.len = sizeof(struct z90crypt);
2737 z90crypt.max_count = Z90CRYPT_NUM_DEVS;
2738 z90crypt.cdx = *cdx_p;
2739
2740 hdware_blk_p = (struct hdware_block *)
2741 kmalloc(sizeof(struct hdware_block), GFP_ATOMIC);
2742 if (!hdware_blk_p) {
2743 PDEBUG("kmalloc for hardware block failed\n");
2744 return ENOMEM;
2745 }
2746 memset(hdware_blk_p, 0x00, sizeof(struct hdware_block));
2747 z90crypt.hdware_info = hdware_blk_p;
2748
2749 return 0;
2750 }
2751
2752 static inline int
2753 helper_scan_devices(int cdx_array[16], int *cdx_p, int *correct_cdx_found)
2754 {
2755 enum hdstat hd_stat;
2756 int q_depth, dev_type;
2757 int indx, chkdom, numdomains;
2758
2759 q_depth = dev_type = numdomains = 0;
2760 for (chkdom = 0; chkdom <= 15; cdx_array[chkdom++] = -1);
2761 for (indx = 0; indx < z90crypt.max_count; indx++) {
2762 hd_stat = HD_NOT_THERE;
2763 numdomains = 0;
2764 for (chkdom = 0; chkdom <= 15; chkdom++) {
2765 hd_stat = query_online(indx, chkdom, MAX_RESET,
2766 &q_depth, &dev_type);
2767 if (hd_stat == HD_TSQ_EXCEPTION) {
2768 z90crypt.terminating = 1;
2769 PRINTKC("exception taken!\n");
2770 break;
2771 }
2772 if (hd_stat == HD_ONLINE) {
2773 cdx_array[numdomains++] = chkdom;
2774 if (*cdx_p == chkdom) {
2775 *correct_cdx_found = 1;
2776 break;
2777 }
2778 }
2779 }
2780 if ((*correct_cdx_found == 1) || (numdomains != 0))
2781 break;
2782 if (z90crypt.terminating)
2783 break;
2784 }
2785 return numdomains;
2786 }
2787
/**
 * Determine the crypto domain to use.  Returns 0 with *cdx_p set when a
 * single unambiguous domain is found (or the requested one is confirmed);
 * otherwise a Z90C_* / TSQ_* error code.
 */
static inline int
probe_crypto_domain(int *cdx_p)
{
	int cdx_array[16];
	/*
	 * 53 is an exact fit for the worst case of all 16 domains (0-15):
	 * 22 digit chars + 15 ", " separators + NUL.  temp holds ", NN"+NUL.
	 */
	char cdx_array_text[53], temp[5];
	int correct_cdx_found, numdomains;

	correct_cdx_found = 0;
	numdomains = helper_scan_devices(cdx_array, cdx_p, &correct_cdx_found);

	if (z90crypt.terminating)
		return TSQ_FATAL_ERROR;

	if (correct_cdx_found)
		return 0;

	if (numdomains == 0) {
		PRINTKW("Unable to find crypto domain: No devices found\n");
		return Z90C_NO_DEVICES;
	}

	if (numdomains == 1) {
		/* Exactly one domain online: adopt it if none was requested. */
		if (*cdx_p == -1) {
			*cdx_p = cdx_array[0];
			return 0;
		}
		PRINTKW("incorrect domain: specified = %d, found = %d\n",
			*cdx_p, cdx_array[0]);
		return Z90C_INCORRECT_DOMAIN;
	}

	/* Multiple candidates: format the list for the warning message. */
	numdomains--;
	sprintf(cdx_array_text, "%d", cdx_array[numdomains]);
	while (numdomains) {
		numdomains--;
		sprintf(temp, ", %d", cdx_array[numdomains]);
		strcat(cdx_array_text, temp);
	}

	PRINTKW("ambiguous domain detected: specified = %d, found array = %s\n",
		*cdx_p, cdx_array_text);
	return Z90C_AMBIGUOUS_DOMAIN;
}
2831
2832 static int
2833 refresh_z90crypt(int *cdx_p)
2834 {
2835 int i, j, indx, rv;
2836 static struct status local_mask;
2837 struct device *devPtr;
2838 unsigned char oldStat, newStat;
2839 int return_unchanged;
2840
2841 if (z90crypt.len != sizeof(z90crypt))
2842 return ENOTINIT;
2843 if (z90crypt.terminating)
2844 return TSQ_FATAL_ERROR;
2845 rv = 0;
2846 if (!z90crypt.hdware_info->hdware_mask.st_count &&
2847 !z90crypt.domain_established) {
2848 rv = probe_crypto_domain(cdx_p);
2849 if (z90crypt.terminating)
2850 return TSQ_FATAL_ERROR;
2851 if (rv == Z90C_NO_DEVICES)
2852 return 0; // try later
2853 if (rv)
2854 return rv;
2855 z90crypt.cdx = *cdx_p;
2856 z90crypt.domain_established = 1;
2857 }
2858 rv = find_crypto_devices(&local_mask);
2859 if (rv) {
2860 PRINTK("find crypto devices returned %d\n", rv);
2861 return rv;
2862 }
2863 if (!memcmp(&local_mask, &z90crypt.hdware_info->hdware_mask,
2864 sizeof(struct status))) {
2865 return_unchanged = 1;
2866 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++) {
2867 /**
2868 * Check for disabled cards. If any device is marked
2869 * disabled, destroy it.
2870 */
2871 for (j = 0;
2872 j < z90crypt.hdware_info->type_mask[i].st_count;
2873 j++) {
2874 indx = z90crypt.hdware_info->type_x_addr[i].
2875 device_index[j];
2876 devPtr = z90crypt.device_p[indx];
2877 if (devPtr && devPtr->disabled) {
2878 local_mask.st_mask[indx] = HD_NOT_THERE;
2879 return_unchanged = 0;
2880 }
2881 }
2882 }
2883 if (return_unchanged == 1)
2884 return 0;
2885 }
2886
2887 spin_lock_irq(&queuespinlock);
2888 for (i = 0; i < z90crypt.max_count; i++) {
2889 oldStat = z90crypt.hdware_info->hdware_mask.st_mask[i];
2890 newStat = local_mask.st_mask[i];
2891 if ((oldStat == HD_ONLINE) && (newStat != HD_ONLINE))
2892 destroy_crypto_device(i);
2893 else if ((oldStat != HD_ONLINE) && (newStat == HD_ONLINE)) {
2894 rv = create_crypto_device(i);
2895 if (rv >= REC_FATAL_ERROR)
2896 return rv;
2897 if (rv != 0) {
2898 local_mask.st_mask[i] = HD_NOT_THERE;
2899 local_mask.st_count--;
2900 }
2901 }
2902 }
2903 memcpy(z90crypt.hdware_info->hdware_mask.st_mask, local_mask.st_mask,
2904 sizeof(local_mask.st_mask));
2905 z90crypt.hdware_info->hdware_mask.st_count = local_mask.st_count;
2906 z90crypt.hdware_info->hdware_mask.disabled_count =
2907 local_mask.disabled_count;
2908 refresh_index_array(&z90crypt.mask, &z90crypt.overall_device_x);
2909 for (i = 0; i < Z90CRYPT_NUM_TYPES; i++)
2910 refresh_index_array(&(z90crypt.hdware_info->type_mask[i]),
2911 &(z90crypt.hdware_info->type_x_addr[i]));
2912 spin_unlock_irq(&queuespinlock);
2913
2914 return rv;
2915 }
2916
2917 static int
2918 find_crypto_devices(struct status *deviceMask)
2919 {
2920 int i, q_depth, dev_type;
2921 enum hdstat hd_stat;
2922
2923 deviceMask->st_count = 0;
2924 deviceMask->disabled_count = 0;
2925 deviceMask->user_disabled_count = 0;
2926
2927 for (i = 0; i < z90crypt.max_count; i++) {
2928 hd_stat = query_online(i, z90crypt.cdx, MAX_RESET, &q_depth,
2929 &dev_type);
2930 if (hd_stat == HD_TSQ_EXCEPTION) {
2931 z90crypt.terminating = 1;
2932 PRINTKC("Exception during probe for crypto devices\n");
2933 return TSQ_FATAL_ERROR;
2934 }
2935 deviceMask->st_mask[i] = hd_stat;
2936 if (hd_stat == HD_ONLINE) {
2937 PDEBUG("Got an online crypto!: %d\n", i);
2938 PDEBUG("Got a queue depth of %d\n", q_depth);
2939 PDEBUG("Got a device type of %d\n", dev_type);
2940 if (q_depth <= 0)
2941 return TSQ_FATAL_ERROR;
2942 deviceMask->st_count++;
2943 z90crypt.q_depth_array[i] = q_depth;
2944 z90crypt.dev_type_array[i] = dev_type;
2945 }
2946 }
2947
2948 return 0;
2949 }
2950
2951 static int
2952 refresh_index_array(struct status *status_str, struct device_x *index_array)
2953 {
2954 int i, count;
2955 enum devstat stat;
2956
2957 i = -1;
2958 count = 0;
2959 do {
2960 stat = status_str->st_mask[++i];
2961 if (stat == DEV_ONLINE)
2962 index_array->device_index[count++] = i;
2963 } while ((i < Z90CRYPT_NUM_DEVS) && (count < status_str->st_count));
2964
2965 return count;
2966 }
2967
2968 static int
2969 create_crypto_device(int index)
2970 {
2971 int rv, devstat, total_size;
2972 struct device *dev_ptr;
2973 struct status *type_str_p;
2974 int deviceType;
2975
2976 dev_ptr = z90crypt.device_p[index];
2977 if (!dev_ptr) {
2978 total_size = sizeof(struct device) +
2979 z90crypt.q_depth_array[index] * sizeof(int);
2980
2981 dev_ptr = (struct device *) kmalloc(total_size, GFP_ATOMIC);
2982 if (!dev_ptr) {
2983 PRINTK("kmalloc device %d failed\n", index);
2984 return ENOMEM;
2985 }
2986 memset(dev_ptr, 0, total_size);
2987 dev_ptr->dev_resp_p = kmalloc(MAX_RESPONSE_SIZE, GFP_ATOMIC);
2988 if (!dev_ptr->dev_resp_p) {
2989 kfree(dev_ptr);
2990 PRINTK("kmalloc device %d rec buffer failed\n", index);
2991 return ENOMEM;
2992 }
2993 dev_ptr->dev_resp_l = MAX_RESPONSE_SIZE;
2994 INIT_LIST_HEAD(&(dev_ptr->dev_caller_list));
2995 }
2996
2997 devstat = reset_device(index, z90crypt.cdx, MAX_RESET);
2998 if (devstat == DEV_RSQ_EXCEPTION) {
2999 PRINTK("exception during reset device %d\n", index);
3000 kfree(dev_ptr->dev_resp_p);
3001 kfree(dev_ptr);
3002 return RSQ_FATAL_ERROR;
3003 }
3004 if (devstat == DEV_ONLINE) {
3005 dev_ptr->dev_self_x = index;
3006 dev_ptr->dev_type = z90crypt.dev_type_array[index];
3007 if (dev_ptr->dev_type == NILDEV) {
3008 rv = probe_device_type(dev_ptr);
3009 if (rv) {
3010 PRINTK("rv = %d from probe_device_type %d\n",
3011 rv, index);
3012 kfree(dev_ptr->dev_resp_p);
3013 kfree(dev_ptr);
3014 return rv;
3015 }
3016 }
3017 if (dev_ptr->dev_type == PCIXCC_UNK) {
3018 rv = probe_PCIXCC_type(dev_ptr);
3019 if (rv) {
3020 PRINTK("rv = %d from probe_PCIXCC_type %d\n",
3021 rv, index);
3022 kfree(dev_ptr->dev_resp_p);
3023 kfree(dev_ptr);
3024 return rv;
3025 }
3026 }
3027 deviceType = dev_ptr->dev_type;
3028 z90crypt.dev_type_array[index] = deviceType;
3029 if (deviceType == PCICA)
3030 z90crypt.hdware_info->device_type_array[index] = 1;
3031 else if (deviceType == PCICC)
3032 z90crypt.hdware_info->device_type_array[index] = 2;
3033 else if (deviceType == PCIXCC_MCL2)
3034 z90crypt.hdware_info->device_type_array[index] = 3;
3035 else if (deviceType == PCIXCC_MCL3)
3036 z90crypt.hdware_info->device_type_array[index] = 4;
3037 else if (deviceType == CEX2C)
3038 z90crypt.hdware_info->device_type_array[index] = 5;
3039 else if (deviceType == CEX2A)
3040 z90crypt.hdware_info->device_type_array[index] = 6;
3041 else // No idea how this would happen.
3042 z90crypt.hdware_info->device_type_array[index] = -1;
3043 }
3044
3045 /**
3046 * 'q_depth' returned by the hardware is one less than
3047 * the actual depth
3048 */
3049 dev_ptr->dev_q_depth = z90crypt.q_depth_array[index];
3050 dev_ptr->dev_type = z90crypt.dev_type_array[index];
3051 dev_ptr->dev_stat = devstat;
3052 dev_ptr->disabled = 0;
3053 z90crypt.device_p[index] = dev_ptr;
3054
3055 if (devstat == DEV_ONLINE) {
3056 if (z90crypt.mask.st_mask[index] != DEV_ONLINE) {
3057 z90crypt.mask.st_mask[index] = DEV_ONLINE;
3058 z90crypt.mask.st_count++;
3059 }
3060 deviceType = dev_ptr->dev_type;
3061 type_str_p = &z90crypt.hdware_info->type_mask[deviceType];
3062 if (type_str_p->st_mask[index] != DEV_ONLINE) {
3063 type_str_p->st_mask[index] = DEV_ONLINE;
3064 type_str_p->st_count++;
3065 }
3066 }
3067
3068 return 0;
3069 }
3070
3071 static int
3072 destroy_crypto_device(int index)
3073 {
3074 struct device *dev_ptr;
3075 int t, disabledFlag;
3076
3077 dev_ptr = z90crypt.device_p[index];
3078
3079 /* remember device type; get rid of device struct */
3080 if (dev_ptr) {
3081 disabledFlag = dev_ptr->disabled;
3082 t = dev_ptr->dev_type;
3083 kfree(dev_ptr->dev_resp_p);
3084 kfree(dev_ptr);
3085 } else {
3086 disabledFlag = 0;
3087 t = -1;
3088 }
3089 z90crypt.device_p[index] = 0;
3090
3091 /* if the type is valid, remove the device from the type_mask */
3092 if ((t != -1) && z90crypt.hdware_info->type_mask[t].st_mask[index]) {
3093 z90crypt.hdware_info->type_mask[t].st_mask[index] = 0x00;
3094 z90crypt.hdware_info->type_mask[t].st_count--;
3095 if (disabledFlag == 1)
3096 z90crypt.hdware_info->type_mask[t].disabled_count--;
3097 }
3098 if (z90crypt.mask.st_mask[index] != DEV_GONE) {
3099 z90crypt.mask.st_mask[index] = DEV_GONE;
3100 z90crypt.mask.st_count--;
3101 }
3102 z90crypt.hdware_info->device_type_array[index] = 0;
3103
3104 return 0;
3105 }
3106
3107 static void
3108 destroy_z90crypt(void)
3109 {
3110 int i;
3111
3112 for (i = 0; i < z90crypt.max_count; i++)
3113 if (z90crypt.device_p[i])
3114 destroy_crypto_device(i);
3115 kfree(z90crypt.hdware_info);
3116 memset((void *)&z90crypt, 0, sizeof(z90crypt));
3117 }
3118
/**
 * Canned test request sent by probe_device_type() to distinguish PCICC
 * from PCICA hardware (the reply's first two bytes are checked there).
 * Do not modify: the byte layout is what the card parses.
 */
static unsigned char static_testmsg[384] = {
0x00,0x00,0x00,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x00,0x06,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x58,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x43,0x43,
0x41,0x2d,0x41,0x50,0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,0x00,0x00,0x00,0x00,
0x50,0x4b,0x00,0x00,0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x32,
0x01,0x00,0xa0,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0xb8,0x05,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,0x49,0x43,0x53,0x46,
0x20,0x20,0x20,0x20,0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,0x2d,0x31,0x2e,0x32,
0x37,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,0x88,0x1e,0x00,0x00,
0x57,0x00,0x00,0x00,0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,0x03,0x02,0x00,0x00,
0x40,0x01,0x00,0x01,0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,0xf6,0xd2,0x7b,0x58,
0x4b,0xf9,0x28,0x68,0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,0x63,0x42,0xef,0xf8,
0xfd,0xa4,0xf8,0xb0,0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,0x53,0x8c,0x6f,0x4e,
0x72,0x8f,0x6c,0x04,0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,0xf7,0xdd,0xfd,0x4f,
0x11,0x36,0x95,0x5d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00
};
3145
/**
 * Determine whether the device is a PCICC or PCICA by sending the canned
 * static_testmsg and inspecting the reply: a response starting 0x00 0x86
 * means PCICC, anything else PCICA.  Returns 0 on success (devPtr->dev_type
 * set) or a SEN_*/REC_* error code.
 */
static int
probe_device_type(struct device *devPtr)
{
	int rv, dv, i, index, length;
	unsigned char psmid[8];
	/* static: large buffer kept off the stack; copied fresh each call */
	static unsigned char loc_testmsg[sizeof(static_testmsg)];

	index = devPtr->dev_self_x;
	rv = 0;
	/* do { } while (0): single pass with break-on-error unwinding. */
	do {
		memcpy(loc_testmsg, static_testmsg, sizeof(static_testmsg));
		length = sizeof(static_testmsg) - 24;
		/* the -24 allows for the header */
		dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
		if (dv) {
			PDEBUG("dv returned by send during probe: %d\n", dv);
			if (dv == DEV_SEN_EXCEPTION) {
				rv = SEN_FATAL_ERROR;
				PRINTKC("exception in send to AP %d\n", index);
				break;
			}
			PDEBUG("return value from send_to_AP: %d\n", rv);
			/* Map the send status to a SEN_* code. */
			switch (dv) {
			case DEV_GONE:
				PDEBUG("dev %d not available\n", index);
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = SEN_NOT_AVAIL;
				break;
			case DEV_NO_WORK:
				rv = SEN_FATAL_ERROR;
				break;
			case DEV_BAD_MESSAGE:
				rv = SEN_USER_ERROR;
				break;
			case DEV_QUEUE_FULL:
				rv = SEN_QUEUE_FULL;
				break;
			default:
				PRINTK("unknown dv=%d for dev %d\n", dv, index);
				rv = SEN_NOT_AVAIL;
				break;
			}
		}

		if (rv)
			break;

		/* Poll up to 6 times, 300 ms apart, for the reply. */
		for (i = 0; i < 6; i++) {
			mdelay(300);
			dv = receive_from_AP(index, z90crypt.cdx,
					     devPtr->dev_resp_l,
					     devPtr->dev_resp_p, psmid);
			PDEBUG("dv returned by DQ = %d\n", dv);
			if (dv == DEV_REC_EXCEPTION) {
				rv = REC_FATAL_ERROR;
				PRINTKC("exception in dequeue %d\n",
					index);
				break;
			}
			switch (dv) {
			case DEV_ONLINE:
				rv = 0;
				break;
			case DEV_EMPTY:
				rv = REC_EMPTY;
				break;
			case DEV_NO_WORK:
				rv = REC_NO_WORK;
				break;
			case DEV_BAD_MESSAGE:
			case DEV_GONE:
			default:
				rv = REC_NO_RESPONSE;
				break;
			}
			/* Keep polling only while the device says no work. */
			if ((rv != 0) && (rv != REC_NO_WORK))
				break;
			if (rv == 0)
				break;
		}
		if (rv)
			break;
		/* Reply starting 0x00 0x86 identifies a PCICC. */
		rv = (devPtr->dev_resp_p[0] == 0x00) &&
		     (devPtr->dev_resp_p[1] == 0x86);
		if (rv)
			devPtr->dev_type = PCICC;
		else
			devPtr->dev_type = PCICA;
		rv = 0;
	} while (0);
	/* In a general error case, the card is not marked online */
	return rv;
}
3244
/*
 * Canned test request used by probe_PCIXCC_type() to distinguish MCL2
 * from MCL3 microcode on a PCIXCC card; the reply's CPRBX return/reason
 * codes (8/33 => MCL2) decide the device type.  Do not edit the bytes.
 */
static unsigned char MCL3_testmsg[] = {
0x00,0x00,0x00,0x00,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,0xEE,
0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x43,0x41,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x00,0x00,0x00,0x01,0xC4,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xDC,0x02,0x00,0x00,0x00,0x54,0x32,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xE8,0x00,0x00,0x00,0x00,0x00,0x00,0x07,0x24,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x50,0x4B,0x00,0x0A,0x4D,0x52,0x50,0x20,0x20,0x20,0x20,0x20,
0x00,0x42,0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0A,0x0B,0x0C,0x0D,
0x0E,0x0F,0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,0x99,0xAA,0xBB,0xCC,0xDD,
0xEE,0xFF,0xFF,0xEE,0xDD,0xCC,0xBB,0xAA,0x99,0x88,0x77,0x66,0x55,0x44,0x33,0x22,
0x11,0x00,0x01,0x23,0x45,0x67,0x89,0xAB,0xCD,0xEF,0xFE,0xDC,0xBA,0x98,0x76,0x54,
0x32,0x10,0x00,0x9A,0x00,0x98,0x00,0x00,0x1E,0x00,0x00,0x94,0x00,0x00,0x00,0x00,
0x04,0x00,0x00,0x8C,0x00,0x00,0x00,0x40,0x02,0x00,0x00,0x40,0xBA,0xE8,0x23,0x3C,
0x75,0xF3,0x91,0x61,0xD6,0x73,0x39,0xCF,0x7B,0x6D,0x8E,0x61,0x97,0x63,0x9E,0xD9,
0x60,0x55,0xD6,0xC7,0xEF,0xF8,0x1E,0x63,0x95,0x17,0xCC,0x28,0x45,0x60,0x11,0xC5,
0xC4,0x4E,0x66,0xC6,0xE6,0xC3,0xDE,0x8A,0x19,0x30,0xCF,0x0E,0xD7,0xAA,0xDB,0x01,
0xD8,0x00,0xBB,0x8F,0x39,0x9F,0x64,0x28,0xF5,0x7A,0x77,0x49,0xCC,0x6B,0xA3,0x91,
0x97,0x70,0xE7,0x60,0x1E,0x39,0xE1,0xE5,0x33,0xE1,0x15,0x63,0x69,0x08,0x80,0x4C,
0x67,0xC4,0x41,0x8F,0x48,0xDF,0x26,0x98,0xF1,0xD5,0x8D,0x88,0xD9,0x6A,0xA4,0x96,
0xC5,0x84,0xD9,0x30,0x49,0x67,0x7D,0x19,0xB1,0xB3,0x45,0x4D,0xB2,0x53,0x9A,0x47,
0x3C,0x7C,0x55,0xBF,0xCC,0x85,0x00,0x36,0xF1,0x3D,0x93,0x53
};
3282
3283 static int
3284 probe_PCIXCC_type(struct device *devPtr)
3285 {
3286 int rv, dv, i, index, length;
3287 unsigned char psmid[8];
3288 static unsigned char loc_testmsg[548];
3289 struct CPRBX *cprbx_p;
3290
3291 index = devPtr->dev_self_x;
3292 rv = 0;
3293 do {
3294 memcpy(loc_testmsg, MCL3_testmsg, sizeof(MCL3_testmsg));
3295 length = sizeof(MCL3_testmsg) - 0x0C;
3296 dv = send_to_AP(index, z90crypt.cdx, length, loc_testmsg);
3297 if (dv) {
3298 PDEBUG("dv returned = %d\n", dv);
3299 if (dv == DEV_SEN_EXCEPTION) {
3300 rv = SEN_FATAL_ERROR;
3301 PRINTKC("exception in send to AP %d\n", index);
3302 break;
3303 }
3304 PDEBUG("return value from send_to_AP: %d\n", rv);
3305 switch (dv) {
3306 case DEV_GONE:
3307 PDEBUG("dev %d not available\n", index);
3308 rv = SEN_NOT_AVAIL;
3309 break;
3310 case DEV_ONLINE:
3311 rv = 0;
3312 break;
3313 case DEV_EMPTY:
3314 rv = SEN_NOT_AVAIL;
3315 break;
3316 case DEV_NO_WORK:
3317 rv = SEN_FATAL_ERROR;
3318 break;
3319 case DEV_BAD_MESSAGE:
3320 rv = SEN_USER_ERROR;
3321 break;
3322 case DEV_QUEUE_FULL:
3323 rv = SEN_QUEUE_FULL;
3324 break;
3325 default:
3326 PRINTK("unknown dv=%d for dev %d\n", dv, index);
3327 rv = SEN_NOT_AVAIL;
3328 break;
3329 }
3330 }
3331
3332 if (rv)
3333 break;
3334
3335 for (i = 0; i < 6; i++) {
3336 mdelay(300);
3337 dv = receive_from_AP(index, z90crypt.cdx,
3338 devPtr->dev_resp_l,
3339 devPtr->dev_resp_p, psmid);
3340 PDEBUG("dv returned by DQ = %d\n", dv);
3341 if (dv == DEV_REC_EXCEPTION) {
3342 rv = REC_FATAL_ERROR;
3343 PRINTKC("exception in dequeue %d\n",
3344 index);
3345 break;
3346 }
3347 switch (dv) {
3348 case DEV_ONLINE:
3349 rv = 0;
3350 break;
3351 case DEV_EMPTY:
3352 rv = REC_EMPTY;
3353 break;
3354 case DEV_NO_WORK:
3355 rv = REC_NO_WORK;
3356 break;
3357 case DEV_BAD_MESSAGE:
3358 case DEV_GONE:
3359 default:
3360 rv = REC_NO_RESPONSE;
3361 break;
3362 }
3363 if ((rv != 0) && (rv != REC_NO_WORK))
3364 break;
3365 if (rv == 0)
3366 break;
3367 }
3368 if (rv)
3369 break;
3370 cprbx_p = (struct CPRBX *) (devPtr->dev_resp_p + 48);
3371 if ((cprbx_p->ccp_rtcode == 8) && (cprbx_p->ccp_rscode == 33)) {
3372 devPtr->dev_type = PCIXCC_MCL2;
3373 PDEBUG("device %d is MCL2\n", index);
3374 } else {
3375 devPtr->dev_type = PCIXCC_MCL3;
3376 PDEBUG("device %d is MCL3\n", index);
3377 }
3378 } while (0);
3379 /* In a general error case, the card is not marked online */
3380 return rv;
3381 }
3382
/* Register the driver's load/unload entry points with the module core. */
module_init(z90crypt_init_module);
module_exit(z90crypt_cleanup_module);