drivers/s390/crypto/ap_bus.c
1 /*
2 * Copyright IBM Corp. 2006, 2012
3 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
4 * Martin Schwidefsky <schwidefsky@de.ibm.com>
5 * Ralph Wuerthner <rwuerthn@de.ibm.com>
6 * Felix Beck <felix.beck@de.ibm.com>
7 * Holger Dengler <hd@linux.vnet.ibm.com>
8 *
9 * Adjunct processor bus.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26 #define KMSG_COMPONENT "ap"
27 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
28
29 #include <linux/kernel_stat.h>
30 #include <linux/module.h>
31 #include <linux/init.h>
32 #include <linux/delay.h>
33 #include <linux/err.h>
34 #include <linux/interrupt.h>
35 #include <linux/workqueue.h>
36 #include <linux/slab.h>
37 #include <linux/notifier.h>
38 #include <linux/kthread.h>
39 #include <linux/mutex.h>
40 #include <asm/reset.h>
41 #include <asm/airq.h>
42 #include <linux/atomic.h>
43 #include <asm/isc.h>
44 #include <linux/hrtimer.h>
45 #include <linux/ktime.h>
46 #include <asm/facility.h>
47 #include <linux/crypto.h>
48
49 #include "ap_bus.h"
50
51 /* Some prototypes. */
52 static void ap_scan_bus(struct work_struct *);
53 static void ap_poll_all(unsigned long);
54 static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
55 static int ap_poll_thread_start(void);
56 static void ap_poll_thread_stop(void);
57 static void ap_request_timeout(unsigned long);
58 static inline void ap_schedule_poll_timer(void);
59 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
60 static int ap_device_remove(struct device *dev);
61 static int ap_device_probe(struct device *dev);
62 static void ap_interrupt_handler(struct airq_struct *airq);
63 static void ap_reset(struct ap_device *ap_dev);
64 static void ap_config_timeout(unsigned long ptr);
65 static int ap_select_domain(void);
66 static void ap_query_configuration(void);
67
68 /*
69 * Module description.
70 */
71 MODULE_AUTHOR("IBM Corporation");
72 MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
73 "Copyright IBM Corp. 2006, 2012");
74 MODULE_LICENSE("GPL");
75 MODULE_ALIAS_CRYPTO("z90crypt");
76
77 /*
78 * Module parameter
79 */
80 int ap_domain_index = -1; /* Adjunct Processor Domain Index */
81 module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP);
82 MODULE_PARM_DESC(domain, "domain index for ap devices");
83 EXPORT_SYMBOL(ap_domain_index);
84
85 static int ap_thread_flag = 0;
86 module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP);
87 MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
88
89 static struct device *ap_root_device = NULL;
90 static struct ap_config_info *ap_configuration;
91 static DEFINE_SPINLOCK(ap_device_list_lock);
92 static LIST_HEAD(ap_device_list);
93
94 /*
95 * Workqueue & timer for bus rescan.
96 */
97 static struct workqueue_struct *ap_work_queue;
98 static struct timer_list ap_config_timer;
99 static int ap_config_time = AP_CONFIG_TIME;
100 static DECLARE_WORK(ap_config_work, ap_scan_bus);
101
102 /*
103 * Tasklet & timer for AP request polling and interrupts
104 */
105 static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
106 static atomic_t ap_poll_requests = ATOMIC_INIT(0);
107 static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
108 static struct task_struct *ap_poll_kthread = NULL;
109 static DEFINE_MUTEX(ap_poll_thread_mutex);
110 static DEFINE_SPINLOCK(ap_poll_timer_lock);
111 static struct hrtimer ap_poll_timer;
112 /* In LPAR poll with 4kHz frequency, i.e. every 250000 nanoseconds.
113  * Under z/VM this is changed to 1500000 nanoseconds to match the z/VM polling rate. */
114 static unsigned long long poll_timeout = 250000;
115
116 /* Suspend flag */
117 static int ap_suspend_flag;
118 /* Flag to check if domain was set through module parameter domain=. This is
119  * important when suspend and resume are done in a z/VM environment where the
120 * domain might change. */
121 static int user_set_domain = 0;
122 static struct bus_type ap_bus_type;
123
124 /* Adapter interrupt definitions */
125 static int ap_airq_flag;
126
127 static struct airq_struct ap_airq = {
128 .handler = ap_interrupt_handler,
129 .isc = AP_ISC,
130 };
131
132 /**
133 * ap_using_interrupts() - Returns non-zero if interrupt support is
134 * available.
135 */
136 static inline int ap_using_interrupts(void)
137 {
138 return ap_airq_flag;
139 }
140
141 /**
142  * ap_instructions_available() - Test if AP instructions are available.
143 *
144 * Returns 0 if the AP instructions are installed.
145 */
146 static inline int ap_instructions_available(void)
147 {
148 register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
149 register unsigned long reg1 asm ("1") = -ENODEV;
150 register unsigned long reg2 asm ("2") = 0UL;
151
152 asm volatile(
153 " .long 0xb2af0000\n" /* PQAP(TAPQ) */
154 "0: la %1,0\n"
155 "1:\n"
156 EX_TABLE(0b, 1b)
157 : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
158 return reg1;
159 }
160
161 /**
162 * ap_interrupts_available(): Test if AP interrupts are available.
163 *
164 * Returns 1 if AP interrupts are available.
165 */
166 static int ap_interrupts_available(void)
167 {
168 return test_facility(2) && test_facility(65);
169 }
170
171 /**
172 * ap_configuration_available(): Test if AP configuration
173 * information is available.
174 *
175 * Returns 1 if AP configuration information is available.
176 */
177 #ifdef CONFIG_64BIT
178 static int ap_configuration_available(void)
179 {
180 return test_facility(2) && test_facility(12);
181 }
182 #endif
183
184 /**
185 * ap_test_queue(): Test adjunct processor queue.
186 * @qid: The AP queue number
187 * @queue_depth: Pointer to queue depth value
188 * @device_type: Pointer to device type value
189 *
190 * Returns AP queue status structure.
191 */
192 static inline struct ap_queue_status
193 ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
194 {
195 register unsigned long reg0 asm ("0") = qid;
196 register struct ap_queue_status reg1 asm ("1");
197 register unsigned long reg2 asm ("2") = 0UL;
198
199 asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
200 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
201 *device_type = (int) (reg2 >> 24);
202 *queue_depth = (int) (reg2 & 0xff);
203 return reg1;
204 }
205
206 /**
207 * ap_reset_queue(): Reset adjunct processor queue.
208 * @qid: The AP queue number
209 *
210 * Returns AP queue status structure.
211 */
212 static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
213 {
214 register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
215 register struct ap_queue_status reg1 asm ("1");
216 register unsigned long reg2 asm ("2") = 0UL;
217
218 asm volatile(
219 ".long 0xb2af0000" /* PQAP(RAPQ) */
220 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
221 return reg1;
222 }
223
224 #ifdef CONFIG_64BIT
225 /**
226 * ap_queue_interruption_control(): Enable interruption for a specific AP.
227 * @qid: The AP queue number
228 * @ind: The notification indicator byte
229 *
230 * Returns AP queue status.
231 */
232 static inline struct ap_queue_status
233 ap_queue_interruption_control(ap_qid_t qid, void *ind)
234 {
235 register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
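	/* reg1 on entry: the interruption-request (IR) enable flag combined with the interruption sub class AP_ISC */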
236 register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
237 register struct ap_queue_status reg1_out asm ("1");
238 register void *reg2 asm ("2") = ind;
239 asm volatile(
240 ".long 0xb2af0000" /* PQAP(AQIC) */
241 : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
242 :
243 : "cc" );
244 return reg1_out;
245 }
246 #endif
247
248 #ifdef CONFIG_64BIT
249 static inline struct ap_queue_status
250 __ap_query_functions(ap_qid_t qid, unsigned int *functions)
251 {
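	/* TAPQ with the T bit (1UL << 23) set: the AP facility bits are returned in the upper half of reg2 */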
252 register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
253 register struct ap_queue_status reg1 asm ("1") = AP_QUEUE_STATUS_INVALID;
254 register unsigned long reg2 asm ("2");
255
256 asm volatile(
257 ".long 0xb2af0000\n" /* PQAP(TAPQ) */
258 "0:\n"
259 EX_TABLE(0b, 0b)
260 : "+d" (reg0), "+d" (reg1), "=d" (reg2)
261 :
262 : "cc");
263
264 *functions = (unsigned int)(reg2 >> 32);
265 return reg1;
266 }
267 #endif
268
269 #ifdef CONFIG_64BIT
270 static inline int __ap_query_configuration(struct ap_config_info *config)
271 {
272 register unsigned long reg0 asm ("0") = 0x04000000UL;
273 register unsigned long reg1 asm ("1") = -EINVAL;
274 register unsigned char *reg2 asm ("2") = (unsigned char *)config;
275
276 asm volatile(
277 ".long 0xb2af0000\n" /* PQAP(QCI) */
278 "0: la %1,0\n"
279 "1:\n"
280 EX_TABLE(0b, 1b)
281 : "+d" (reg0), "+d" (reg1), "+d" (reg2)
282 :
283 : "cc");
284
285 return reg1;
286 }
287 #endif
288
289 /**
290 * ap_query_functions(): Query supported functions.
291 * @qid: The AP queue number
292 * @functions: Pointer to functions field.
293 *
294 * Returns
295 * 0 on success.
296 * -ENODEV if queue not valid.
297 * -EBUSY if device busy.
298  * -EINVAL if the query function is not supported.
299 */
300 static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
301 {
302 #ifdef CONFIG_64BIT
303 struct ap_queue_status status;
304 int i;
305 status = __ap_query_functions(qid, functions);
306
307 for (i = 0; i < AP_MAX_RESET; i++) {
308 if (ap_queue_status_invalid_test(&status))
309 return -ENODEV;
310
311 switch (status.response_code) {
312 case AP_RESPONSE_NORMAL:
313 return 0;
314 case AP_RESPONSE_RESET_IN_PROGRESS:
315 case AP_RESPONSE_BUSY:
316 break;
317 case AP_RESPONSE_Q_NOT_AVAIL:
318 case AP_RESPONSE_DECONFIGURED:
319 case AP_RESPONSE_CHECKSTOPPED:
320 case AP_RESPONSE_INVALID_ADDRESS:
321 return -ENODEV;
322 case AP_RESPONSE_OTHERWISE_CHANGED:
323 break;
324 default:
325 break;
326 }
327 if (i < AP_MAX_RESET - 1) {
328 udelay(5);
329 status = __ap_query_functions(qid, functions);
330 }
331 }
332 return -EBUSY;
333 #else
334 return -EINVAL;
335 #endif
336 }
337
338 /**
339 * ap_queue_enable_interruption(): Enable interruption on an AP.
340 * @qid: The AP queue number
341 * @ind: the notification indicator byte
342 *
343 * Enables interruption on AP queue via ap_queue_interruption_control(). Based
344 * on the return value it waits a while and tests the AP queue if interrupts
345 * have been switched on using ap_test_queue().
346 */
347 static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
348 {
349 #ifdef CONFIG_64BIT
350 struct ap_queue_status status;
351 int t_depth, t_device_type, rc, i;
352
353 rc = -EBUSY;
354 status = ap_queue_interruption_control(qid, ind);
355
356 for (i = 0; i < AP_MAX_RESET; i++) {
357 switch (status.response_code) {
358 case AP_RESPONSE_NORMAL:
359 if (status.int_enabled)
360 return 0;
361 break;
362 case AP_RESPONSE_RESET_IN_PROGRESS:
363 case AP_RESPONSE_BUSY:
364 if (i < AP_MAX_RESET - 1) {
365 udelay(5);
366 status = ap_queue_interruption_control(qid,
367 ind);
368 continue;
369 }
370 break;
371 case AP_RESPONSE_Q_NOT_AVAIL:
372 case AP_RESPONSE_DECONFIGURED:
373 case AP_RESPONSE_CHECKSTOPPED:
374 case AP_RESPONSE_INVALID_ADDRESS:
375 return -ENODEV;
376 case AP_RESPONSE_OTHERWISE_CHANGED:
377 if (status.int_enabled)
378 return 0;
379 break;
380 default:
381 break;
382 }
383 if (i < AP_MAX_RESET - 1) {
384 udelay(5);
385 status = ap_test_queue(qid, &t_depth, &t_device_type);
386 }
387 }
388 return rc;
389 #else
390 return -EINVAL;
391 #endif
392 }
393
394 /**
395 * __ap_send(): Send message to adjunct processor queue.
396 * @qid: The AP queue number
397 * @psmid: The program supplied message identifier
398 * @msg: The message text
399 * @length: The message length
400 * @special: Special Bit
401 *
402 * Returns AP queue status structure.
403 * Condition code 1 on NQAP can't happen because the L bit is 1.
404 * Condition code 2 on NQAP also means the send is incomplete,
405 * because a segment boundary was reached. The NQAP is repeated.
406 */
407 static inline struct ap_queue_status
408 __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
409 unsigned int special)
410 {
411 typedef struct { char _[length]; } msgblock;
412 register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
413 register struct ap_queue_status reg1 asm ("1");
414 register unsigned long reg2 asm ("2") = (unsigned long) msg;
415 register unsigned long reg3 asm ("3") = (unsigned long) length;
416 register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
417 register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
418
419 if (special == 1)
420 reg0 |= 0x400000UL;
421
422 asm volatile (
423 "0: .long 0xb2ad0042\n" /* NQAP */
424 " brc 2,0b"
425 : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
426 : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
427 : "cc" );
428 return reg1;
429 }
430
431 int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
432 {
433 struct ap_queue_status status;
434
435 status = __ap_send(qid, psmid, msg, length, 0);
436 switch (status.response_code) {
437 case AP_RESPONSE_NORMAL:
438 return 0;
439 case AP_RESPONSE_Q_FULL:
440 case AP_RESPONSE_RESET_IN_PROGRESS:
441 return -EBUSY;
442 case AP_RESPONSE_REQ_FAC_NOT_INST:
443 return -EINVAL;
444 default: /* Device is gone. */
445 return -ENODEV;
446 }
447 }
448 EXPORT_SYMBOL(ap_send);
449
450 /**
451 * __ap_recv(): Receive message from adjunct processor queue.
452 * @qid: The AP queue number
453 * @psmid: Pointer to program supplied message identifier
454 * @msg: The message text
455 * @length: The message length
456 *
457 * Returns AP queue status structure.
458 * Condition code 1 on DQAP means the receive has taken place
459 * but only partially. The response is incomplete, hence the
460 * DQAP is repeated.
461 * Condition code 2 on DQAP also means the receive is incomplete,
462 * this time because a segment boundary was reached. Again, the
463 * DQAP is repeated.
464 * Note that gpr2 is used by the DQAP instruction to keep track of
465 * any 'residual' length, in case the instruction gets interrupted.
466 * Hence it gets zeroed before the instruction.
467 */
468 static inline struct ap_queue_status
469 __ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
470 {
471 typedef struct { char _[length]; } msgblock;
472 register unsigned long reg0 asm("0") = qid | 0x80000000UL;
473 register struct ap_queue_status reg1 asm ("1");
474 register unsigned long reg2 asm("2") = 0UL;
475 register unsigned long reg4 asm("4") = (unsigned long) msg;
476 register unsigned long reg5 asm("5") = (unsigned long) length;
477 register unsigned long reg6 asm("6") = 0UL;
478 register unsigned long reg7 asm("7") = 0UL;
479
480
481 asm volatile(
482 "0: .long 0xb2ae0064\n" /* DQAP */
483 " brc 6,0b\n"
484 : "+d" (reg0), "=d" (reg1), "+d" (reg2),
485 "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
486 "=m" (*(msgblock *) msg) : : "cc" );
487 *psmid = (((unsigned long long) reg6) << 32) + reg7;
488 return reg1;
489 }
490
491 int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
492 {
493 struct ap_queue_status status;
494
495 status = __ap_recv(qid, psmid, msg, length);
496 switch (status.response_code) {
497 case AP_RESPONSE_NORMAL:
498 return 0;
499 case AP_RESPONSE_NO_PENDING_REPLY:
500 if (status.queue_empty)
501 return -ENOENT;
502 return -EBUSY;
503 case AP_RESPONSE_RESET_IN_PROGRESS:
504 return -EBUSY;
505 default:
506 return -ENODEV;
507 }
508 }
509 EXPORT_SYMBOL(ap_recv);
510
511 /**
512 * ap_query_queue(): Check if an AP queue is available.
513 * @qid: The AP queue number
514 * @queue_depth: Pointer to queue depth value
515 * @device_type: Pointer to device type value
516 *
517 * The test is repeated for AP_MAX_RESET times.
518 */
519 static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
520 {
521 struct ap_queue_status status;
522 int t_depth, t_device_type, rc, i;
523
524 rc = -EBUSY;
525 for (i = 0; i < AP_MAX_RESET; i++) {
526 status = ap_test_queue(qid, &t_depth, &t_device_type);
527 switch (status.response_code) {
528 case AP_RESPONSE_NORMAL:
529 *queue_depth = t_depth + 1;
530 *device_type = t_device_type;
531 rc = 0;
532 break;
533 case AP_RESPONSE_Q_NOT_AVAIL:
534 rc = -ENODEV;
535 break;
536 case AP_RESPONSE_RESET_IN_PROGRESS:
537 break;
538 case AP_RESPONSE_DECONFIGURED:
539 rc = -ENODEV;
540 break;
541 case AP_RESPONSE_CHECKSTOPPED:
542 rc = -ENODEV;
543 break;
544 case AP_RESPONSE_INVALID_ADDRESS:
545 rc = -ENODEV;
546 break;
547 case AP_RESPONSE_OTHERWISE_CHANGED:
548 break;
549 case AP_RESPONSE_BUSY:
550 break;
551 default:
552 BUG();
553 }
554 if (rc != -EBUSY)
555 break;
556 if (i < AP_MAX_RESET - 1)
557 udelay(5);
558 }
559 return rc;
560 }
561
562 /**
563 * ap_init_queue(): Reset an AP queue.
564 * @qid: The AP queue number
565 *
566 * Reset an AP queue and wait for it to become available again.
567 */
568 static int ap_init_queue(ap_qid_t qid)
569 {
570 struct ap_queue_status status;
571 int rc, dummy, i;
572
573 rc = -ENODEV;
574 status = ap_reset_queue(qid);
575 for (i = 0; i < AP_MAX_RESET; i++) {
576 switch (status.response_code) {
577 case AP_RESPONSE_NORMAL:
578 if (status.queue_empty)
579 rc = 0;
580 break;
581 case AP_RESPONSE_Q_NOT_AVAIL:
582 case AP_RESPONSE_DECONFIGURED:
583 case AP_RESPONSE_CHECKSTOPPED:
584 i = AP_MAX_RESET; /* return with -ENODEV */
585 break;
586 case AP_RESPONSE_RESET_IN_PROGRESS:
587 rc = -EBUSY;
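			/* fall through */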
588 case AP_RESPONSE_BUSY:
589 default:
590 break;
591 }
592 if (rc != -ENODEV && rc != -EBUSY)
593 break;
594 if (i < AP_MAX_RESET - 1) {
595 /* Time we are waiting until we give up (0.7sec * 90).
596  * Since the actual request (in progress) will not be
597  * interrupted immediately by the reset command,
598  * we have to be patient. In the worst case we have to
599 * wait 60sec + reset time (some msec).
600 */
601 schedule_timeout(AP_RESET_TIMEOUT);
602 status = ap_test_queue(qid, &dummy, &dummy);
603 }
604 }
605 if (rc == 0 && ap_using_interrupts()) {
606 rc = ap_queue_enable_interruption(qid, ap_airq.lsi_ptr);
607 /* If interruption mode is supported by the machine,
608  * but an AP cannot be enabled for interruption, then
609 * the AP will be discarded. */
610 if (rc)
611 pr_err("Registering adapter interrupts for "
612 "AP %d failed\n", AP_QID_DEVICE(qid));
613 }
614 return rc;
615 }
616
617 /**
618 * ap_increase_queue_count(): Arm request timeout.
619 * @ap_dev: Pointer to an AP device.
620 *
621 * Arm request timeout if an AP device was idle and a new request is submitted.
622 */
623 static void ap_increase_queue_count(struct ap_device *ap_dev)
624 {
625 int timeout = ap_dev->drv->request_timeout;
626
627 ap_dev->queue_count++;
628 if (ap_dev->queue_count == 1) {
629 mod_timer(&ap_dev->timeout, jiffies + timeout);
630 ap_dev->reset = AP_RESET_ARMED;
631 }
632 }
633
634 /**
635 * ap_decrease_queue_count(): Decrease queue count.
636 * @ap_dev: Pointer to an AP device.
637 *
638 * If AP device is still alive, re-schedule request timeout if there are still
639 * pending requests.
640 */
641 static void ap_decrease_queue_count(struct ap_device *ap_dev)
642 {
643 int timeout = ap_dev->drv->request_timeout;
644
645 ap_dev->queue_count--;
646 if (ap_dev->queue_count > 0)
647 mod_timer(&ap_dev->timeout, jiffies + timeout);
648 else
649 /*
650  * The timeout timer should be disabled now - since
651 * del_timer_sync() is very expensive, we just tell via the
652 * reset flag to ignore the pending timeout timer.
653 */
654 ap_dev->reset = AP_RESET_IGNORE;
655 }
656
657 /*
658 * AP device related attributes.
659 */
660 static ssize_t ap_hwtype_show(struct device *dev,
661 struct device_attribute *attr, char *buf)
662 {
663 struct ap_device *ap_dev = to_ap_dev(dev);
664 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
665 }
666
667 static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
668
669 static ssize_t ap_raw_hwtype_show(struct device *dev,
670 struct device_attribute *attr, char *buf)
671 {
672 struct ap_device *ap_dev = to_ap_dev(dev);
673
674 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->raw_hwtype);
675 }
676
677 static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL);
678
679 static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
680 char *buf)
681 {
682 struct ap_device *ap_dev = to_ap_dev(dev);
683 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
684 }
685
686 static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
687 static ssize_t ap_request_count_show(struct device *dev,
688 struct device_attribute *attr,
689 char *buf)
690 {
691 struct ap_device *ap_dev = to_ap_dev(dev);
692 int rc;
693
694 spin_lock_bh(&ap_dev->lock);
695 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
696 spin_unlock_bh(&ap_dev->lock);
697 return rc;
698 }
699
700 static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
701
702 static ssize_t ap_requestq_count_show(struct device *dev,
703 struct device_attribute *attr, char *buf)
704 {
705 struct ap_device *ap_dev = to_ap_dev(dev);
706 int rc;
707
708 spin_lock_bh(&ap_dev->lock);
709 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count);
710 spin_unlock_bh(&ap_dev->lock);
711 return rc;
712 }
713
714 static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);
715
716 static ssize_t ap_pendingq_count_show(struct device *dev,
717 struct device_attribute *attr, char *buf)
718 {
719 struct ap_device *ap_dev = to_ap_dev(dev);
720 int rc;
721
722 spin_lock_bh(&ap_dev->lock);
723 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count);
724 spin_unlock_bh(&ap_dev->lock);
725 return rc;
726 }
727
728 static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);
729
730 static ssize_t ap_modalias_show(struct device *dev,
731 struct device_attribute *attr, char *buf)
732 {
733 return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
734 }
735
736 static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
737
738 static ssize_t ap_functions_show(struct device *dev,
739 struct device_attribute *attr, char *buf)
740 {
741 struct ap_device *ap_dev = to_ap_dev(dev);
742 return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions);
743 }
744
745 static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);
746
747 static struct attribute *ap_dev_attrs[] = {
748 &dev_attr_hwtype.attr,
749 &dev_attr_raw_hwtype.attr,
750 &dev_attr_depth.attr,
751 &dev_attr_request_count.attr,
752 &dev_attr_requestq_count.attr,
753 &dev_attr_pendingq_count.attr,
754 &dev_attr_modalias.attr,
755 &dev_attr_ap_functions.attr,
756 NULL
757 };
758 static struct attribute_group ap_dev_attr_group = {
759 .attrs = ap_dev_attrs
760 };
761
762 /**
763 * ap_bus_match()
764 * @dev: Pointer to device
765 * @drv: Pointer to device_driver
766 *
767 * AP bus driver registration/unregistration.
768 */
769 static int ap_bus_match(struct device *dev, struct device_driver *drv)
770 {
771 struct ap_device *ap_dev = to_ap_dev(dev);
772 struct ap_driver *ap_drv = to_ap_drv(drv);
773 struct ap_device_id *id;
774
775 /*
776 * Compare device type of the device with the list of
777 * supported types of the device_driver.
778 */
779 for (id = ap_drv->ids; id->match_flags; id++) {
780 if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
781 (id->dev_type != ap_dev->device_type))
782 continue;
783 return 1;
784 }
785 return 0;
786 }
787
788 /**
789 * ap_uevent(): Uevent function for AP devices.
790 * @dev: Pointer to device
791 * @env: Pointer to kobj_uevent_env
792 *
793 * It sets up a single environment variable DEV_TYPE which contains the
794 * hardware device type.
795 */
796 static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
797 {
798 struct ap_device *ap_dev = to_ap_dev(dev);
799 int retval = 0;
800
801 if (!ap_dev)
802 return -ENODEV;
803
804 /* Set up DEV_TYPE environment variable. */
805 retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
806 if (retval)
807 return retval;
808
809 /* Add MODALIAS= */
810 retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
811
812 return retval;
813 }
814
815 static int ap_bus_suspend(struct device *dev, pm_message_t state)
816 {
817 struct ap_device *ap_dev = to_ap_dev(dev);
818 unsigned long flags;
819
820 if (!ap_suspend_flag) {
821 ap_suspend_flag = 1;
822
823 /* Disable scanning for devices, so that we do not scan
824  * for them again after removing them.
825 */
826 del_timer_sync(&ap_config_timer);
827 if (ap_work_queue != NULL) {
828 destroy_workqueue(ap_work_queue);
829 ap_work_queue = NULL;
830 }
831
832 tasklet_disable(&ap_tasklet);
833 }
834 /* Poll on the device until all requests are finished. */
835 do {
836 flags = 0;
837 spin_lock_bh(&ap_dev->lock);
838 __ap_poll_device(ap_dev, &flags);
839 spin_unlock_bh(&ap_dev->lock);
840 } while ((flags & 1) || (flags & 2));
841
842 spin_lock_bh(&ap_dev->lock);
843 ap_dev->unregistered = 1;
844 spin_unlock_bh(&ap_dev->lock);
845
846 return 0;
847 }
848
849 static int ap_bus_resume(struct device *dev)
850 {
851 struct ap_device *ap_dev = to_ap_dev(dev);
852 int rc;
853
854 if (ap_suspend_flag) {
855 ap_suspend_flag = 0;
856 if (ap_interrupts_available()) {
857 if (!ap_using_interrupts()) {
858 rc = register_adapter_interrupt(&ap_airq);
859 ap_airq_flag = (rc == 0);
860 }
861 } else {
862 if (ap_using_interrupts()) {
863 unregister_adapter_interrupt(&ap_airq);
864 ap_airq_flag = 0;
865 }
866 }
867 ap_query_configuration();
868 if (!user_set_domain) {
869 ap_domain_index = -1;
870 ap_select_domain();
871 }
872 init_timer(&ap_config_timer);
873 ap_config_timer.function = ap_config_timeout;
874 ap_config_timer.data = 0;
875 ap_config_timer.expires = jiffies + ap_config_time * HZ;
876 add_timer(&ap_config_timer);
877 ap_work_queue = create_singlethread_workqueue("kapwork");
878 if (!ap_work_queue)
879 return -ENOMEM;
880 tasklet_enable(&ap_tasklet);
881 if (!ap_using_interrupts())
882 ap_schedule_poll_timer();
883 else
884 tasklet_schedule(&ap_tasklet);
885 if (ap_thread_flag)
886 rc = ap_poll_thread_start();
887 else
888 rc = 0;
889 } else
890 rc = 0;
891 if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
892 spin_lock_bh(&ap_dev->lock);
893 ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
894 ap_domain_index);
895 spin_unlock_bh(&ap_dev->lock);
896 }
897 queue_work(ap_work_queue, &ap_config_work);
898
899 return rc;
900 }
901
902 static struct bus_type ap_bus_type = {
903 .name = "ap",
904 .match = &ap_bus_match,
905 .uevent = &ap_uevent,
906 .suspend = ap_bus_suspend,
907 .resume = ap_bus_resume
908 };
909
910 static int ap_device_probe(struct device *dev)
911 {
912 struct ap_device *ap_dev = to_ap_dev(dev);
913 struct ap_driver *ap_drv = to_ap_drv(dev->driver);
914 int rc;
915
916 ap_dev->drv = ap_drv;
917
918 spin_lock_bh(&ap_device_list_lock);
919 list_add(&ap_dev->list, &ap_device_list);
920 spin_unlock_bh(&ap_device_list_lock);
921
922 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
923 if (rc) {
924 spin_lock_bh(&ap_device_list_lock);
925 list_del_init(&ap_dev->list);
926 spin_unlock_bh(&ap_device_list_lock);
927 }
928 return rc;
929 }
930
931 /**
932 * __ap_flush_queue(): Flush requests.
933 * @ap_dev: Pointer to the AP device
934 *
935 * Flush all requests from the request/pending queue of an AP device.
936 */
937 static void __ap_flush_queue(struct ap_device *ap_dev)
938 {
939 struct ap_message *ap_msg, *next;
940
941 list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
942 list_del_init(&ap_msg->list);
943 ap_dev->pendingq_count--;
944 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
945 }
946 list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
947 list_del_init(&ap_msg->list);
948 ap_dev->requestq_count--;
949 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
950 }
951 }
952
953 void ap_flush_queue(struct ap_device *ap_dev)
954 {
955 spin_lock_bh(&ap_dev->lock);
956 __ap_flush_queue(ap_dev);
957 spin_unlock_bh(&ap_dev->lock);
958 }
959 EXPORT_SYMBOL(ap_flush_queue);
960
961 static int ap_device_remove(struct device *dev)
962 {
963 struct ap_device *ap_dev = to_ap_dev(dev);
964 struct ap_driver *ap_drv = ap_dev->drv;
965
966 ap_flush_queue(ap_dev);
967 del_timer_sync(&ap_dev->timeout);
968 spin_lock_bh(&ap_device_list_lock);
969 list_del_init(&ap_dev->list);
970 spin_unlock_bh(&ap_device_list_lock);
971 if (ap_drv->remove)
972 ap_drv->remove(ap_dev);
973 spin_lock_bh(&ap_dev->lock);
974 atomic_sub(ap_dev->queue_count, &ap_poll_requests);
975 spin_unlock_bh(&ap_dev->lock);
976 return 0;
977 }
978
979 int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
980 char *name)
981 {
982 struct device_driver *drv = &ap_drv->driver;
983
984 drv->bus = &ap_bus_type;
985 drv->probe = ap_device_probe;
986 drv->remove = ap_device_remove;
987 drv->owner = owner;
988 drv->name = name;
989 return driver_register(drv);
990 }
991 EXPORT_SYMBOL(ap_driver_register);
992
993 void ap_driver_unregister(struct ap_driver *ap_drv)
994 {
995 driver_unregister(&ap_drv->driver);
996 }
997 EXPORT_SYMBOL(ap_driver_unregister);
998
999 void ap_bus_force_rescan(void)
1000 {
1001 /* reconfigure the AP bus rescan timer. */
1002 mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
1003 /* process an asynchronous bus rescan */
1004 queue_work(ap_work_queue, &ap_config_work);
1005 flush_work(&ap_config_work);
1006 }
1007 EXPORT_SYMBOL(ap_bus_force_rescan);
1008
1009 /*
1010 * AP bus attributes.
1011 */
1012 static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
1013 {
1014 return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
1015 }
1016
1017 static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);
1018
1019 static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
1020 {
1021 if (ap_configuration != NULL) { /* QCI info available */
1022 if (test_facility(76)) { /* format 1 - 256 bit domain field */
1023 return snprintf(buf, PAGE_SIZE,
1024 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
1025 ap_configuration->adm[0], ap_configuration->adm[1],
1026 ap_configuration->adm[2], ap_configuration->adm[3],
1027 ap_configuration->adm[4], ap_configuration->adm[5],
1028 ap_configuration->adm[6], ap_configuration->adm[7]);
1029 } else { /* format 0 - 16 bit domain field */
1030 return snprintf(buf, PAGE_SIZE, "%08x%08x\n",
1031 ap_configuration->adm[0], ap_configuration->adm[1]);
1032 }
1033 } else {
1034 return snprintf(buf, PAGE_SIZE, "not supported\n");
1035 }
1036 }
1037
1038 static BUS_ATTR(ap_control_domain_mask, 0444,
1039 ap_control_domain_mask_show, NULL);
1040
1041 static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
1042 {
1043 return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
1044 }
1045
1046 static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
1047 {
1048 return snprintf(buf, PAGE_SIZE, "%d\n",
1049 ap_using_interrupts() ? 1 : 0);
1050 }
1051
1052 static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);
1053
1054 static ssize_t ap_config_time_store(struct bus_type *bus,
1055 const char *buf, size_t count)
1056 {
1057 int time;
1058
1059 if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
1060 return -EINVAL;
1061 ap_config_time = time;
1062 if (!timer_pending(&ap_config_timer) ||
1063 !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
1064 ap_config_timer.expires = jiffies + ap_config_time * HZ;
1065 add_timer(&ap_config_timer);
1066 }
1067 return count;
1068 }
1069
1070 static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);
1071
1072 static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
1073 {
1074 return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
1075 }
1076
1077 static ssize_t ap_poll_thread_store(struct bus_type *bus,
1078 const char *buf, size_t count)
1079 {
1080 int flag, rc;
1081
1082 if (sscanf(buf, "%d\n", &flag) != 1)
1083 return -EINVAL;
1084 if (flag) {
1085 rc = ap_poll_thread_start();
1086 if (rc)
1087 return rc;
1088 }
1089 else
1090 ap_poll_thread_stop();
1091 return count;
1092 }
1093
1094 static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);
1095
1096 static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
1097 {
1098 return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
1099 }
1100
1101 static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
1102 size_t count)
1103 {
1104 unsigned long long time;
1105 ktime_t hr_time;
1106
1107 /* 120 seconds = maximum poll interval */
1108 if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
1109 time > 120000000000ULL)
1110 return -EINVAL;
1111 poll_timeout = time;
1112 hr_time = ktime_set(0, poll_timeout);
1113
1114 if (!hrtimer_is_queued(&ap_poll_timer) ||
1115 !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) {
1116 hrtimer_set_expires(&ap_poll_timer, hr_time);
1117 hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
1118 }
1119 return count;
1120 }
1121
1122 static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);
1123
1124 static struct bus_attribute *const ap_bus_attrs[] = {
1125 &bus_attr_ap_domain,
1126 &bus_attr_ap_control_domain_mask,
1127 &bus_attr_config_time,
1128 &bus_attr_poll_thread,
1129 &bus_attr_ap_interrupts,
1130 &bus_attr_poll_timeout,
1131 NULL,
1132 };
1133
1134 static inline int ap_test_config(unsigned int *field, unsigned int nr)
1135 {
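	/* the configuration masks (apm, aqm, adm) are 256 bits wide */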
1136 if (nr > 0xFFu)
1137 return 0;
1138 return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
1139 }
1140
1141 /*
1142  * ap_test_config_card_id(): Test whether an AP card ID is configured.
1143 * @id AP card ID
1144 *
1145 * Returns 0 if the card is not configured
1146 * 1 if the card is configured or
1147 * if the configuration information is not available
1148 */
1149 static inline int ap_test_config_card_id(unsigned int id)
1150 {
1151 if (!ap_configuration)
1152 return 1;
1153 return ap_test_config(ap_configuration->apm, id);
1154 }
1155
1156 /*
1157  * ap_test_config_domain(): Test whether an AP usage domain is configured.
1158 * @domain AP usage domain ID
1159 *
1160 * Returns 0 if the usage domain is not configured
1161 * 1 if the usage domain is configured or
1162 * if the configuration information is not available
1163 */
1164 static inline int ap_test_config_domain(unsigned int domain)
1165 {
1166 if (!ap_configuration)
1167 return 1;
1168 return ap_test_config(ap_configuration->aqm, domain);
1169 }
1170
1171 /**
1172 * ap_query_configuration(): Query AP configuration information.
1173 *
1174 * Query information of installed cards and configured domains from AP.
1175 */
1176 static void ap_query_configuration(void)
1177 {
1178 #ifdef CONFIG_64BIT
1179 if (ap_configuration_available()) {
1180 if (!ap_configuration)
1181 ap_configuration =
1182 kzalloc(sizeof(struct ap_config_info),
1183 GFP_KERNEL);
1184 if (ap_configuration)
1185 __ap_query_configuration(ap_configuration);
1186 } else
1187 ap_configuration = NULL;
1188 #else
1189 ap_configuration = NULL;
1190 #endif
1191 }
1192
1193 /**
1194 * ap_select_domain(): Select an AP domain.
1195 *
1196 * Pick one of the 16 AP domains.
1197 */
1198 static int ap_select_domain(void)
1199 {
1200 int queue_depth, device_type, count, max_count, best_domain;
1201 ap_qid_t qid;
1202 int rc, i, j;
1203
1204 /* If APXA is not installed, only 16 domains can be defined */
1205 if (ap_configuration && !ap_configuration->ap_extended && (ap_domain_index > 15))
1206 return -EINVAL;
1207
1208 /*
1209 * We want to use a single domain. Either the one specified with
1210 * the "domain=" parameter or the domain with the maximum number
1211 * of devices.
1212 */
1213 if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
1214 /* Domain has already been selected. */
1215 return 0;
1216 best_domain = -1;
1217 max_count = 0;
1218 for (i = 0; i < AP_DOMAINS; i++) {
1219 if (!ap_test_config_domain(i))
1220 continue;
1221 count = 0;
1222 for (j = 0; j < AP_DEVICES; j++) {
1223 if (!ap_test_config_card_id(j))
1224 continue;
1225 qid = AP_MKQID(j, i);
1226 rc = ap_query_queue(qid, &queue_depth, &device_type);
1227 if (rc)
1228 continue;
1229 count++;
1230 }
1231 if (count > max_count) {
1232 max_count = count;
1233 best_domain = i;
1234 }
1235 }
1236 if (best_domain >= 0){
1237 ap_domain_index = best_domain;
1238 return 0;
1239 }
1240 return -ENODEV;
1241 }
1242
1243 /**
1244 * ap_probe_device_type(): Find the device type of an AP.
1245 * @ap_dev: pointer to the AP device.
1246 *
1247 * Find the device type if query queue returned a device type of 0.
1248 */
1249 static int ap_probe_device_type(struct ap_device *ap_dev)
1250 {
1251 static unsigned char msg[] = {
1252 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
1253 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1254 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
1255 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1256 0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
1257 0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
1258 0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
1259 0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
1260 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1261 0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
1262 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1263 0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
1264 0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
1265 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1266 0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
1267 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1268 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1269 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1270 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1271 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1272 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1273 0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
1274 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
1275 0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
1276 0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
1277 0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
1278 0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
1279 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
1280 0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
1281 0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
1282 0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
1283 0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
1284 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
1285 0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
1286 0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
1287 0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
1288 0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
1289 0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
1290 0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
1291 0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
1292 0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
1293 0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
1294 0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
1295 0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
1296 0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
1297 };
1298 struct ap_queue_status status;
1299 unsigned long long psmid;
1300 char *reply;
1301 int rc, i;
1302
1303 reply = (void *) get_zeroed_page(GFP_KERNEL);
1304 if (!reply) {
1305 rc = -ENOMEM;
1306 goto out;
1307 }
1308
1309 status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
1310 msg, sizeof(msg), 0);
1311 if (status.response_code != AP_RESPONSE_NORMAL) {
1312 rc = -ENODEV;
1313 goto out_free;
1314 }
1315
1316 /* Wait for the test message to complete. */
1317 for (i = 0; i < 6; i++) {
1318 mdelay(300);
1319 status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
1320 if (status.response_code == AP_RESPONSE_NORMAL &&
1321 psmid == 0x0102030405060708ULL)
1322 break;
1323 }
1324 if (i < 6) {
1325 /* Got an answer. */
1326 if (reply[0] == 0x00 && reply[1] == 0x86)
1327 ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
1328 else
1329 ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
1330 rc = 0;
1331 } else
1332 rc = -ENODEV;
1333
1334 out_free:
1335 free_page((unsigned long) reply);
1336 out:
1337 return rc;
1338 }
1339
1340 static void ap_interrupt_handler(struct airq_struct *airq)
1341 {
1342 inc_irq_stat(IRQIO_APB);
1343 tasklet_schedule(&ap_tasklet);
1344 }
1345
1346 /**
1347  * __ap_scan_bus(): Bus device match helper used by ap_scan_bus() with bus_find_device().
1348  * @dev: Pointer to device
1349  * @data: AP queue ID to match, cast to a pointer
1350  *
1351  * Returns 1 if the device's queue ID matches @data.
1352 */
1353 static int __ap_scan_bus(struct device *dev, void *data)
1354 {
1355 return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
1356 }
1357
1358 static void ap_device_release(struct device *dev)
1359 {
1360 struct ap_device *ap_dev = to_ap_dev(dev);
1361
1362 kfree(ap_dev);
1363 }
1364
1365 static void ap_scan_bus(struct work_struct *unused)
1366 {
1367 struct ap_device *ap_dev;
1368 struct device *dev;
1369 ap_qid_t qid;
1370 int queue_depth, device_type;
1371 unsigned int device_functions;
1372 int rc, i;
1373
1374 ap_query_configuration();
1375 if (ap_select_domain() != 0) {
1376 return;
1377 }
1378 for (i = 0; i < AP_DEVICES; i++) {
1379 qid = AP_MKQID(i, ap_domain_index);
1380 dev = bus_find_device(&ap_bus_type, NULL,
1381 (void *)(unsigned long)qid,
1382 __ap_scan_bus);
1383 if (ap_test_config_card_id(i))
1384 rc = ap_query_queue(qid, &queue_depth, &device_type);
1385 else
1386 rc = -ENODEV;
1387 if (dev) {
1388 if (rc == -EBUSY) {
1389 set_current_state(TASK_UNINTERRUPTIBLE);
1390 schedule_timeout(AP_RESET_TIMEOUT);
1391 rc = ap_query_queue(qid, &queue_depth,
1392 &device_type);
1393 }
1394 ap_dev = to_ap_dev(dev);
1395 spin_lock_bh(&ap_dev->lock);
1396 if (rc || ap_dev->unregistered) {
1397 spin_unlock_bh(&ap_dev->lock);
1398 if (ap_dev->unregistered)
1399 i--;
1400 device_unregister(dev);
1401 put_device(dev);
1402 continue;
1403 }
1404 spin_unlock_bh(&ap_dev->lock);
1405 put_device(dev);
1406 continue;
1407 }
1408 if (rc)
1409 continue;
1410 rc = ap_init_queue(qid);
1411 if (rc)
1412 continue;
1413 ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
1414 if (!ap_dev)
1415 break;
1416 ap_dev->qid = qid;
1417 ap_dev->queue_depth = queue_depth;
1418 ap_dev->unregistered = 1;
1419 spin_lock_init(&ap_dev->lock);
1420 INIT_LIST_HEAD(&ap_dev->pendingq);
1421 INIT_LIST_HEAD(&ap_dev->requestq);
1422 INIT_LIST_HEAD(&ap_dev->list);
1423 setup_timer(&ap_dev->timeout, ap_request_timeout,
1424 (unsigned long) ap_dev);
1425 switch (device_type) {
1426 case 0:
1427 /* device type probing for old cards */
1428 if (ap_probe_device_type(ap_dev)) {
1429 kfree(ap_dev);
1430 continue;
1431 }
1432 break;
1433 case 11:
1434 ap_dev->device_type = 10;
1435 break;
1436 default:
1437 ap_dev->device_type = device_type;
1438 }
1439 ap_dev->raw_hwtype = device_type;
1440
1441 rc = ap_query_functions(qid, &device_functions);
1442 if (!rc)
1443 ap_dev->functions = device_functions;
1444 else
1445 ap_dev->functions = 0u;
1446
1447 ap_dev->device.bus = &ap_bus_type;
1448 ap_dev->device.parent = ap_root_device;
1449 if (dev_set_name(&ap_dev->device, "card%02x",
1450 AP_QID_DEVICE(ap_dev->qid))) {
1451 kfree(ap_dev);
1452 continue;
1453 }
1454 ap_dev->device.release = ap_device_release;
1455 rc = device_register(&ap_dev->device);
1456 if (rc) {
1457 put_device(&ap_dev->device);
1458 continue;
1459 }
1460 /* Add device attributes. */
1461 rc = sysfs_create_group(&ap_dev->device.kobj,
1462 &ap_dev_attr_group);
1463 if (!rc) {
1464 spin_lock_bh(&ap_dev->lock);
1465 ap_dev->unregistered = 0;
1466 spin_unlock_bh(&ap_dev->lock);
1467 }
1468 else
1469 device_unregister(&ap_dev->device);
1470 }
1471 }
1472
1473 static void
1474 ap_config_timeout(unsigned long ptr)
1475 {
1476 queue_work(ap_work_queue, &ap_config_work);
1477 ap_config_timer.expires = jiffies + ap_config_time * HZ;
1478 add_timer(&ap_config_timer);
1479 }
1480
1481 /**
1482 * __ap_schedule_poll_timer(): Schedule poll timer.
1483 *
1484 * Set up the timer to run the poll tasklet
1485 */
1486 static inline void __ap_schedule_poll_timer(void)
1487 {
1488 ktime_t hr_time;
1489
1490 spin_lock_bh(&ap_poll_timer_lock);
1491 if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
1492 goto out;
1493 if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
1494 hr_time = ktime_set(0, poll_timeout);
1495 hrtimer_forward_now(&ap_poll_timer, hr_time);
1496 hrtimer_restart(&ap_poll_timer);
1497 }
1498 out:
1499 spin_unlock_bh(&ap_poll_timer_lock);
1500 }
1501
1502 /**
1503 * ap_schedule_poll_timer(): Schedule poll timer.
1504 *
1505 * Set up the timer to run the poll tasklet
1506 */
1507 static inline void ap_schedule_poll_timer(void)
1508 {
1509 if (ap_using_interrupts())
1510 return;
1511 __ap_schedule_poll_timer();
1512 }
1513
1514 /**
1515 * ap_poll_read(): Receive pending reply messages from an AP device.
1516 * @ap_dev: pointer to the AP device
1517 * @flags: pointer to control flags, bit 2^0 is set if another poll is
1518 * required, bit 2^1 is set if the poll timer needs to get armed
1519 *
1520 * Returns 0 if the device is still present, -ENODEV if not.
1521 */
1522 static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
1523 {
1524 struct ap_queue_status status;
1525 struct ap_message *ap_msg;
1526
1527 if (ap_dev->queue_count <= 0)
1528 return 0;
1529 status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
1530 ap_dev->reply->message, ap_dev->reply->length);
1531 switch (status.response_code) {
1532 case AP_RESPONSE_NORMAL:
1533 atomic_dec(&ap_poll_requests);
1534 ap_decrease_queue_count(ap_dev);
1535 list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
1536 if (ap_msg->psmid != ap_dev->reply->psmid)
1537 continue;
1538 list_del_init(&ap_msg->list);
1539 ap_dev->pendingq_count--;
1540 ap_msg->receive(ap_dev, ap_msg, ap_dev->reply);
1541 break;
1542 }
1543 if (ap_dev->queue_count > 0)
1544 *flags |= 1;
1545 break;
1546 case AP_RESPONSE_NO_PENDING_REPLY:
1547 if (status.queue_empty) {
1548 /* The card shouldn't forget requests but who knows. */
1549 atomic_sub(ap_dev->queue_count, &ap_poll_requests);
1550 ap_dev->queue_count = 0;
1551 list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
1552 ap_dev->requestq_count += ap_dev->pendingq_count;
1553 ap_dev->pendingq_count = 0;
1554 } else
1555 *flags |= 2;
1556 break;
1557 default:
1558 return -ENODEV;
1559 }
1560 return 0;
1561 }
1562
1563 /**
1564 * ap_poll_write(): Send messages from the request queue to an AP device.
1565 * @ap_dev: pointer to the AP device
1566 * @flags: pointer to control flags, bit 2^0 is set if another poll is
1567 * required, bit 2^1 is set if the poll timer needs to get armed
1568 *
1569 * Returns 0 if the device is still present, -ENODEV if not.
1570 */
1571 static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
1572 {
1573 struct ap_queue_status status;
1574 struct ap_message *ap_msg;
1575
1576 if (ap_dev->requestq_count <= 0 ||
1577 ap_dev->queue_count >= ap_dev->queue_depth)
1578 return 0;
1579 /* Start the next request on the queue. */
1580 ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
1581 status = __ap_send(ap_dev->qid, ap_msg->psmid,
1582 ap_msg->message, ap_msg->length, ap_msg->special);
1583 switch (status.response_code) {
1584 case AP_RESPONSE_NORMAL:
1585 atomic_inc(&ap_poll_requests);
1586 ap_increase_queue_count(ap_dev);
1587 list_move_tail(&ap_msg->list, &ap_dev->pendingq);
1588 ap_dev->requestq_count--;
1589 ap_dev->pendingq_count++;
1590 if (ap_dev->queue_count < ap_dev->queue_depth &&
1591 ap_dev->requestq_count > 0)
1592 *flags |= 1;
1593 *flags |= 2;
1594 break;
1595 case AP_RESPONSE_RESET_IN_PROGRESS:
1596 __ap_schedule_poll_timer();
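		/* fall through */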
1597 case AP_RESPONSE_Q_FULL:
1598 *flags |= 2;
1599 break;
1600 case AP_RESPONSE_MESSAGE_TOO_BIG:
1601 case AP_RESPONSE_REQ_FAC_NOT_INST:
1602 return -EINVAL;
1603 default:
1604 return -ENODEV;
1605 }
1606 return 0;
1607 }
1608
1609 /**
1610 * ap_poll_queue(): Poll AP device for pending replies and send new messages.
1611 * @ap_dev: pointer to the bus device
1612 * @flags: pointer to control flags, bit 2^0 is set if another poll is
1613 * required, bit 2^1 is set if the poll timer needs to get armed
1614 *
1615 * Poll AP device for pending replies and send new messages. If either
1616 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
1617 * Returns 0.
1618 */
1619 static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
1620 {
1621 int rc;
1622
1623 rc = ap_poll_read(ap_dev, flags);
1624 if (rc)
1625 return rc;
1626 return ap_poll_write(ap_dev, flags);
1627 }
1628
1629 /**
1630 * __ap_queue_message(): Queue a message to a device.
1631 * @ap_dev: pointer to the AP device
1632 * @ap_msg: the message to be queued
1633 *
1634 * Queue a message to a device. Returns 0 if successful.
1635 */
1636 static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1637 {
1638 struct ap_queue_status status;
1639
1640 if (list_empty(&ap_dev->requestq) &&
1641 ap_dev->queue_count < ap_dev->queue_depth) {
1642 status = __ap_send(ap_dev->qid, ap_msg->psmid,
1643 ap_msg->message, ap_msg->length,
1644 ap_msg->special);
1645 switch (status.response_code) {
1646 case AP_RESPONSE_NORMAL:
1647 list_add_tail(&ap_msg->list, &ap_dev->pendingq);
1648 atomic_inc(&ap_poll_requests);
1649 ap_dev->pendingq_count++;
1650 ap_increase_queue_count(ap_dev);
1651 ap_dev->total_request_count++;
1652 break;
1653 case AP_RESPONSE_Q_FULL:
1654 case AP_RESPONSE_RESET_IN_PROGRESS:
1655 list_add_tail(&ap_msg->list, &ap_dev->requestq);
1656 ap_dev->requestq_count++;
1657 ap_dev->total_request_count++;
1658 return -EBUSY;
1659 case AP_RESPONSE_REQ_FAC_NOT_INST:
1660 case AP_RESPONSE_MESSAGE_TOO_BIG:
1661 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
1662 return -EINVAL;
1663 default: /* Device is gone. */
1664 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
1665 return -ENODEV;
1666 }
1667 } else {
1668 list_add_tail(&ap_msg->list, &ap_dev->requestq);
1669 ap_dev->requestq_count++;
1670 ap_dev->total_request_count++;
1671 return -EBUSY;
1672 }
1673 ap_schedule_poll_timer();
1674 return 0;
1675 }
1676
1677 void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1678 {
1679 unsigned long flags;
1680 int rc;
1681
1682 /* For asynchronous message handling a valid receive-callback
1683 * is required. */
1684 BUG_ON(!ap_msg->receive);
1685
1686 spin_lock_bh(&ap_dev->lock);
1687 if (!ap_dev->unregistered) {
1688 /* Make room on the queue by polling for finished requests. */
1689 rc = ap_poll_queue(ap_dev, &flags);
1690 if (!rc)
1691 rc = __ap_queue_message(ap_dev, ap_msg);
1692 if (!rc)
1693 wake_up(&ap_poll_wait);
1694 if (rc == -ENODEV)
1695 ap_dev->unregistered = 1;
1696 } else {
1697 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
1698 rc = -ENODEV;
1699 }
1700 spin_unlock_bh(&ap_dev->lock);
1701 if (rc == -ENODEV)
1702 device_unregister(&ap_dev->device);
1703 }
1704 EXPORT_SYMBOL(ap_queue_message);
1705
1706 /**
1707 * ap_cancel_message(): Cancel a crypto request.
1708 * @ap_dev: The AP device that has the message queued
1709 * @ap_msg: The message that is to be removed
1710 *
1711 * Cancel a crypto request. This is done by removing the request
1712 * from the device pending or request queue. Note that the
1713 * request stays on the AP queue. When it finishes the message
1714 * reply will be discarded because the psmid can't be found.
1715 */
1716 void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1717 {
1718 struct ap_message *tmp;
1719
1720 spin_lock_bh(&ap_dev->lock);
1721 if (!list_empty(&ap_msg->list)) {
1722 list_for_each_entry(tmp, &ap_dev->pendingq, list)
1723 if (tmp->psmid == ap_msg->psmid) {
1724 ap_dev->pendingq_count--;
1725 goto found;
1726 }
1727 ap_dev->requestq_count--;
1728 found:
1729 list_del_init(&ap_msg->list);
1730 }
1731 spin_unlock_bh(&ap_dev->lock);
1732 }
1733 EXPORT_SYMBOL(ap_cancel_message);
1734
1735 /**
1736 * ap_poll_timeout(): AP receive polling for finished AP requests.
1737 * @unused: Unused pointer.
1738 *
1739 * Schedules the AP tasklet using a high resolution timer.
1740 */
1741 static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
1742 {
1743 tasklet_schedule(&ap_tasklet);
1744 return HRTIMER_NORESTART;
1745 }
1746
1747 /**
1748  * ap_reset(): Reset a non-responding AP device.
1749 * @ap_dev: Pointer to the AP device
1750 *
1751  * Reset a non-responding AP device and move all requests from the
1752 * pending queue to the request queue.
1753 */
1754 static void ap_reset(struct ap_device *ap_dev)
1755 {
1756 int rc;
1757
1758 ap_dev->reset = AP_RESET_IGNORE;
1759 atomic_sub(ap_dev->queue_count, &ap_poll_requests);
1760 ap_dev->queue_count = 0;
1761 list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
1762 ap_dev->requestq_count += ap_dev->pendingq_count;
1763 ap_dev->pendingq_count = 0;
1764 rc = ap_init_queue(ap_dev->qid);
1765 if (rc == -ENODEV)
1766 ap_dev->unregistered = 1;
1767 else
1768 __ap_schedule_poll_timer();
1769 }
1770
1771 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
1772 {
1773 if (!ap_dev->unregistered) {
1774 if (ap_poll_queue(ap_dev, flags))
1775 ap_dev->unregistered = 1;
1776 if (ap_dev->reset == AP_RESET_DO)
1777 ap_reset(ap_dev);
1778 }
1779 return 0;
1780 }
1781
1782 /**
1783 * ap_poll_all(): Poll all AP devices.
1784 * @dummy: Unused variable
1785 *
1786 * Poll all AP devices on the bus in a round robin fashion. Continue
1787 * polling until bit 2^0 of the control flags is not set. If bit 2^1
1788  * of the control flags has been set, arm the poll timer.
1789 */
1790 static void ap_poll_all(unsigned long dummy)
1791 {
1792 unsigned long flags;
1793 struct ap_device *ap_dev;
1794
1795 /* Reset the indicator if interrupts are used. Thus new interrupts can
1796  * be received. Doing this at the beginning of the tasklet is important
1797  * so that no requests on any AP get lost.
1798 */
1799 if (ap_using_interrupts())
1800 xchg(ap_airq.lsi_ptr, 0);
1801 do {
1802 flags = 0;
1803 spin_lock(&ap_device_list_lock);
1804 list_for_each_entry(ap_dev, &ap_device_list, list) {
1805 spin_lock(&ap_dev->lock);
1806 __ap_poll_device(ap_dev, &flags);
1807 spin_unlock(&ap_dev->lock);
1808 }
1809 spin_unlock(&ap_device_list_lock);
1810 } while (flags & 1);
1811 if (flags & 2)
1812 ap_schedule_poll_timer();
1813 }
1814
1815 /**
1816 * ap_poll_thread(): Thread that polls for finished requests.
1817 * @data: Unused pointer
1818 *
1819 * AP bus poll thread. The purpose of this thread is to poll for
1820 * finished requests in a loop if there is a "free" cpu - that is
1821 * a cpu that doesn't have anything better to do. The polling stops
1822 * as soon as there is another task or if all messages have been
1823 * delivered.
1824 */
1825 static int ap_poll_thread(void *data)
1826 {
1827 DECLARE_WAITQUEUE(wait, current);
1828 unsigned long flags;
1829 int requests;
1830 struct ap_device *ap_dev;
1831
1832 set_user_nice(current, MAX_NICE);
1833 while (1) {
1834 if (ap_suspend_flag)
1835 return 0;
1836 if (need_resched()) {
1837 schedule();
1838 continue;
1839 }
1840 add_wait_queue(&ap_poll_wait, &wait);
1841 set_current_state(TASK_INTERRUPTIBLE);
1842 if (kthread_should_stop())
1843 break;
1844 requests = atomic_read(&ap_poll_requests);
1845 if (requests <= 0)
1846 schedule();
1847 set_current_state(TASK_RUNNING);
1848 remove_wait_queue(&ap_poll_wait, &wait);
1849
1850 flags = 0;
1851 spin_lock_bh(&ap_device_list_lock);
1852 list_for_each_entry(ap_dev, &ap_device_list, list) {
1853 spin_lock(&ap_dev->lock);
1854 __ap_poll_device(ap_dev, &flags);
1855 spin_unlock(&ap_dev->lock);
1856 }
1857 spin_unlock_bh(&ap_device_list_lock);
1858 }
1859 set_current_state(TASK_RUNNING);
1860 remove_wait_queue(&ap_poll_wait, &wait);
1861 return 0;
1862 }
1863
1864 static int ap_poll_thread_start(void)
1865 {
1866 int rc;
1867
1868 if (ap_using_interrupts() || ap_suspend_flag)
1869 return 0;
1870 mutex_lock(&ap_poll_thread_mutex);
1871 if (!ap_poll_kthread) {
1872 ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
1873 rc = PTR_RET(ap_poll_kthread);
1874 if (rc)
1875 ap_poll_kthread = NULL;
1876 }
1877 else
1878 rc = 0;
1879 mutex_unlock(&ap_poll_thread_mutex);
1880 return rc;
1881 }
1882
1883 static void ap_poll_thread_stop(void)
1884 {
1885 mutex_lock(&ap_poll_thread_mutex);
1886 if (ap_poll_kthread) {
1887 kthread_stop(ap_poll_kthread);
1888 ap_poll_kthread = NULL;
1889 }
1890 mutex_unlock(&ap_poll_thread_mutex);
1891 }
1892
1893 /**
1894 * ap_request_timeout(): Handling of request timeouts
1895 * @data: Holds the AP device.
1896 *
1897 * Handles request timeouts.
1898 */
1899 static void ap_request_timeout(unsigned long data)
1900 {
1901 struct ap_device *ap_dev = (struct ap_device *) data;
1902
1903 if (ap_dev->reset == AP_RESET_ARMED) {
1904 ap_dev->reset = AP_RESET_DO;
1905
1906 if (ap_using_interrupts())
1907 tasklet_schedule(&ap_tasklet);
1908 }
1909 }
1910
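/* Reset all AP queues within the currently selected domain. */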
1911 static void ap_reset_domain(void)
1912 {
1913 int i;
1914
1915 if (ap_domain_index != -1)
1916 for (i = 0; i < AP_DEVICES; i++)
1917 ap_reset_queue(AP_MKQID(i, ap_domain_index));
1918 }
1919
1920 static void ap_reset_all(void)
1921 {
1922 int i, j;
1923
1924 for (i = 0; i < AP_DOMAINS; i++) {
1925 if (!ap_test_config_domain(i))
1926 continue;
1927 for (j = 0; j < AP_DEVICES; j++) {
1928 if (!ap_test_config_card_id(j))
1929 continue;
1930 ap_reset_queue(AP_MKQID(j, i));
1931 }
1932 }
1933 }
1934
1935 static struct reset_call ap_reset_call = {
1936 .fn = ap_reset_all,
1937 };
1938
1939 /**
1940 * ap_module_init(): The module initialization code.
1941 *
1942 * Initializes the module.
1943 */
1944 int __init ap_module_init(void)
1945 {
1946 int rc, i;
1947
1948 if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
1949 pr_warning("%d is not a valid cryptographic domain\n",
1950 ap_domain_index);
1951 return -EINVAL;
1952 }
1953 /* In resume callback we need to know if the user had set the domain.
954  * If so, we cannot just reset it.
1955 */
1956 if (ap_domain_index >= 0)
1957 user_set_domain = 1;
1958
1959 if (ap_instructions_available() != 0) {
1960 pr_warning("The hardware system does not support "
1961 "AP instructions\n");
1962 return -ENODEV;
1963 }
1964 if (ap_interrupts_available()) {
1965 rc = register_adapter_interrupt(&ap_airq);
1966 ap_airq_flag = (rc == 0);
1967 }
1968
1969 register_reset_call(&ap_reset_call);
1970
1971 /* Create /sys/bus/ap. */
1972 rc = bus_register(&ap_bus_type);
1973 if (rc)
1974 goto out;
1975 for (i = 0; ap_bus_attrs[i]; i++) {
1976 rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
1977 if (rc)
1978 goto out_bus;
1979 }
1980
1981 /* Create /sys/devices/ap. */
1982 ap_root_device = root_device_register("ap");
1983 rc = PTR_RET(ap_root_device);
1984 if (rc)
1985 goto out_bus;
1986
1987 ap_work_queue = create_singlethread_workqueue("kapwork");
1988 if (!ap_work_queue) {
1989 rc = -ENOMEM;
1990 goto out_root;
1991 }
1992
1993 ap_query_configuration();
1994 if (ap_select_domain() == 0)
1995 ap_scan_bus(NULL);
1996
1997 /* Set up the AP bus rescan timer. */
1998 init_timer(&ap_config_timer);
1999 ap_config_timer.function = ap_config_timeout;
2000 ap_config_timer.data = 0;
2001 ap_config_timer.expires = jiffies + ap_config_time * HZ;
2002 add_timer(&ap_config_timer);
2003
2004 /* Set up the high resolution poll timer.
2005 * If we are running under z/VM adjust polling to z/VM polling rate.
2006 */
2007 if (MACHINE_IS_VM)
2008 poll_timeout = 1500000;
2009 spin_lock_init(&ap_poll_timer_lock);
2010 hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2011 ap_poll_timer.function = ap_poll_timeout;
2012
2013 /* Start the low priority AP bus poll thread. */
2014 if (ap_thread_flag) {
2015 rc = ap_poll_thread_start();
2016 if (rc)
2017 goto out_work;
2018 }
2019
2020 return 0;
2021
2022 out_work:
2023 del_timer_sync(&ap_config_timer);
2024 hrtimer_cancel(&ap_poll_timer);
2025 destroy_workqueue(ap_work_queue);
2026 out_root:
2027 root_device_unregister(ap_root_device);
2028 out_bus:
2029 while (i--)
2030 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
2031 bus_unregister(&ap_bus_type);
2032 out:
2033 unregister_reset_call(&ap_reset_call);
2034 if (ap_using_interrupts())
2035 unregister_adapter_interrupt(&ap_airq);
2036 return rc;
2037 }
2038
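/* bus_find_device() helper used by ap_module_exit() to match every AP device. */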
2039 static int __ap_match_all(struct device *dev, void *data)
2040 {
2041 return 1;
2042 }
2043
2044 /**
2045  * ap_module_exit(): The module termination code.
2046 *
2047 * Terminates the module.
2048 */
2049 void ap_module_exit(void)
2050 {
2051 int i;
2052 struct device *dev;
2053
2054 ap_reset_domain();
2055 ap_poll_thread_stop();
2056 del_timer_sync(&ap_config_timer);
2057 hrtimer_cancel(&ap_poll_timer);
2058 destroy_workqueue(ap_work_queue);
2059 tasklet_kill(&ap_tasklet);
2060 root_device_unregister(ap_root_device);
2061 while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
2062 __ap_match_all)))
2063 {
2064 device_unregister(dev);
2065 put_device(dev);
2066 }
2067 for (i = 0; ap_bus_attrs[i]; i++)
2068 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
2069 bus_unregister(&ap_bus_type);
2070 unregister_reset_call(&ap_reset_call);
2071 if (ap_using_interrupts())
2072 unregister_adapter_interrupt(&ap_airq);
2073 }
2074
2075 module_init(ap_module_init);
2076 module_exit(ap_module_exit);