]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/s390/crypto/ap_bus.c
[S390] uaccess: Always access the correct address space.
[mirror_ubuntu-bionic-kernel.git] / drivers / s390 / crypto / ap_bus.c
CommitLineData
1534c382
MS
1/*
2 * linux/drivers/s390/crypto/ap_bus.c
3 *
4 * Copyright (C) 2006 IBM Corporation
5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Ralph Wuerthner <rwuerthn@de.ibm.com>
8 *
9 * Adjunct processor bus.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/err.h>
30#include <linux/interrupt.h>
31#include <linux/workqueue.h>
32#include <linux/notifier.h>
33#include <linux/kthread.h>
34#include <linux/mutex.h>
35#include <asm/s390_rdev.h>
85eca850 36#include <asm/reset.h>
1534c382
MS
37
38#include "ap_bus.h"
39
40/* Some prototypes. */
4927b3f7 41static void ap_scan_bus(struct work_struct *);
1534c382
MS
42static void ap_poll_all(unsigned long);
43static void ap_poll_timeout(unsigned long);
44static int ap_poll_thread_start(void);
45static void ap_poll_thread_stop(void);
af512ed0 46static void ap_request_timeout(unsigned long);
1534c382
MS
47
48/**
49 * Module description.
50 */
51MODULE_AUTHOR("IBM Corporation");
52MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
53 "Copyright 2006 IBM Corporation");
54MODULE_LICENSE("GPL");
55
56/**
57 * Module parameter
58 */
59int ap_domain_index = -1; /* Adjunct Processor Domain Index */
60module_param_named(domain, ap_domain_index, int, 0000);
61MODULE_PARM_DESC(domain, "domain index for ap devices");
62EXPORT_SYMBOL(ap_domain_index);
63
b90b34c6 64static int ap_thread_flag = 0;
1534c382 65module_param_named(poll_thread, ap_thread_flag, int, 0000);
b90b34c6 66MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
1534c382
MS
67
68static struct device *ap_root_device = NULL;
cf352ce0
RW
69static DEFINE_SPINLOCK(ap_device_lock);
70static LIST_HEAD(ap_device_list);
1534c382
MS
71
72/**
73 * Workqueue & timer for bus rescan.
74 */
75static struct workqueue_struct *ap_work_queue;
76static struct timer_list ap_config_timer;
77static int ap_config_time = AP_CONFIG_TIME;
4927b3f7 78static DECLARE_WORK(ap_config_work, ap_scan_bus);
1534c382
MS
79
80/**
81 * Tasklet & timer for AP request polling.
82 */
83static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout,0,0);
84static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
85static atomic_t ap_poll_requests = ATOMIC_INIT(0);
86static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
87static struct task_struct *ap_poll_kthread = NULL;
88static DEFINE_MUTEX(ap_poll_thread_mutex);
89
90/**
91 * Test if ap instructions are available.
92 *
93 * Returns 0 if the ap instructions are installed.
94 */
95static inline int ap_instructions_available(void)
96{
97 register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
98 register unsigned long reg1 asm ("1") = -ENODEV;
99 register unsigned long reg2 asm ("2") = 0UL;
100
101 asm volatile(
102 " .long 0xb2af0000\n" /* PQAP(TAPQ) */
103 "0: la %1,0\n"
104 "1:\n"
105 EX_TABLE(0b, 1b)
106 : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
107 return reg1;
108}
109
/**
 * Test adjunct processor queue.
 * @qid: the ap queue number
 * @queue_depth: pointer to queue depth value
 * @device_type: pointer to device type value
 *
 * Returns ap queue status structure.
 */
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	register unsigned long reg0 asm ("0") = qid;
	/* PQAP returns the queue status in gpr 1. */
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(".long 0xb2af0000"	/* PQAP(TAPQ) */
		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	/* gpr 2 carries queue info: device type above bit 24, depth in
	 * the low byte. */
	*device_type = (int) (reg2 >> 24);
	*queue_depth = (int) (reg2 & 0xff);
	return reg1;
}
131
/**
 * Reset adjunct processor queue.
 * @qid: the ap queue number
 *
 * Returns ap queue status structure.
 */
static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
{
	/* 0x01000000 selects the RAPQ (reset) function of PQAP. */
	register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		".long 0xb2af0000"		/* PQAP(RAPQ) */
		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	return reg1;
}
149
/**
 * Send message to adjunct processor queue.
 * @qid: the ap queue number
 * @psmid: the program supplied message identifier
 * @msg: the message text
 * @length: the message length
 *
 * Returns ap queue status structure.
 *
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 *
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	/* Fake type so the compiler sees the whole message as asm input. */
	typedef struct { char _[length]; } msgblock;
	/* 0x40000000 selects the NQAP (enqueue) function code. */
	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = (unsigned long) msg;
	register unsigned long reg3 asm ("3") = (unsigned long) length;
	/* The psmid is passed as two 32-bit halves in gprs 4 and 5. */
	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
	register unsigned long reg5 asm ("5") = (unsigned int) psmid;

	asm volatile (
		"0: .long 0xb2ad0042\n"		/* NQAP */
		" brc 2,0b"			/* retry on condition code 2 */
		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
		: "cc" );
	return reg1;
}
183
184int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
185{
186 struct ap_queue_status status;
187
188 status = __ap_send(qid, psmid, msg, length);
189 switch (status.response_code) {
190 case AP_RESPONSE_NORMAL:
191 return 0;
192 case AP_RESPONSE_Q_FULL:
af512ed0 193 case AP_RESPONSE_RESET_IN_PROGRESS:
1534c382
MS
194 return -EBUSY;
195 default: /* Device is gone. */
196 return -ENODEV;
197 }
198}
199EXPORT_SYMBOL(ap_send);
200
/*
 * Receive message from adjunct processor queue.
 * @qid: the ap queue number
 * @psmid: pointer to program supplied message identifier
 * @msg: the message text
 * @length: the message length
 *
 * Returns ap queue status structure.
 *
 * Condition code 1 on DQAP means the receive has taken place
 * but only partially. The response is incomplete, hence the
 * DQAP is repeated.
 *
 * Condition code 2 on DQAP also means the receive is incomplete,
 * this time because a segment boundary was reached. Again, the
 * DQAP is repeated.
 *
 * Note that gpr2 is used by the DQAP instruction to keep track of
 * any 'residual' length, in case the instruction gets interrupted.
 * Hence it gets zeroed before the instruction.
 */
static inline struct ap_queue_status
__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	/* Fake type so the compiler sees the whole buffer as asm output. */
	typedef struct { char _[length]; } msgblock;
	/* 0x80000000 selects the DQAP (dequeue) function code. */
	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm("2") = 0UL;
	register unsigned long reg4 asm("4") = (unsigned long) msg;
	register unsigned long reg5 asm("5") = (unsigned long) length;
	/* DQAP returns the psmid halves in gprs 6 and 7. */
	register unsigned long reg6 asm("6") = 0UL;
	register unsigned long reg7 asm("7") = 0UL;


	asm volatile(
		"0: .long 0xb2ae0064\n"		/* DQAP */
		" brc 6,0b\n"			/* retry on condition code 1 or 2 */
		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
		"+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
		"=m" (*(msgblock *) msg) : : "cc" );
	/* Reassemble the 64-bit psmid from the two register halves. */
	*psmid = (((unsigned long long) reg6) << 32) + reg7;
	return reg1;
}
244
245int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
246{
247 struct ap_queue_status status;
248
249 status = __ap_recv(qid, psmid, msg, length);
250 switch (status.response_code) {
251 case AP_RESPONSE_NORMAL:
252 return 0;
253 case AP_RESPONSE_NO_PENDING_REPLY:
254 if (status.queue_empty)
255 return -ENOENT;
256 return -EBUSY;
af512ed0
RW
257 case AP_RESPONSE_RESET_IN_PROGRESS:
258 return -EBUSY;
1534c382
MS
259 default:
260 return -ENODEV;
261 }
262}
263EXPORT_SYMBOL(ap_recv);
264
265/**
266 * Check if an AP queue is available. The test is repeated for
267 * AP_MAX_RESET times.
268 * @qid: the ap queue number
269 * @queue_depth: pointer to queue depth value
270 * @device_type: pointer to device type value
271 */
272static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
273{
274 struct ap_queue_status status;
275 int t_depth, t_device_type, rc, i;
276
277 rc = -EBUSY;
278 for (i = 0; i < AP_MAX_RESET; i++) {
279 status = ap_test_queue(qid, &t_depth, &t_device_type);
280 switch (status.response_code) {
281 case AP_RESPONSE_NORMAL:
282 *queue_depth = t_depth + 1;
283 *device_type = t_device_type;
284 rc = 0;
285 break;
286 case AP_RESPONSE_Q_NOT_AVAIL:
287 rc = -ENODEV;
288 break;
289 case AP_RESPONSE_RESET_IN_PROGRESS:
290 break;
291 case AP_RESPONSE_DECONFIGURED:
292 rc = -ENODEV;
293 break;
294 case AP_RESPONSE_CHECKSTOPPED:
295 rc = -ENODEV;
296 break;
297 case AP_RESPONSE_BUSY:
298 break;
299 default:
300 BUG();
301 }
302 if (rc != -EBUSY)
303 break;
304 if (i < AP_MAX_RESET - 1)
305 udelay(5);
306 }
307 return rc;
308}
309
/**
 * Reset an AP queue and wait for it to become available again.
 * @qid: the ap queue number
 *
 * Returns 0 once the queue reports normal and empty, -EBUSY if the
 * reset is still in progress after AP_MAX_RESET polls, -ENODEV if the
 * queue disappeared.
 */
static int ap_init_queue(ap_qid_t qid)
{
	struct ap_queue_status status;
	int rc, dummy, i;

	rc = -ENODEV;
	status = ap_reset_queue(qid);
	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.queue_empty)
				rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
			i = AP_MAX_RESET; /* return with -ENODEV */
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			rc = -EBUSY;
			/* fallthrough - keep polling just like for BUSY */
		case AP_RESPONSE_BUSY:
		default:
			break;
		}
		/* Leave the loop once a definite answer (0/-ENODEV via
		 * i = AP_MAX_RESET) has been reached. */
		if (rc != -ENODEV && rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &dummy, &dummy);
		}
	}
	return rc;
}
347
af512ed0
RW
348/**
349 * Arm request timeout if a AP device was idle and a new request is submitted.
350 */
351static void ap_increase_queue_count(struct ap_device *ap_dev)
352{
353 int timeout = ap_dev->drv->request_timeout;
354
355 ap_dev->queue_count++;
356 if (ap_dev->queue_count == 1) {
357 mod_timer(&ap_dev->timeout, jiffies + timeout);
358 ap_dev->reset = AP_RESET_ARMED;
359 }
360}
361
362/**
363 * AP device is still alive, re-schedule request timeout if there are still
364 * pending requests.
365 */
366static void ap_decrease_queue_count(struct ap_device *ap_dev)
367{
368 int timeout = ap_dev->drv->request_timeout;
369
370 ap_dev->queue_count--;
371 if (ap_dev->queue_count > 0)
372 mod_timer(&ap_dev->timeout, jiffies + timeout);
373 else
374 /**
375 * The timeout timer should to be disabled now - since
376 * del_timer_sync() is very expensive, we just tell via the
377 * reset flag to ignore the pending timeout timer.
378 */
379 ap_dev->reset = AP_RESET_IGNORE;
380}
381
1534c382
MS
382/**
383 * AP device related attributes.
384 */
385static ssize_t ap_hwtype_show(struct device *dev,
386 struct device_attribute *attr, char *buf)
387{
388 struct ap_device *ap_dev = to_ap_dev(dev);
389 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
390}
391static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);
392
393static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
394 char *buf)
395{
396 struct ap_device *ap_dev = to_ap_dev(dev);
397 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
398}
399static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);
400
401static ssize_t ap_request_count_show(struct device *dev,
402 struct device_attribute *attr,
403 char *buf)
404{
405 struct ap_device *ap_dev = to_ap_dev(dev);
406 int rc;
407
408 spin_lock_bh(&ap_dev->lock);
409 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
410 spin_unlock_bh(&ap_dev->lock);
411 return rc;
412}
413
414static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);
415
416static ssize_t ap_modalias_show(struct device *dev,
417 struct device_attribute *attr, char *buf)
418{
419 return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
420}
421
422static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);
423
/* Attribute group attached to every AP device in ap_scan_bus(). */
static struct attribute *ap_dev_attrs[] = {
	&dev_attr_hwtype.attr,
	&dev_attr_depth.attr,
	&dev_attr_request_count.attr,
	&dev_attr_modalias.attr,
	NULL
};
static struct attribute_group ap_dev_attr_group = {
	.attrs = ap_dev_attrs
};
434
435/**
436 * AP bus driver registration/unregistration.
437 */
438static int ap_bus_match(struct device *dev, struct device_driver *drv)
439{
440 struct ap_device *ap_dev = to_ap_dev(dev);
441 struct ap_driver *ap_drv = to_ap_drv(drv);
442 struct ap_device_id *id;
443
444 /**
445 * Compare device type of the device with the list of
446 * supported types of the device_driver.
447 */
448 for (id = ap_drv->ids; id->match_flags; id++) {
449 if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
450 (id->dev_type != ap_dev->device_type))
451 continue;
452 return 1;
453 }
454 return 0;
455}
456
457/**
458 * uevent function for AP devices. It sets up a single environment
459 * variable DEV_TYPE which contains the hardware device type.
460 */
7eff2e7a 461static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
1534c382
MS
462{
463 struct ap_device *ap_dev = to_ap_dev(dev);
7eff2e7a 464 int retval = 0;
1534c382
MS
465
466 if (!ap_dev)
467 return -ENODEV;
468
469 /* Set up DEV_TYPE environment variable. */
7eff2e7a 470 retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
bf62456e
ER
471 if (retval)
472 return retval;
473
66a4263b 474 /* Add MODALIAS= */
7eff2e7a 475 retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
bf62456e 476
bf62456e 477 return retval;
1534c382
MS
478}
479
/* The one and only AP bus. */
static struct bus_type ap_bus_type = {
	.name = "ap",
	.match = &ap_bus_match,
	.uevent = &ap_uevent,
};
485
486static int ap_device_probe(struct device *dev)
487{
488 struct ap_device *ap_dev = to_ap_dev(dev);
489 struct ap_driver *ap_drv = to_ap_drv(dev->driver);
490 int rc;
491
492 ap_dev->drv = ap_drv;
493 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
faa582ca
RW
494 if (!rc) {
495 spin_lock_bh(&ap_device_lock);
496 list_add(&ap_dev->list, &ap_device_list);
497 spin_unlock_bh(&ap_device_lock);
498 }
1534c382
MS
499 return rc;
500}
501
502/**
503 * Flush all requests from the request/pending queue of an AP device.
504 * @ap_dev: pointer to the AP device.
505 */
4d284cac 506static void __ap_flush_queue(struct ap_device *ap_dev)
1534c382
MS
507{
508 struct ap_message *ap_msg, *next;
509
510 list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
511 list_del_init(&ap_msg->list);
512 ap_dev->pendingq_count--;
513 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
514 }
515 list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
516 list_del_init(&ap_msg->list);
517 ap_dev->requestq_count--;
518 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
519 }
520}
521
/* Locked wrapper around __ap_flush_queue(). */
void ap_flush_queue(struct ap_device *ap_dev)
{
	spin_lock_bh(&ap_dev->lock);
	__ap_flush_queue(ap_dev);
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_flush_queue);
529
/*
 * Unbind an AP device from its driver: fail all outstanding requests,
 * stop the request timeout, take the device off the global list and
 * let the driver clean up.
 */
static int ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = ap_dev->drv;

	/* Complete everything still queued with -ENODEV first. */
	ap_flush_queue(ap_dev);
	/* No requests left, so the timeout timer can be stopped for good. */
	del_timer_sync(&ap_dev->timeout);
	spin_lock_bh(&ap_device_lock);
	list_del_init(&ap_dev->list);
	spin_unlock_bh(&ap_device_lock);
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);
	/* Drop this device's share of the global in-flight counter. */
	spin_lock_bh(&ap_dev->lock);
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	spin_unlock_bh(&ap_dev->lock);
	return 0;
}
547
548int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
549 char *name)
550{
551 struct device_driver *drv = &ap_drv->driver;
552
553 drv->bus = &ap_bus_type;
554 drv->probe = ap_device_probe;
555 drv->remove = ap_device_remove;
556 drv->owner = owner;
557 drv->name = name;
558 return driver_register(drv);
559}
560EXPORT_SYMBOL(ap_driver_register);
561
/* Detach an AP driver from the bus. */
void ap_driver_unregister(struct ap_driver *ap_drv)
{
	driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);
567
/**
 * AP bus attributes.
 */
/* sysfs "ap_domain": the selected AP domain index (read-only). */
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}

static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);

/* sysfs "config_time": bus rescan interval in seconds. */
static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
}

static ssize_t ap_config_time_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int time;

	/* Only 5..120 seconds are accepted. */
	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
		return -EINVAL;
	ap_config_time = time;
	/*
	 * Re-arm the rescan timer with the new interval. If the timer
	 * was not pending, or expired between the two checks (mod_timer
	 * returns 0 for an inactive timer), start it explicitly.
	 */
	if (!timer_pending(&ap_config_timer) ||
	    !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
	}
	return count;
}

static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);

/* sysfs "poll_thread": whether the poll kernel thread is running. */
static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
}

/* Writing non-zero starts the poll thread, zero stops it. */
static ssize_t ap_poll_thread_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int flag, rc;

	if (sscanf(buf, "%d\n", &flag) != 1)
		return -EINVAL;
	if (flag) {
		rc = ap_poll_thread_start();
		if (rc)
			return rc;
	}
	else
		ap_poll_thread_stop();
	return count;
}

static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);

/* All bus-level attributes, registered at module init. */
static struct bus_attribute *const ap_bus_attrs[] = {
	&bus_attr_ap_domain,
	&bus_attr_config_time,
	&bus_attr_poll_thread,
	NULL
};
631
632/**
633 * Pick one of the 16 ap domains.
634 */
4d284cac 635static int ap_select_domain(void)
1534c382
MS
636{
637 int queue_depth, device_type, count, max_count, best_domain;
638 int rc, i, j;
639
640 /**
641 * We want to use a single domain. Either the one specified with
642 * the "domain=" parameter or the domain with the maximum number
643 * of devices.
644 */
645 if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
646 /* Domain has already been selected. */
647 return 0;
648 best_domain = -1;
649 max_count = 0;
650 for (i = 0; i < AP_DOMAINS; i++) {
651 count = 0;
652 for (j = 0; j < AP_DEVICES; j++) {
653 ap_qid_t qid = AP_MKQID(j, i);
654 rc = ap_query_queue(qid, &queue_depth, &device_type);
655 if (rc)
656 continue;
657 count++;
658 }
659 if (count > max_count) {
660 max_count = count;
661 best_domain = i;
662 }
663 }
664 if (best_domain >= 0){
665 ap_domain_index = best_domain;
666 return 0;
667 }
668 return -ENODEV;
669}
670
/**
 * Find the device type if query queue returned a device type of 0.
 * @ap_dev: pointer to the AP device.
 *
 * Sends a canned test request and classifies the card by the reply
 * bytes: 0x00 0x86 means PCICC, anything else PCICA. Returns 0 on
 * success, -ENOMEM/-ENODEV on failure.
 */
static int ap_probe_device_type(struct ap_device *ap_dev)
{
	/* Pre-built probe request (presumably a fixed CCA-APPL test
	 * message - contents taken as-is, do not modify). */
	static unsigned char msg[] = {
		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
		0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
		0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
		0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
		0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
		0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
		0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
		0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
		0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
		0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
		0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
		0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
		0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
		0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
		0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
		0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
		0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
		0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
		0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
		0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
		0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
	};
	struct ap_queue_status status;
	unsigned long long psmid;
	char *reply;
	int rc, i;

	reply = (void *) get_zeroed_page(GFP_KERNEL);
	if (!reply) {
		rc = -ENOMEM;
		goto out;
	}

	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
			   msg, sizeof(msg));
	if (status.response_code != AP_RESPONSE_NORMAL) {
		rc = -ENODEV;
		goto out_free;
	}

	/* Wait for the test message to complete. */
	for (i = 0; i < 6; i++) {
		mdelay(300);
		status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
		if (status.response_code == AP_RESPONSE_NORMAL &&
		    psmid == 0x0102030405060708ULL)
			break;
	}
	if (i < 6) {
		/* Got an answer. */
		if (reply[0] == 0x00 && reply[1] == 0x86)
			ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
		else
			ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
		rc = 0;
	} else
		rc = -ENODEV;

out_free:
	free_page((unsigned long) reply);
out:
	return rc;
}
765
766/**
767 * Scan the ap bus for new devices.
768 */
769static int __ap_scan_bus(struct device *dev, void *data)
770{
771 return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
772}
773
/* Device-model release callback: free the AP device structure. */
static void ap_device_release(struct device *dev)
{
	kfree(to_ap_dev(dev));
}
780
/*
 * Rescan worker: walk all AP card positions in the selected domain,
 * unregister devices that went away and register newly appeared ones.
 */
static void ap_scan_bus(struct work_struct *unused)
{
	struct ap_device *ap_dev;
	struct device *dev;
	ap_qid_t qid;
	int queue_depth, device_type;
	int rc, i;

	if (ap_select_domain() != 0)
		return;
	for (i = 0; i < AP_DEVICES; i++) {
		qid = AP_MKQID(i, ap_domain_index);
		/* Is this queue already known? */
		dev = bus_find_device(&ap_bus_type, NULL,
				      (void *)(unsigned long)qid,
				      __ap_scan_bus);
		rc = ap_query_queue(qid, &queue_depth, &device_type);
		if (dev) {
			if (rc == -EBUSY) {
				/* Give a busy queue one more chance. */
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(AP_RESET_TIMEOUT);
				rc = ap_query_queue(qid, &queue_depth,
						    &device_type);
			}
			ap_dev = to_ap_dev(dev);
			spin_lock_bh(&ap_dev->lock);
			if (rc || ap_dev->unregistered) {
				/* Queue is gone or marked dead: drop it. */
				spin_unlock_bh(&ap_dev->lock);
				device_unregister(dev);
				put_device(dev);
				continue;
			}
			spin_unlock_bh(&ap_dev->lock);
			/* Still healthy, drop the find reference only. */
			put_device(dev);
			continue;
		}
		if (rc)
			continue;
		rc = ap_init_queue(qid);
		if (rc)
			continue;
		/* New queue found: build and register an ap_device. */
		ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
		if (!ap_dev)
			break;
		ap_dev->qid = qid;
		ap_dev->queue_depth = queue_depth;
		/* Stays "unregistered" until sysfs setup succeeded. */
		ap_dev->unregistered = 1;
		spin_lock_init(&ap_dev->lock);
		INIT_LIST_HEAD(&ap_dev->pendingq);
		INIT_LIST_HEAD(&ap_dev->requestq);
		INIT_LIST_HEAD(&ap_dev->list);
		setup_timer(&ap_dev->timeout, ap_request_timeout,
			    (unsigned long) ap_dev);
		if (device_type == 0)
			ap_probe_device_type(ap_dev);
		else
			ap_dev->device_type = device_type;

		ap_dev->device.bus = &ap_bus_type;
		ap_dev->device.parent = ap_root_device;
		snprintf(ap_dev->device.bus_id, BUS_ID_SIZE, "card%02x",
			 AP_QID_DEVICE(ap_dev->qid));
		ap_dev->device.release = ap_device_release;
		rc = device_register(&ap_dev->device);
		if (rc) {
			/* NOTE(review): kfree after a failed
			 * device_register - the device core usually
			 * wants put_device() here; verify. */
			kfree(ap_dev);
			continue;
		}
		/* Add device attributes. */
		rc = sysfs_create_group(&ap_dev->device.kobj,
					&ap_dev_attr_group);
		if (!rc) {
			spin_lock_bh(&ap_dev->lock);
			ap_dev->unregistered = 0;
			spin_unlock_bh(&ap_dev->lock);
		}
		else
			device_unregister(&ap_dev->device);
	}
}
860
/*
 * Rescan timer callback: schedule ap_scan_bus() on the workqueue and
 * re-arm the timer for the next ap_config_time interval.
 */
static void
ap_config_timeout(unsigned long ptr)
{
	queue_work(ap_work_queue, &ap_config_work);
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);
}
868
869/**
870 * Set up the timer to run the poll tasklet
871 */
872static inline void ap_schedule_poll_timer(void)
873{
874 if (timer_pending(&ap_poll_timer))
875 return;
876 mod_timer(&ap_poll_timer, jiffies + AP_POLL_TIME);
877}
878
/**
 * Receive pending reply messages from an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 * Returns 0 if the device is still present, -ENODEV if not.
 */
static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	/* Nothing in flight, nothing to dequeue. */
	if (ap_dev->queue_count <= 0)
		return 0;
	status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
			   ap_dev->reply->message, ap_dev->reply->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_dec(&ap_poll_requests);
		ap_decrease_queue_count(ap_dev);
		/* Match the reply to a pending request via psmid. */
		list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
			if (ap_msg->psmid != ap_dev->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			ap_dev->pendingq_count--;
			ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
			break;
		}
		/* More replies may be waiting; request another poll. */
		if (ap_dev->queue_count > 0)
			*flags |= 1;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty) {
			/* The card shouldn't forget requests but who knows. */
			atomic_sub(ap_dev->queue_count, &ap_poll_requests);
			ap_dev->queue_count = 0;
			/* Resubmit everything that was in flight. */
			list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
			ap_dev->requestq_count += ap_dev->pendingq_count;
			ap_dev->pendingq_count = 0;
		} else
			/* Replies outstanding: arm the poll timer. */
			*flags |= 2;
		break;
	default:
		return -ENODEV;
	}
	return 0;
}
926
/**
 * Send messages from the request queue to an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 * Returns 0 if the device is still present, -ENODEV if not.
 */
static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	/* Nothing queued, or the hardware queue is already full. */
	if (ap_dev->requestq_count <= 0 ||
	    ap_dev->queue_count >= ap_dev->queue_depth)
		return 0;
	/* Start the next request on the queue. */
	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
	status = __ap_send(ap_dev->qid, ap_msg->psmid,
			   ap_msg->message, ap_msg->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_inc(&ap_poll_requests);
		ap_increase_queue_count(ap_dev);
		/* The request is now in flight: requestq -> pendingq. */
		list_move_tail(&ap_msg->list, &ap_dev->pendingq);
		ap_dev->requestq_count--;
		ap_dev->pendingq_count++;
		/* Room for more? Ask for another poll right away. */
		if (ap_dev->queue_count < ap_dev->queue_depth &&
		    ap_dev->requestq_count > 0)
			*flags |= 1;
		*flags |= 2;
		break;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		/* Transiently unable to send: arm the poll timer. */
		*flags |= 2;
		break;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return -EINVAL;
	default:
		return -ENODEV;
	}
	return 0;
}
969
/**
 * Poll AP device for pending replies and send new messages. If either
 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
 * @ap_dev: pointer to the bus device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 * Returns 0.
 */
static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
{
	int rc = ap_poll_read(ap_dev, flags);

	return rc ? rc : ap_poll_write(ap_dev, flags);
}
987
/**
 * Queue a message to a device.
 * @ap_dev: pointer to the AP device
 * @ap_msg: the message to be queued
 *
 * Caller holds the device lock. Tries to send immediately if the
 * device queue has room, otherwise parks the message on the requestq.
 */
static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_queue_status status;

	if (list_empty(&ap_dev->requestq) &&
	    ap_dev->queue_count < ap_dev->queue_depth) {
		/* Fast path: hand the message to the hardware now. */
		status = __ap_send(ap_dev->qid, ap_msg->psmid,
				   ap_msg->message, ap_msg->length);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
			atomic_inc(&ap_poll_requests);
			ap_dev->pendingq_count++;
			ap_increase_queue_count(ap_dev);
			ap_dev->total_request_count++;
			break;
		case AP_RESPONSE_Q_FULL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			/* Transiently busy: queue for a later poll. */
			list_add_tail(&ap_msg->list, &ap_dev->requestq);
			ap_dev->requestq_count++;
			ap_dev->total_request_count++;
			return -EBUSY;
		case AP_RESPONSE_MESSAGE_TOO_BIG:
			/* Complete the message with an error. */
			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
			return -EINVAL;
		default:	/* Device is gone. */
			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
			return -ENODEV;
		}
	} else {
		/* Slow path: device busy, wait for the poll machinery. */
		list_add_tail(&ap_msg->list, &ap_dev->requestq);
		ap_dev->requestq_count++;
		ap_dev->total_request_count++;
		return -EBUSY;
	}
	ap_schedule_poll_timer();
	return 0;
}
1031
1032void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
1033{
1034 unsigned long flags;
1035 int rc;
1036
1037 spin_lock_bh(&ap_dev->lock);
1038 if (!ap_dev->unregistered) {
1039 /* Make room on the queue by polling for finished requests. */
1040 rc = ap_poll_queue(ap_dev, &flags);
1041 if (!rc)
1042 rc = __ap_queue_message(ap_dev, ap_msg);
1043 if (!rc)
1044 wake_up(&ap_poll_wait);
4e56296d
RW
1045 if (rc == -ENODEV)
1046 ap_dev->unregistered = 1;
1534c382
MS
1047 } else {
1048 ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
c6a48264 1049 rc = -ENODEV;
1534c382
MS
1050 }
1051 spin_unlock_bh(&ap_dev->lock);
1052 if (rc == -ENODEV)
1053 device_unregister(&ap_dev->device);
1054}
1055EXPORT_SYMBOL(ap_queue_message);
1056
/**
 * Cancel a crypto request. This is done by removing the request
 * from the devive pendingq or requestq queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 * @ap_dev: AP device that has the message queued
 * @ap_msg: the message that is to be removed
 */
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&ap_dev->lock);
	/* An empty list head means the message was never queued (or has
	 * already completed) - nothing to do then. */
	if (!list_empty(&ap_msg->list)) {
		/* Decide which counter to decrement: if the psmid is found
		 * on pendingq the message was already sent to the adapter;
		 * otherwise it must still be sitting on requestq. */
		list_for_each_entry(tmp, &ap_dev->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				ap_dev->pendingq_count--;
				goto found;
			}
		ap_dev->requestq_count--;
	found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_cancel_message);
1083
/**
 * AP receive polling for finished AP requests
 *
 * Timer callback: defer the actual polling to ap_tasklet so the
 * work is done in tasklet (softirq) context.
 */
static void ap_poll_timeout(unsigned long unused)
{
	tasklet_schedule(&ap_tasklet);
}
1091
/**
 * Reset a not responding AP device and move all requests from the
 * pending queue to the request queue.
 *
 * Called with ap_dev->lock held (see __ap_poll_all).  Replies for the
 * in-flight requests are lost; the requests themselves are re-queued
 * and will be transmitted again after the queue reset.
 */
static void ap_reset(struct ap_device *ap_dev)
{
	int rc;

	/* Disarm the timeout state machine for this reset cycle. */
	ap_dev->reset = AP_RESET_IGNORE;
	/* Forget all requests currently on the hardware queue ... */
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	ap_dev->queue_count = 0;
	/* ... and move them back to requestq for retransmission. */
	list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
	ap_dev->requestq_count += ap_dev->pendingq_count;
	ap_dev->pendingq_count = 0;
	rc = ap_init_queue(ap_dev->qid);
	if (rc == -ENODEV)
		ap_dev->unregistered = 1;
}
/**
 * Poll a single AP device for finished requests and perform a queue
 * reset if one has been requested by the request timeout handler.
 * @ap_dev: the device to poll
 * @flags: control flags accumulator - bit 2^0 requests continued
 *         polling, bit 2^1 requests arming the poll timer
 *         (evaluated by the callers, see ap_poll_all)
 */
static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags)
{
	spin_lock(&ap_dev->lock);
	if (!ap_dev->unregistered) {
		/* A poll failure means the device is gone. */
		if (ap_poll_queue(ap_dev, flags))
			ap_dev->unregistered = 1;
		if (ap_dev->reset == AP_RESET_DO)
			ap_reset(ap_dev);
	}
	spin_unlock(&ap_dev->lock);
	return 0;
}
1128
1129static void ap_poll_all(unsigned long dummy)
1130{
1131 unsigned long flags;
cf352ce0 1132 struct ap_device *ap_dev;
1534c382
MS
1133
1134 do {
1135 flags = 0;
cf352ce0
RW
1136 spin_lock(&ap_device_lock);
1137 list_for_each_entry(ap_dev, &ap_device_list, list) {
1138 __ap_poll_all(ap_dev, &flags);
1139 }
1140 spin_unlock(&ap_device_lock);
1534c382
MS
1141 } while (flags & 1);
1142 if (flags & 2)
1143 ap_schedule_poll_timer();
1144}
1145
/**
 * AP bus poll thread. The purpose of this thread is to poll for
 * finished requests in a loop if there is a "free" cpu - that is
 * a cpu that doesn't have anything better to do. The polling stops
 * as soon as there is another task or if all messages have been
 * delivered.
 */
static int ap_poll_thread(void *data)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int requests;
	struct ap_device *ap_dev;

	/* Lowest priority: only burn cycles when the cpu is idle anyway. */
	set_user_nice(current, 19);
	while (1) {
		/* Yield whenever someone else wants the cpu. */
		if (need_resched()) {
			schedule();
			continue;
		}
		/* Sleep while no requests are in flight.  The waitqueue
		 * entry is added and the task state set BEFORE the counter
		 * is read so a concurrent wake_up cannot be lost. */
		add_wait_queue(&ap_poll_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		requests = atomic_read(&ap_poll_requests);
		if (requests <= 0)
			schedule();
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ap_poll_wait, &wait);

		flags = 0;
		spin_lock_bh(&ap_device_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			__ap_poll_all(ap_dev, &flags);
		}
		spin_unlock_bh(&ap_device_lock);
	}
	/* kthread_should_stop() exit path: undo the waitqueue setup
	 * done just before the break above. */
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ap_poll_wait, &wait);
	return 0;
}
1187
1188static int ap_poll_thread_start(void)
1189{
1190 int rc;
1191
1192 mutex_lock(&ap_poll_thread_mutex);
1193 if (!ap_poll_kthread) {
1194 ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
1195 rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
1196 if (rc)
1197 ap_poll_kthread = NULL;
1198 }
1199 else
1200 rc = 0;
1201 mutex_unlock(&ap_poll_thread_mutex);
1202 return rc;
1203}
1204
1205static void ap_poll_thread_stop(void)
1206{
1207 mutex_lock(&ap_poll_thread_mutex);
1208 if (ap_poll_kthread) {
1209 kthread_stop(ap_poll_kthread);
1210 ap_poll_kthread = NULL;
1211 }
1212 mutex_unlock(&ap_poll_thread_mutex);
1213}
1214
af512ed0
RW
1215/**
1216 * Handling of request timeouts
1217 */
1218static void ap_request_timeout(unsigned long data)
1219{
1220 struct ap_device *ap_dev = (struct ap_device *) data;
1221
1222 if (ap_dev->reset == AP_RESET_ARMED)
1223 ap_dev->reset = AP_RESET_DO;
1224}
1225
13e742ba
RW
1226static void ap_reset_domain(void)
1227{
1228 int i;
1229
39aa7cf6
RW
1230 if (ap_domain_index != -1)
1231 for (i = 0; i < AP_DEVICES; i++)
1232 ap_reset_queue(AP_MKQID(i, ap_domain_index));
13e742ba
RW
1233}
1234
1235static void ap_reset_all(void)
85eca850
RW
1236{
1237 int i, j;
1238
1239 for (i = 0; i < AP_DOMAINS; i++)
1240 for (j = 0; j < AP_DEVICES; j++)
1241 ap_reset_queue(AP_MKQID(j, i));
1242}
1243
/* Registered via register_reset_call() in ap_module_init so that all
 * AP queues are reset when the machine reset chain runs. */
static struct reset_call ap_reset_call = {
	.fn = ap_reset_all,
};
1247
/**
 * The module initialization code.
 *
 * Validates the domain parameter, checks that the AP instructions
 * are available, then sets up - in order - the reset callback, the
 * ap bus, its attribute files, the root device, the work queue, an
 * initial bus scan, the rescan timer and (optionally) the poll
 * thread.  On failure everything set up so far is torn down again
 * through the goto cleanup chain.
 */
int __init ap_module_init(void)
{
	int rc, i;

	/* -1 means "autodetect"; anything else must be a valid index. */
	if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
		printk(KERN_WARNING "Invalid param: domain = %d. "
		       " Not loading.\n", ap_domain_index);
		return -EINVAL;
	}
	if (ap_instructions_available() != 0) {
		printk(KERN_WARNING "AP instructions not installed.\n");
		return -ENODEV;
	}
	register_reset_call(&ap_reset_call);

	/* Create /sys/bus/ap. */
	rc = bus_register(&ap_bus_type);
	if (rc)
		goto out;
	for (i = 0; ap_bus_attrs[i]; i++) {
		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
		if (rc)
			goto out_bus;
	}

	/* Create /sys/devices/ap. */
	ap_root_device = s390_root_dev_register("ap");
	rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
	if (rc)
		goto out_bus;

	ap_work_queue = create_singlethread_workqueue("kapwork");
	if (!ap_work_queue) {
		rc = -ENOMEM;
		goto out_root;
	}

	/* Scan the bus right away if a usable domain was found. */
	if (ap_select_domain() == 0)
		ap_scan_bus(NULL);

	/* Setup the ap bus rescan timer. */
	init_timer(&ap_config_timer);
	ap_config_timer.function = ap_config_timeout;
	ap_config_timer.data = 0;
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);

	/* Start the low priority AP bus poll thread. */
	if (ap_thread_flag) {
		rc = ap_poll_thread_start();
		if (rc)
			goto out_work;
	}

	return 0;

out_work:
	del_timer_sync(&ap_config_timer);
	del_timer_sync(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
out_root:
	s390_root_dev_unregister(ap_root_device);
out_bus:
	/* i indexes the first attribute NOT created (or the NULL
	 * terminator), so counting down removes exactly the created ones. */
	while (i--)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
out:
	unregister_reset_call(&ap_reset_call);
	return rc;
}
1321
/* Match callback for bus_find_device() that accepts every device. */
static int __ap_match_all(struct device *dev, void *data)
{
	return 1;
}
1326
/**
 * The module termination code
 *
 * Tears down roughly in reverse order of ap_module_init: reset the
 * configured domain's queues, stop the poll thread, timers, work
 * queue and tasklet, then unregister every device still on the bus
 * followed by the bus itself and the reset callback.
 */
void ap_module_exit(void)
{
	int i;
	struct device *dev;

	ap_reset_domain();
	ap_poll_thread_stop();
	del_timer_sync(&ap_config_timer);
	del_timer_sync(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
	tasklet_kill(&ap_tasklet);
	s390_root_dev_unregister(ap_root_device);
	/* bus_find_device returns each device with a reference held;
	 * drop it with put_device after unregistering. */
	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
		      __ap_match_all)))
	{
		device_unregister(dev);
		put_device(dev);
	}
	for (i = 0; ap_bus_attrs[i]; i++)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
	unregister_reset_call(&ap_reset_call);
}
1353
1354#ifndef CONFIG_ZCRYPT_MONOLITHIC
1355module_init(ap_module_init);
1356module_exit(ap_module_exit);
1357#endif