/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "
/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 1;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
        complete(&sclp_request_queue_flushed);
}

static int __init sclp_setup_console_pages(char *str)
{
        int pages, rc;

        rc = kstrtoint(str, 0, &pages);
        if (!rc && pages >= SCLP_CONSOLE_PAGES)
                sclp_console_pages = pages;
        return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

static int __init sclp_setup_console_drop(char *str)
{
        int drop, rc;

        rc = kstrtoint(str, 0, &drop);
        if (!rc)
                sclp_console_drop = drop;
        return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);
static struct sclp_req sclp_suspend_req;

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;
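
/* The two timers above serve different purposes: sclp_request_timer drives
 * retry and recovery of the single request currently active at the SCLP,
 * while sclp_queue_timer expires requests that have waited too long on
 * sclp_req_queue (see sclp_req_queue_timeout below). */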
/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
        sclp_running_state_idle,
        sclp_running_state_running,
        sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
        sclp_reading_state_idle,
        sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
        sclp_activation_state_active,
        sclp_activation_state_deactivating,
        sclp_activation_state_inactive,
        sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
        sclp_mask_state_idle,
        sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
        sclp_suspend_state_running,
        sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;
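
/* The state variables above are volatile because sclp_sync_wait busy-waits
 * on sclp_running_state with the timer tick disabled while the external
 * interrupt handler updates the state concurrently. */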
/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);
static void
__sclp_queue_read_req(void)
{
        if (sclp_reading_state == sclp_reading_state_idle) {
                sclp_reading_state = sclp_reading_state_reading;
                __sclp_make_read_req();
                /* Add request to head of queue */
                list_add(&sclp_read_req.list, &sclp_req_queue);
        }
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
                         unsigned long data)
{
        del_timer(&sclp_request_timer);
        sclp_request_timer.function = function;
        sclp_request_timer.data = data;
        sclp_request_timer.expires = jiffies + time;
        add_timer(&sclp_request_timer);
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (data) {
                if (sclp_running_state == sclp_running_state_running) {
                        /* Break running state and queue NOP read event request
                         * to get a defined interface state. */
                        __sclp_queue_read_req();
                        sclp_running_state = sclp_running_state_idle;
                }
        } else {
                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                         sclp_request_timeout, 0);
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_process_queue();
}
/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
        unsigned long expires_next = 0;
        struct sclp_req *req;

        list_for_each_entry(req, &sclp_req_queue, list) {
                if (!req->queue_expires)
                        continue;
                if (!expires_next ||
                    (time_before(req->queue_expires, expires_next)))
                        expires_next = req->queue_expires;
        }
        return expires_next;
}

/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
        unsigned long flags, now;
        struct sclp_req *req;

        spin_lock_irqsave(&sclp_lock, flags);
        now = jiffies;
        /* Don't need list_for_each_safe because we break out after list_del */
        list_for_each_entry(req, &sclp_req_queue, list) {
                if (!req->queue_expires)
                        continue;
                if (time_before_eq(req->queue_expires, now)) {
                        if (req->status == SCLP_REQ_QUEUED) {
                                req->status = SCLP_REQ_QUEUED_TIMEOUT;
                                list_del(&req->list);
                                goto out;
                        }
                }
        }
        req = NULL;
out:
        spin_unlock_irqrestore(&sclp_lock, flags);
        return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(unsigned long data)
{
        unsigned long flags, expires_next;
        struct sclp_req *req;

        do {
                req = __sclp_req_queue_remove_expired_req();
                if (req && req->callback)
                        req->callback(req, req->callback_data);
        } while (req);

        spin_lock_irqsave(&sclp_lock, flags);
        expires_next = __sclp_req_queue_find_next_timeout();
        if (expires_next)
                mod_timer(&sclp_queue_timer, expires_next);
        spin_unlock_irqrestore(&sclp_lock, flags);
}
/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
        int rc;

        if (sclp_running_state != sclp_running_state_idle)
                return 0;
        del_timer(&sclp_request_timer);
        rc = sclp_service_call(req->command, req->sccb);
        req->start_count++;

        if (rc == 0) {
                /* Successfully started request */
                req->status = SCLP_REQ_RUNNING;
                sclp_running_state = sclp_running_state_running;
                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
                                         sclp_request_timeout, 1);
                return 0;
        } else if (rc == -EBUSY) {
                /* Try again later */
                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                         sclp_request_timeout, 0);
                return 0;
        }
        /* Request failed */
        req->status = SCLP_REQ_FAILED;
        return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
        struct sclp_req *req;
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (sclp_running_state != sclp_running_state_idle) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return;
        }
        del_timer(&sclp_request_timer);
        while (!list_empty(&sclp_req_queue)) {
                req = list_entry(sclp_req_queue.next, struct sclp_req, list);
                if (!req->sccb)
                        goto do_post;
                rc = __sclp_start_request(req);
                if (rc == 0)
                        break;
                /* Request failed */
                if (req->start_count > 1) {
                        /* Cannot abort already submitted request - could still
                         * be active at the SCLP */
                        __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                                 sclp_request_timeout, 0);
                        break;
                }
do_post:
                /* Post-processing for aborted request */
                list_del(&req->list);
                if (req->callback) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        req->callback(req, req->callback_data);
                        spin_lock_irqsave(&sclp_lock, flags);
                }
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}

static int __sclp_can_add_request(struct sclp_req *req)
{
        if (req == &sclp_suspend_req || req == &sclp_init_req)
                return 1;
        if (sclp_suspend_state != sclp_suspend_state_running)
                return 0;
        if (sclp_init_state != sclp_init_state_initialized)
                return 0;
        if (sclp_activation_state != sclp_activation_state_active)
                return 0;
        return 1;
}
/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        if (!__sclp_can_add_request(req)) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EIO;
        }
        req->status = SCLP_REQ_QUEUED;
        req->start_count = 0;
        list_add_tail(&req->list, &sclp_req_queue);
        rc = 0;
        if (req->queue_timeout) {
                req->queue_expires = jiffies + req->queue_timeout * HZ;
                if (!timer_pending(&sclp_queue_timer) ||
                    time_after(sclp_queue_timer.expires, req->queue_expires))
                        mod_timer(&sclp_queue_timer, req->queue_expires);
        } else
                req->queue_expires = 0;
        /* Start if request is first in list */
        if (sclp_running_state == sclp_running_state_idle &&
            req->list.prev == &sclp_req_queue) {
                if (!req->sccb) {
                        list_del(&req->list);
                        rc = -ENODATA;
                        goto out;
                }
                rc = __sclp_start_request(req);
                if (rc)
                        list_del(&req->list);
        }
out:
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_add_request);
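
/*
 * Illustrative sketch of how a caller submits a request; the names used
 * here (my_req, my_sccb, my_callback) are made up for the example:
 *
 *	my_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
 *	my_req.sccb = my_sccb;
 *	my_req.status = SCLP_REQ_FILLED;
 *	my_req.callback = my_callback;
 *	my_req.callback_data = NULL;
 *	my_req.queue_timeout = 0;
 *	rc = sclp_add_request(&my_req);
 *
 * On completion, my_callback runs with my_req.status set to SCLP_REQ_DONE,
 * SCLP_REQ_FAILED or SCLP_REQ_QUEUED_TIMEOUT.
 */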
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
        unsigned long flags;
        struct evbuf_header *evbuf;
        struct list_head *l;
        struct sclp_register *reg;
        int offset;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        rc = 0;
        for (offset = sizeof(struct sccb_header); offset < sccb->length;
             offset += evbuf->length) {
                evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
                /* Check for malformed hardware response */
                if (evbuf->length == 0)
                        break;
                /* Search for event handler */
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
                        if (reg->receive_mask & (1 << (32 - evbuf->type)))
                                break;
                        else
                                reg = NULL;
                }
                if (reg && reg->receiver_fn) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        reg->receiver_fn(evbuf);
                        spin_lock_irqsave(&sclp_lock, flags);
                } else if (reg == NULL)
                        rc = -EOPNOTSUPP;
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}
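
/* Event type numbers are 1-based, with type 1 corresponding to the
 * most-significant bit of the 32-bit event mask; hence the
 * (1 << (32 - evbuf->type)) test above. */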
/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
        unsigned long flags;
        struct sccb_header *sccb;

        sccb = (struct sccb_header *) req->sccb;
        if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
            sccb->response_code == 0x220))
                sclp_dispatch_evbufs(sccb);
        spin_lock_irqsave(&sclp_lock, flags);
        sclp_reading_state = sclp_reading_state_idle;
        spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
        struct sccb_header *sccb;

        sccb = (struct sccb_header *) sclp_read_sccb;
        clear_page(sccb);
        memset(&sclp_read_req, 0, sizeof(struct sclp_req));
        sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
        sclp_read_req.status = SCLP_REQ_QUEUED;
        sclp_read_req.start_count = 0;
        sclp_read_req.callback = sclp_read_cb;
        sclp_read_req.sccb = sccb;
        sccb->length = PAGE_SIZE;
        sccb->function_code = 0;
        sccb->control_mask[2] = 0x80;
}
/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
        struct list_head *l;
        struct sclp_req *req;

        list_for_each(l, &sclp_req_queue) {
                req = list_entry(l, struct sclp_req, list);
                if (sccb == (u32) (addr_t) req->sccb)
                        return req;
        }
        return NULL;
}
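
/* The service-signal interruption parameter (param32 below) apparently
 * carries the address of the finished SCCB in its upper bits - SCCBs are
 * 8-byte aligned, so the low three bits are free to act as flags - while
 * the low-order bits indicate pending event buffers. */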
/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
                                   unsigned int param32, unsigned long param64)
{
        struct sclp_req *req;
        u32 finished_sccb;
        u32 evbuf_pending;

        inc_irq_stat(IRQEXT_SCP);
        spin_lock(&sclp_lock);
        finished_sccb = param32 & 0xfffffff8;
        evbuf_pending = param32 & 0x3;
        if (finished_sccb) {
                del_timer(&sclp_request_timer);
                sclp_running_state = sclp_running_state_reset_pending;
                req = __sclp_find_req(finished_sccb);
                if (req) {
                        /* Request post-processing */
                        list_del(&req->list);
                        req->status = SCLP_REQ_DONE;
                        if (req->callback) {
                                spin_unlock(&sclp_lock);
                                req->callback(req, req->callback_data);
                                spin_lock(&sclp_lock);
                        }
                }
                sclp_running_state = sclp_running_state_idle;
        }
        if (evbuf_pending &&
            sclp_activation_state == sclp_activation_state_active)
                __sclp_queue_read_req();
        spin_unlock(&sclp_lock);
        sclp_process_queue();
}
/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
        return (u64) (jiffies / HZ) << 32;
}
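
/* The conversion above is an approximation: TOD-clock bit 51 ticks every
 * microsecond, so 2^32 TOD units are roughly 1.05 seconds. The resulting
 * timeout is slightly longer than requested, which is acceptable for the
 * retry logic in sclp_sync_wait. */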
/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
        unsigned long long old_tick;
        unsigned long flags;
        unsigned long cr0, cr0_sync;
        u64 timeout;
        int irq_context;

        /* We'll be disabling timer interrupts, so we need a custom timeout
         * mechanism */
        timeout = 0;
        if (timer_pending(&sclp_request_timer)) {
                /* Get timeout TOD value */
                timeout = get_tod_clock_fast() +
                          sclp_tod_from_jiffies(sclp_request_timer.expires -
                                                jiffies);
        }
        local_irq_save(flags);
        /* Prevent bottom half from executing once we force interrupts open */
        irq_context = in_interrupt();
        if (!irq_context)
                local_bh_disable();
        /* Enable service-signal interruption, disable timer interrupts */
        old_tick = local_tick_disable();
        trace_hardirqs_on();
        __ctl_store(cr0, 0, 0);
        cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
        cr0_sync |= 1UL << (63 - 54);	/* CR0 bit 54: service signal */
        __ctl_load(cr0_sync, 0, 0);
        __arch_local_irq_stosm(0x01);
        /* Loop until driver state indicates finished request */
        while (sclp_running_state != sclp_running_state_idle) {
                /* Check for expired request timer */
                if (timer_pending(&sclp_request_timer) &&
                    get_tod_clock_fast() > timeout &&
                    del_timer(&sclp_request_timer))
                        sclp_request_timer.function(sclp_request_timer.data);
                cpu_relax();
        }
        local_irq_disable();
        __ctl_load(cr0, 0, 0);
        if (!irq_context)
                _local_bh_enable();
        local_tick_enable(old_tick);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);
/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
        struct list_head *l;
        struct sclp_register *reg;
        unsigned long flags;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;

        do {
                spin_lock_irqsave(&sclp_lock, flags);
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
                        receive_mask = reg->send_mask & sclp_receive_mask;
                        send_mask = reg->receive_mask & sclp_send_mask;
                        if (reg->sclp_receive_mask != receive_mask ||
                            reg->sclp_send_mask != send_mask) {
                                reg->sclp_receive_mask = receive_mask;
                                reg->sclp_send_mask = send_mask;
                                break;
                        } else
                                reg = NULL;
                }
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (reg && reg->state_change_fn)
                        reg->state_change_fn(reg);
        } while (reg != NULL);
}

struct sclp_statechangebuf {
        struct evbuf_header	header;
        u8		validity_sclp_active_facility_mask : 1;
        u8		validity_sclp_receive_mask : 1;
        u8		validity_sclp_send_mask : 1;
        u8		validity_read_data_function_mask : 1;
        u16		_zeros : 12;
        u16		mask_length;
        u64		sclp_active_facility_mask;
        sccb_mask_t	sclp_receive_mask;
        sccb_mask_t	sclp_send_mask;
        u32		read_data_function_mask;
} __attribute__((packed));
/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
        unsigned long flags;
        struct sclp_statechangebuf *scbuf;

        scbuf = (struct sclp_statechangebuf *) evbuf;
        if (scbuf->mask_length != sizeof(sccb_mask_t))
                return;
        spin_lock_irqsave(&sclp_lock, flags);
        if (scbuf->validity_sclp_receive_mask)
                sclp_receive_mask = scbuf->sclp_receive_mask;
        if (scbuf->validity_sclp_send_mask)
                sclp_send_mask = scbuf->sclp_send_mask;
        spin_unlock_irqrestore(&sclp_lock, flags);
        if (scbuf->validity_sclp_active_facility_mask)
                sclp.facilities = scbuf->sclp_active_facility_mask;
        sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
        .receive_mask = EVTYP_STATECHANGE_MASK,
        .receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
        struct list_head *l;
        struct sclp_register *t;

        *receive_mask = 0;
        *send_mask = 0;
        list_for_each(l, &sclp_reg_list) {
                t = list_entry(l, struct sclp_register, list);
                *receive_mask |= t->receive_mask;
                *send_mask |= t->send_mask;
        }
}
/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
        unsigned long flags;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        int rc;

        rc = sclp_init();
        if (rc)
                return rc;
        spin_lock_irqsave(&sclp_lock, flags);
        /* Check event mask for collisions */
        __sclp_get_mask(&receive_mask, &send_mask);
        if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EBUSY;
        }
        /* Trigger initial state change callback */
        reg->sclp_receive_mask = 0;
        reg->sclp_send_mask = 0;
        reg->pm_event_posted = 0;
        list_add(&reg->list, &sclp_reg_list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
        if (rc) {
                spin_lock_irqsave(&sclp_lock, flags);
                list_del(&reg->list);
                spin_unlock_irqrestore(&sclp_lock, flags);
        }
        return rc;
}

EXPORT_SYMBOL(sclp_register);
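
/*
 * Illustrative sketch of a listener registration; my_listener,
 * my_receiver_fn and the event mask shown are made up for the example:
 *
 *	static struct sclp_register my_listener = {
 *		.receive_mask = EVTYP_OPCMD_MASK,
 *		.receiver_fn = my_receiver_fn,
 *	};
 *	rc = sclp_register(&my_listener);
 *
 * Registration fails with -EBUSY if another listener already claimed one
 * of the requested event types.
 */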
/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        list_del(&reg->list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
        struct evbuf_header *evbuf;
        int remaining;
        int count;

        evbuf = (struct evbuf_header *) (sccb + 1);
        count = 0;
        remaining = sccb->length - sizeof(struct sccb_header);
        while (remaining > 0) {
                remaining -= evbuf->length;
                if (evbuf->flags & 0x80) {
                        /* Remove processed buffer */
                        sccb->length -= evbuf->length;
                        memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
                               remaining);
                } else {
                        count++;
                        evbuf = (struct evbuf_header *)
                                ((addr_t) evbuf + evbuf->length);
                }
        }
        return count;
}

EXPORT_SYMBOL(sclp_remove_processed);
/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
        struct init_sccb *sccb;

        sccb = (struct init_sccb *) sclp_init_sccb;
        clear_page(sccb);
        memset(&sclp_init_req, 0, sizeof(struct sclp_req));
        sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
        sclp_init_req.status = SCLP_REQ_FILLED;
        sclp_init_req.start_count = 0;
        sclp_init_req.callback = NULL;
        sclp_init_req.callback_data = NULL;
        sclp_init_req.sccb = sccb;
        sccb->header.length = sizeof(struct init_sccb);
        sccb->mask_length = sizeof(sccb_mask_t);
        sccb->receive_mask = receive_mask;
        sccb->send_mask = send_mask;
        sccb->sclp_receive_mask = 0;
        sccb->sclp_send_mask = 0;
}
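
/* In the WRITE_EVENT_MASK SCCB, receive_mask/send_mask request the masks
 * this driver wants, while sclp_receive_mask/sclp_send_mask are filled in
 * by the SCLP with the masks actually granted and read back in
 * sclp_init_mask below. */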
/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
        unsigned long flags;
        struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        int retry;
        int rc;
        unsigned long wait;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Check if interface is in appropriate state */
        if (sclp_mask_state != sclp_mask_state_idle) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EBUSY;
        }
        if (sclp_activation_state == sclp_activation_state_inactive) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_mask_state = sclp_mask_state_initializing;
        /* Determine mask */
        if (calculate)
                __sclp_get_mask(&receive_mask, &send_mask);
        else {
                receive_mask = 0;
                send_mask = 0;
        }
        rc = -EIO;
        for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
                /* Prepare request */
                __sclp_make_init_req(receive_mask, send_mask);
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (sclp_add_request(&sclp_init_req)) {
                        /* Try again later */
                        wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
                        while (time_before(jiffies, wait))
                                sclp_sync_wait();
                        spin_lock_irqsave(&sclp_lock, flags);
                        continue;
                }
                while (sclp_init_req.status != SCLP_REQ_DONE &&
                       sclp_init_req.status != SCLP_REQ_FAILED)
                        sclp_sync_wait();
                spin_lock_irqsave(&sclp_lock, flags);
                if (sclp_init_req.status == SCLP_REQ_DONE &&
                    sccb->header.response_code == 0x20) {
                        /* Successful request */
                        if (calculate) {
                                sclp_receive_mask = sccb->sclp_receive_mask;
                                sclp_send_mask = sccb->sclp_send_mask;
                        } else {
                                sclp_receive_mask = 0;
                                sclp_send_mask = 0;
                        }
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        sclp_dispatch_state_change();
                        spin_lock_irqsave(&sclp_lock, flags);
                        rc = 0;
                        break;
                }
        }
        sclp_mask_state = sclp_mask_state_idle;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}
/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Deactivate can only be called when active */
        if (sclp_activation_state != sclp_activation_state_active) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_activation_state = sclp_activation_state_deactivating;
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(0);
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc == 0)
                sclp_activation_state = sclp_activation_state_inactive;
        else
                sclp_activation_state = sclp_activation_state_active;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Reactivate can only be called when inactive */
        if (sclp_activation_state != sclp_activation_state_inactive) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_activation_state = sclp_activation_state_activating;
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc == 0)
                sclp_activation_state = sclp_activation_state_active;
        else
                sclp_activation_state = sclp_activation_state_inactive;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_reactivate);
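
/* sclp_deactivate/sclp_reactivate bracket periods during which no SCLP
 * traffic may occur; they are used by the reboot notifier and the
 * suspend/resume path below. */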
/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
                               unsigned int param32, unsigned long param64)
{
        u32 finished_sccb;

        inc_irq_stat(IRQEXT_SCP);
        finished_sccb = param32 & 0xfffffff8;
        /* Is this the interrupt we are waiting for? */
        if (finished_sccb == 0)
                return;
        if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
                panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
                      finished_sccb);
        spin_lock(&sclp_lock);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_DONE;
                sclp_running_state = sclp_running_state_idle;
        }
        spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_FAILED;
                sclp_running_state = sclp_running_state_idle;
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}
/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
        struct init_sccb *sccb;
        unsigned long flags;
        int retry;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Prepare init mask command */
        rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
        if (rc) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return rc;
        }
        for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
                __sclp_make_init_req(0, 0);
                sccb = (struct init_sccb *) sclp_init_req.sccb;
                rc = sclp_service_call(sclp_init_req.command, sccb);
                if (rc == -EIO)
                        break;
                sclp_init_req.status = SCLP_REQ_RUNNING;
                sclp_running_state = sclp_running_state_running;
                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
                                         sclp_check_timeout, 0);
                spin_unlock_irqrestore(&sclp_lock, flags);
                /* Enable service-signal interruption - needs to happen
                 * with IRQs enabled. */
                irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
                /* Wait for signal from interrupt or timeout */
                sclp_sync_wait();
                /* Disable service-signal interruption - needs to happen
                 * with IRQs enabled. */
                irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
                spin_lock_irqsave(&sclp_lock, flags);
                del_timer(&sclp_request_timer);
                if (sclp_init_req.status == SCLP_REQ_DONE &&
                    sccb->header.response_code == 0x20) {
                        rc = 0;
                        break;
                } else
                        rc = -EBUSY;
        }
        unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}
/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        sclp_deactivate();
        return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
        .notifier_call = sclp_reboot_event
};
/*
 * Suspend/resume SCLP notifier implementation
 */

static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
        struct sclp_register *reg;
        unsigned long flags;

        if (!rollback) {
                spin_lock_irqsave(&sclp_lock, flags);
                list_for_each_entry(reg, &sclp_reg_list, list)
                        reg->pm_event_posted = 0;
                spin_unlock_irqrestore(&sclp_lock, flags);
        }
        do {
                spin_lock_irqsave(&sclp_lock, flags);
                list_for_each_entry(reg, &sclp_reg_list, list) {
                        if (rollback && reg->pm_event_posted)
                                goto found;
                        if (!rollback && !reg->pm_event_posted)
                                goto found;
                }
                spin_unlock_irqrestore(&sclp_lock, flags);
                return;
found:
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (reg->pm_event_fn)
                        reg->pm_event_fn(reg, sclp_pm_event);
                reg->pm_event_posted = rollback ? 0 : 1;
        } while (1);
}
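
/* The pm_event_posted flag makes this loop idempotent: a normal pass posts
 * the event once to each listener, while a rollback pass re-posts only to
 * listeners that already saw the original event, undoing it. Because the
 * lock is dropped around the callback, the list is rescanned from the
 * start on every iteration. */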
1027 * Susend/resume callbacks for platform device
1030 static int sclp_freeze(struct device
*dev
)
1032 unsigned long flags
;
1035 sclp_pm_event(SCLP_PM_EVENT_FREEZE
, 0);
1037 spin_lock_irqsave(&sclp_lock
, flags
);
1038 sclp_suspend_state
= sclp_suspend_state_suspended
;
1039 spin_unlock_irqrestore(&sclp_lock
, flags
);
1041 /* Init supend data */
1042 memset(&sclp_suspend_req
, 0, sizeof(sclp_suspend_req
));
1043 sclp_suspend_req
.callback
= sclp_suspend_req_cb
;
1044 sclp_suspend_req
.status
= SCLP_REQ_FILLED
;
1045 init_completion(&sclp_request_queue_flushed
);
1047 rc
= sclp_add_request(&sclp_suspend_req
);
1049 wait_for_completion(&sclp_request_queue_flushed
);
1050 else if (rc
!= -ENODATA
)
1053 rc
= sclp_deactivate();
1059 spin_lock_irqsave(&sclp_lock
, flags
);
1060 sclp_suspend_state
= sclp_suspend_state_running
;
1061 spin_unlock_irqrestore(&sclp_lock
, flags
);
1062 sclp_pm_event(SCLP_PM_EVENT_THAW
, 1);
1066 static int sclp_undo_suspend(enum sclp_pm_event event
)
1068 unsigned long flags
;
1071 rc
= sclp_reactivate();
1075 spin_lock_irqsave(&sclp_lock
, flags
);
1076 sclp_suspend_state
= sclp_suspend_state_running
;
1077 spin_unlock_irqrestore(&sclp_lock
, flags
);
1079 sclp_pm_event(event
, 0);
1083 static int sclp_thaw(struct device
*dev
)
1085 return sclp_undo_suspend(SCLP_PM_EVENT_THAW
);
1088 static int sclp_restore(struct device
*dev
)
1090 return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE
);
static const struct dev_pm_ops sclp_pm_ops = {
        .freeze		= sclp_freeze,
        .thaw		= sclp_thaw,
        .restore	= sclp_restore,
};
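
/* Only hibernation callbacks are populated; .suspend/.resume are left
 * unset, presumably because s390 supports suspend-to-disk only. */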
static ssize_t sclp_show_console_pages(struct device_driver *dev, char *buf)
{
        return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR(con_pages, S_IRUSR, sclp_show_console_pages, NULL);

static ssize_t sclp_show_con_drop(struct device_driver *dev, char *buf)
{
        return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR(con_drop, S_IRUSR, sclp_show_con_drop, NULL);

static ssize_t sclp_show_console_full(struct device_driver *dev, char *buf)
{
        return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR(con_full, S_IRUSR, sclp_show_console_full, NULL);
static struct attribute *sclp_drv_attrs[] = {
        &driver_attr_con_pages.attr,
        &driver_attr_con_drop.attr,
        &driver_attr_con_full.attr,
        NULL,
};
static struct attribute_group sclp_drv_attr_group = {
        .attrs = sclp_drv_attrs,
};
static const struct attribute_group *sclp_drv_attr_groups[] = {
        &sclp_drv_attr_group,
        NULL,
};

static struct platform_driver sclp_pdrv = {
        .driver = {
                .name	= "sclp",
                .pm	= &sclp_pm_ops,
                .groups = sclp_drv_attr_groups,
        },
};

static struct platform_device *sclp_pdev;
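
/* The platform device/driver pair exists so that the PM core invokes the
 * dev_pm_ops above; the driver attributes appear under
 * /sys/bus/platform/drivers/sclp/. */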
/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Check for previous or running initialization */
        if (sclp_init_state != sclp_init_state_uninitialized)
                goto fail_unlock;
        sclp_init_state = sclp_init_state_initializing;
        /* Set up variables */
        INIT_LIST_HEAD(&sclp_req_queue);
        INIT_LIST_HEAD(&sclp_reg_list);
        list_add(&sclp_state_change_event.list, &sclp_reg_list);
        init_timer(&sclp_request_timer);
        init_timer(&sclp_queue_timer);
        sclp_queue_timer.function = sclp_req_queue_timeout;
        /* Check interface */
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_check_interface();
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc)
                goto fail_init_state_uninitialized;
        /* Register reboot handler */
        rc = register_reboot_notifier(&sclp_reboot_notifier);
        if (rc)
                goto fail_init_state_uninitialized;
        /* Register interrupt handler */
        rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
        if (rc)
                goto fail_unregister_reboot_notifier;
        sclp_init_state = sclp_init_state_initialized;
        spin_unlock_irqrestore(&sclp_lock, flags);
        /* Enable service-signal external interruption - needs to happen with
         * IRQs enabled. */
        irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
        sclp_init_mask(1);
        return 0;

fail_unregister_reboot_notifier:
        unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
        sclp_init_state = sclp_init_state_uninitialized;
fail_unlock:
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}
/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
 * to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
                             unsigned long event, void *data)
{
        if (sclp_suspend_state == sclp_suspend_state_suspended)
                sclp_undo_suspend(SCLP_PM_EVENT_THAW);
        return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
        .notifier_call = sclp_panic_notify,
        .priority = SCLP_PANIC_PRIO,
};
static __init int sclp_initcall(void)
{
        int rc;

        rc = platform_driver_register(&sclp_pdrv);
        if (rc)
                return rc;

        sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
        rc = PTR_ERR_OR_ZERO(sclp_pdev);
        if (rc)
                goto fail_platform_driver_unregister;

        rc = atomic_notifier_chain_register(&panic_notifier_list,
                                            &sclp_on_panic_nb);
        if (rc)
                goto fail_platform_device_unregister;

        return sclp_init();

fail_platform_device_unregister:
        platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
        platform_driver_unregister(&sclp_pdrv);
        return rc;
}

arch_initcall(sclp_initcall);
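
/* sclp_init may already have run earlier via sclp_register (e.g. for the
 * early console); the initcall makes sure initialization happens even if
 * no listener registered, and sclp_init itself guards against double
 * initialization via sclp_init_state. */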