// SPDX-License-Identifier: GPL-2.0
/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

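/*
 * Note (added for clarity): the two masks above appear to be named from the
 * service element's point of view - sclp_receive_mask describes what the
 * SCLP will accept (i.e. what we may send), sclp_send_mask what it may
 * deliver (i.e. what we can receive).
 */
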
/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

static DECLARE_COMPLETION(sclp_request_queue_flushed);

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 1;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
	complete(&sclp_request_queue_flushed);
}

static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

static int __init sclp_setup_console_drop(char *str)
{
	int drop, rc;

	rc = kstrtoint(str, 0, &drop);
	if (!rc)
		sclp_console_drop = drop;
	return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);

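/*
 * The two __setup() handlers above implement kernel command-line options,
 * e.g. booting with
 *
 *	sclp_con_pages=8 sclp_con_drop=0
 *
 * would reserve eight console pages and keep buffer pages on a buffer-full
 * condition instead of dropping them.
 */
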
/* Suspend request */
static struct sclp_req sclp_suspend_req;

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
	sclp_suspend_state_running,
	sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;

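/*
 * Note (added for clarity): the state variables above form small,
 * independent state machines - sclp_running_state and sclp_reading_state
 * track the hardware request cycle, sclp_activation_state gates whether
 * requests and events are served at all, sclp_mask_state serializes init
 * mask requests, and sclp_suspend_state reflects power management.
 */
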
/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (data) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
	unsigned long expires_next = 0;
	struct sclp_req *req;

	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (!expires_next ||
		    (time_before(req->queue_expires, expires_next)))
			expires_next = req->queue_expires;
	}
	return expires_next;
}

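/*
 * Note (added for clarity): a queue_expires value of 0 doubles as "no
 * per-request timeout", so a return value of 0 above means that no queued
 * request currently has a timeout armed.
 */
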
/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
	unsigned long flags, now;
	struct sclp_req *req;

	spin_lock_irqsave(&sclp_lock, flags);
	now = jiffies;
	/* Don't need list_for_each_safe because we break out after list_del */
	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (time_before_eq(req->queue_expires, now)) {
			if (req->status == SCLP_REQ_QUEUED) {
				req->status = SCLP_REQ_QUEUED_TIMEOUT;
				list_del(&req->list);
				goto out;
			}
		}
	}
	req = NULL;
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(unsigned long data)
{
	unsigned long flags, expires_next;
	struct sclp_req *req;

	do {
		req = __sclp_req_queue_remove_expired_req();
		if (req && req->callback)
			req->callback(req, req->callback_data);
	} while (req);

	spin_lock_irqsave(&sclp_lock, flags);
	expires_next = __sclp_req_queue_find_next_timeout();
	if (expires_next)
		mod_timer(&sclp_queue_timer, expires_next);
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

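/*
 * Note (added for clarity): the retry timer above is armed with data == 1
 * after a successful start, so an expiry forcibly resets the running
 * request (see sclp_request_timeout()), and with data == 0 for a plain
 * "retry the queue later" in the -EBUSY case.
 */
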
/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		if (!req->sccb)
			goto do_post;
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout, 0);
			break;
		}
do_post:
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_suspend_req || req == &sclp_init_req)
		return 1;
	if (sclp_suspend_state != sclp_suspend_state_running)
		return 0;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	if (req->queue_timeout) {
		req->queue_expires = jiffies + req->queue_timeout * HZ;
		if (!timer_pending(&sclp_queue_timer) ||
		    time_after(sclp_queue_timer.expires, req->queue_expires))
			mod_timer(&sclp_queue_timer, req->queue_expires);
	} else
		req->queue_expires = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		if (!req->sccb) {
			list_del(&req->list);
			rc = -ENODATA;
			goto out;
		}
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);

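/*
 * Illustrative use of the exported interface; my_sccb and my_callback are
 * hypothetical, sketched here only to show the expected setup:
 *
 *	static struct sclp_req my_req;
 *
 *	my_req.command = SCLP_CMDW_READ_EVENT_DATA;
 *	my_req.sccb = my_sccb;			(page-aligned SCCB buffer)
 *	my_req.status = SCLP_REQ_FILLED;
 *	my_req.callback = my_callback;
 *	my_req.callback_data = NULL;
 *	rc = sclp_add_request(&my_req);
 *
 * The callback runs once the request finishes or is aborted.
 */
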
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

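/*
 * Note (added for clarity): event type n is represented by bit n counted
 * from the most significant bit of the 32-bit event mask, hence the
 * 1 << (32 - evbuf->type) test above.
 */
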
/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}

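/*
 * Note (added for clarity): this is an approximation - bit 51 of the TOD
 * clock ticks every microsecond, so one second is about 4.096e9 TOD units
 * while 1 << 32 is about 4.295e9. The resulting timeout is slightly longer
 * than requested, which is fine for the coarse intervals used here.
 */
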
/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock_fast() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
	cr0_sync |= 1UL << (63 - 54);
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_tod_clock_fast() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);

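/*
 * Design note (added for clarity): sclp_sync_wait() busy-waits with the
 * regular timer tick disabled and only the service-signal subclass enabled
 * in control register 0, which is why the request timer has to be polled
 * and fired by hand inside the loop.
 */
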
/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg != NULL);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32		read_data_function_mask;
} __attribute__((packed));

/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp.facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	reg->pm_event_posted = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);

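/*
 * Illustrative only: a listener mirrors sclp_state_change_event above,
 * e.g. (hypothetical handler; EVTYP_MSG_MASK is one of the event-type
 * masks from sclp.h):
 *
 *	static struct sclp_register my_listener = {
 *		.receive_mask = EVTYP_MSG_MASK,
 *		.receiver_fn = my_receiver_fn,
 *	};
 *
 * registered with sclp_register(&my_listener) and removed again with
 * sclp_unregister(&my_listener). Registration fails with -EBUSY if any
 * mask bit is already claimed by another listener.
 */
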
/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

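/*
 * Note (added for clarity): bit 0x80 in the event buffer flags marks a
 * buffer as processed; processed buffers are squeezed out by the memcpy()
 * above and the SCCB length shrinks accordingly.
 */
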
/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

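/*
 * Note (added for clarity): the mask request is retried up to
 * SCLP_MASK_RETRY + 1 times, and completion is polled via sclp_sync_wait()
 * rather than by sleeping.
 */
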
/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	inc_irq_stat(IRQEXT_SCP);
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

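/*
 * Note (added for clarity): the probe above issues a write-event-mask
 * request with all masks zeroed; a 0x20 (normal completion) response code
 * marks the interface as usable.
 */
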
/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

/*
 * Suspend/resume SCLP notifier implementation
 */

static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
	struct sclp_register *reg;
	unsigned long flags;

	if (!rollback) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list)
			reg->pm_event_posted = 0;
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	do {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list) {
			if (rollback && reg->pm_event_posted)
				goto found;
			if (!rollback && !reg->pm_event_posted)
				goto found;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
found:
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg->pm_event_fn)
			reg->pm_event_fn(reg, sclp_pm_event);
		reg->pm_event_posted = rollback ? 0 : 1;
	} while (1);
}

/*
 * Suspend/resume callbacks for platform device
 */

static int sclp_freeze(struct device *dev)
{
	unsigned long flags;
	int rc;

	sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_suspended;
	spin_unlock_irqrestore(&sclp_lock, flags);

	/* Init suspend data */
	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
	sclp_suspend_req.callback = sclp_suspend_req_cb;
	sclp_suspend_req.status = SCLP_REQ_FILLED;
	init_completion(&sclp_request_queue_flushed);

	rc = sclp_add_request(&sclp_suspend_req);
	if (rc == 0)
		wait_for_completion(&sclp_request_queue_flushed);
	else if (rc != -ENODATA)
		goto fail_thaw;

	rc = sclp_deactivate();
	if (rc)
		goto fail_thaw;
	return 0;

fail_thaw:
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
	return rc;
}

static int sclp_undo_suspend(enum sclp_pm_event event)
{
	unsigned long flags;
	int rc;

	rc = sclp_reactivate();
	if (rc)
		return rc;

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);

	sclp_pm_event(event, 0);
	return 0;
}

static int sclp_thaw(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}

static int sclp_restore(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}

static const struct dev_pm_ops sclp_pm_ops = {
	.freeze		= sclp_freeze,
	.thaw		= sclp_thaw,
	.restore	= sclp_restore,
};

static ssize_t con_pages_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR_RO(con_pages);

static ssize_t con_drop_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR_RO(con_drop);

static ssize_t con_full_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR_RO(con_full);

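/*
 * Note (added for clarity): these are read-only driver attributes; with
 * sclp_drv_attr_groups wired into sclp_pdrv below they are expected to
 * show up under /sys/bus/platform/drivers/sclp/.
 */
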
static struct attribute *sclp_drv_attrs[] = {
	&driver_attr_con_pages.attr,
	&driver_attr_con_drop.attr,
	&driver_attr_con_full.attr,
	NULL,
};
static struct attribute_group sclp_drv_attr_group = {
	.attrs = sclp_drv_attrs,
};
static const struct attribute_group *sclp_drv_attr_groups[] = {
	&sclp_drv_attr_group,
	NULL,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name	= "sclp",
		.pm	= &sclp_pm_ops,
		.groups = sclp_drv_attr_groups,
	},
};

static struct platform_device *sclp_pdev;

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	init_timer(&sclp_queue_timer);
	sclp_queue_timer.function = sclp_req_queue_timeout;
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
 * to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
			     unsigned long event, void *data)
{
	if (sclp_suspend_state == sclp_suspend_state_suspended)
		sclp_undo_suspend(SCLP_PM_EVENT_THAW);
	return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
	.notifier_call = sclp_panic_notify,
	.priority = SCLP_PANIC_PRIO,
};

static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;

	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
	rc = PTR_ERR_OR_ZERO(sclp_pdev);
	if (rc)
		goto fail_platform_driver_unregister;

	rc = atomic_notifier_chain_register(&panic_notifier_list,
					    &sclp_on_panic_nb);
	if (rc)
		goto fail_platform_device_unregister;

	return sclp_init();

fail_platform_device_unregister:
	platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
	platform_driver_unregister(&sclp_pdrv);
	return rc;
}

arch_initcall(sclp_initcall);