// SPDX-License-Identifier: GPL-2.0
/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/panic_notifier.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static LIST_HEAD(sclp_reg_list);

/* List of queued requests. */
static LIST_HEAD(sclp_req_queue);

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static void *sclp_read_sccb;
static struct init_sccb *sclp_init_sccb;

/* Use the compat mask size if the extended mask length is rejected
 * by the SCLP (see sclp_check_interface()). */
static bool sclp_mask_compat_mode;

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 1;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

static int __init sclp_setup_console_drop(char *str)
{
	int drop, rc;

	rc = kstrtoint(str, 0, &drop);
	if (!rc)
		sclp_console_drop = drop;
	return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);

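/*
 * Illustrative only (not part of the driver): both parameters are parsed
 * from the kernel command line by the __setup() hooks above, e.g.
 *
 *	sclp_con_pages=8 sclp_con_drop=0
 *
 * reserves eight console pages and keeps buffer pages on a buffer-full
 * condition instead of dropping them.
 */
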
/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

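/*
 * Orientation sketch (added commentary, derived from __sclp_start_request(),
 * sclp_interrupt_handler() and sclp_request_timeout() below):
 *
 *	idle --- __sclp_start_request() ---> running
 *	running --- interrupt for finished SCCB ---> reset_pending ---> idle
 *	running --- request timeout (force_restart) ---> idle
 */
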
/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
	sclp_init_state_uninitialized,
	sclp_init_state_initializing,
	sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_request_timeout(bool force_restart);
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

/* Queue a read event data request. Called while sclp_lock is locked. */
static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = cb;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

static void sclp_request_timeout_restart(struct timer_list *unused)
{
	sclp_request_timeout(true);
}

static void sclp_request_timeout_normal(struct timer_list *unused)
{
	sclp_request_timeout(false);
}

/* Request timeout handler. Restart the request queue. If force_restart,
 * force restart of running request. */
static void sclp_request_timeout(bool force_restart)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (force_restart) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
	unsigned long expires_next = 0;
	struct sclp_req *req;

	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (!expires_next ||
		    (time_before(req->queue_expires, expires_next)))
			expires_next = req->queue_expires;
	}
	return expires_next;
}

/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
	unsigned long flags, now;
	struct sclp_req *req;

	spin_lock_irqsave(&sclp_lock, flags);
	now = jiffies;
	/* Don't need list_for_each_safe because we break out after list_del */
	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (time_before_eq(req->queue_expires, now)) {
			if (req->status == SCLP_REQ_QUEUED) {
				req->status = SCLP_REQ_QUEUED_TIMEOUT;
				list_del(&req->list);
				goto out;
			}
		}
	}
	req = NULL;
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(struct timer_list *unused)
{
	unsigned long flags, expires_next;
	struct sclp_req *req;

	do {
		req = __sclp_req_queue_remove_expired_req();
		if (req && req->callback)
			req->callback(req, req->callback_data);
	} while (req);

	spin_lock_irqsave(&sclp_lock, flags);
	expires_next = __sclp_req_queue_find_next_timeout();
	if (expires_next)
		mod_timer(&sclp_queue_timer, expires_next);
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout_restart);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout_normal);
			break;
		}
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_init_req)
		return 1;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	if (req->queue_timeout) {
		req->queue_expires = jiffies + req->queue_timeout * HZ;
		if (!timer_pending(&sclp_queue_timer) ||
		    time_after(sclp_queue_timer.expires, req->queue_expires))
			mod_timer(&sclp_queue_timer, req->queue_expires);
	} else
		req->queue_expires = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);

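/*
 * Minimal usage sketch for sclp_add_request() (illustrative only - the
 * command, SCCB contents and completion are placeholders; real callers
 * such as sclp_con.c build a complete SCCB first):
 *
 *	static void my_cb(struct sclp_req *req, void *data)
 *	{
 *		complete((struct completion *) data);
 *	}
 *
 *	req->command = SCLP_CMDW_WRITE_EVENT_DATA;
 *	req->sccb = sccb;
 *	req->status = SCLP_REQ_FILLED;
 *	req->callback = my_cb;
 *	req->callback_data = &done;
 *	if (sclp_add_request(req))
 *		... request rejected, e.g. interface deactivated ...
 */
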
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
					     sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}

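/*
 * Note on the conversion above: bit 51 of the TOD clock increments every
 * microsecond, so one second corresponds to 10^6 << 12 = 4,096,000,000
 * TOD ticks, which (jiffies / HZ) << 32 approximates from above
 * (2^32 TOD ticks ~= 1.048576 s). Slightly overestimating the interval
 * is harmless here, since the value is only used as a timeout.
 */
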
/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock_fast() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
	cr0_sync |= 1UL << (63 - 54);
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_tod_clock_fast() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(&sclp_request_timer);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);

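/*
 * Typical wait pattern built on sclp_sync_wait() (illustrative; this is
 * the same polling loop sclp_init_mask() uses further below):
 *
 *	while (req.status != SCLP_REQ_DONE && req.status != SCLP_REQ_FAILED)
 *		sclp_sync_wait();
 */
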
/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	u8		masks[2 * 1021 + 4];	/* variable length */
	/*
	 * u8 sclp_receive_mask[mask_length];
	 * u8 sclp_send_mask[mask_length];
	 * u32 read_data_function_mask;
	 */
} __attribute__((packed));

/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);

	scbuf = (struct sclp_statechangebuf *) evbuf;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = sccb_get_recv_mask(scbuf);
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = sccb_get_send_mask(scbuf);
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp.facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);

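/*
 * Registration sketch (illustrative only; the event type and handler are
 * placeholders - see sclp_rw.c or sclp_vt220.c for real listeners):
 *
 *	static void my_receiver_fn(struct evbuf_header *evbuf)
 *	{
 *		// process one event buffer of the registered type
 *	}
 *
 *	static struct sclp_register my_event = {
 *		.receive_mask = EVTYP_MSG_MASK,
 *		.receiver_fn = my_receiver_fn,
 *	};
 *
 *	if (sclp_register(&my_event))
 *		// event type already claimed or initialization failed
 */
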
/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			/* Event buffer is processed - remove it */
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

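/*
 * Buffer layout assumed by sclp_remove_processed() and
 * sclp_dispatch_evbufs() (orientation sketch; see sclp.h for the
 * authoritative definitions):
 *
 *	+----------------------+  <- sccb, sccb->length covers everything
 *	| struct sccb_header   |
 *	+----------------------+
 *	| struct evbuf_header  |  <- evbuf->length covers one event buffer,
 *	|   event data ...     |     flag 0x80 marks it as processed
 *	+----------------------+
 *	| struct evbuf_header  |
 *	|   event data ...     |
 *	+----------------------+
 */
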
/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
{
	struct init_sccb *sccb = sclp_init_sccb;

	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(*sccb);
	if (sclp_mask_compat_mode)
		sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
	else
		sccb->mask_length = sizeof(sccb_mask_t);
	sccb_set_recv_mask(sccb, receive_mask);
	sccb_set_send_mask(sccb, send_mask);
	sccb_set_sclp_recv_mask(sccb, 0);
	sccb_set_sclp_send_mask(sccb, 0);
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb_get_sclp_recv_mask(sccb);
				sclp_send_mask = sccb_get_sclp_send_mask(sccb);
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	inc_irq_stat(IRQEXT_SCP);
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(struct timer_list *unused)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		rc = -EBUSY;
		if (sclp_init_req.status == SCLP_REQ_DONE) {
			if (sccb->header.response_code == 0x20) {
				rc = 0;
				break;
			} else if (sccb->header.response_code == 0x74f0) {
				if (!sclp_mask_compat_mode) {
					sclp_mask_compat_mode = true;
					retry = 0;
				}
			}
		}
	}
	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

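/*
 * Note on the 0x74f0 response above: it appears to indicate that the SCLP
 * rejected the requested (extended) mask length, so the loop retries once
 * with SCLP_MASK_SIZE_COMPAT masks via sclp_mask_compat_mode.
 */
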
/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

static ssize_t con_pages_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR_RO(con_pages);

static ssize_t con_drop_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR_RO(con_drop);

static ssize_t con_full_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR_RO(con_full);

static struct attribute *sclp_drv_attrs[] = {
	&driver_attr_con_pages.attr,
	&driver_attr_con_drop.attr,
	&driver_attr_con_full.attr,
	NULL,
};
static struct attribute_group sclp_drv_attr_group = {
	.attrs = sclp_drv_attrs,
};
static const struct attribute_group *sclp_drv_attr_groups[] = {
	&sclp_drv_attr_group,
	NULL,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name = "sclp",
		.groups = sclp_drv_attr_groups,
	},
};

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	sclp_read_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
	sclp_init_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
	BUG_ON(!sclp_read_sccb || !sclp_init_sccb);
	/* Set up variables */
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	timer_setup(&sclp_request_timer, NULL, 0);
	timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
	free_page((unsigned long) sclp_read_sccb);
	free_page((unsigned long) sclp_init_sccb);
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;

	return sclp_init();
}

arch_initcall(sclp_initcall);