// SPDX-License-Identifier: GPL-2.0
/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/panic_notifier.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>
#include <asm/debug.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "
struct sclp_trace_entry {
	char id[4];
	u32 a;
	u64 b;
};

#define SCLP_TRACE_ENTRY_SIZE		sizeof(struct sclp_trace_entry)
#define SCLP_TRACE_MAX_SIZE		128
#define SCLP_TRACE_EVENT_MAX_SIZE	64

/* Debug trace area intended for all entries in abbreviated form. */
DEFINE_STATIC_DEBUG_INFO(sclp_debug, "sclp", 8, 1, SCLP_TRACE_ENTRY_SIZE,
			 &debug_hex_ascii_view);

/* Error trace area intended for full entries relating to failed requests. */
DEFINE_STATIC_DEBUG_INFO(sclp_debug_err, "sclp_err", 4, 1,
			 SCLP_TRACE_ENTRY_SIZE, &debug_hex_ascii_view);
/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static LIST_HEAD(sclp_reg_list);

/* List of queued requests. */
static LIST_HEAD(sclp_req_queue);

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static void *sclp_read_sccb;
static struct init_sccb *sclp_init_sccb;

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;

/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 1;

/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

/* The currently active SCLP command word. */
static sclp_cmdw_t active_cmd;
static inline void sclp_trace(int prio, char *id, u32 a, u64 b, bool err)
{
	struct sclp_trace_entry e;

	memset(&e, 0, sizeof(e));
	strncpy(e.id, id, sizeof(e.id));
	e.a = a;
	e.b = b;
	debug_event(&sclp_debug, prio, &e, sizeof(e));
	if (err)
		debug_event(&sclp_debug_err, 0, &e, sizeof(e));
}
static inline int no_zeroes_len(void *data, int len)
{
	char *d = data;

	/* Minimize trace area usage by not tracing trailing zeroes. */
	while (len > SCLP_TRACE_ENTRY_SIZE && d[len - 1] == 0)
		len--;

	return len;
}
static inline void sclp_trace_bin(int prio, void *d, int len, int errlen)
{
	debug_event(&sclp_debug, prio, d, no_zeroes_len(d, len));
	if (errlen)
		debug_event(&sclp_debug_err, 0, d, no_zeroes_len(d, errlen));
}
static inline int abbrev_len(sclp_cmdw_t cmd, struct sccb_header *sccb)
{
	struct evbuf_header *evbuf = (struct evbuf_header *)(sccb + 1);
	int len = sccb->length, limit = SCLP_TRACE_MAX_SIZE;

	/* Full SCCB tracing if debug level is set to max. */
	if (sclp_debug.level == DEBUG_MAX_LEVEL)
		return len;

	/* Minimal tracing for console writes. */
	if (cmd == SCLP_CMDW_WRITE_EVENT_DATA &&
	    (evbuf->type == EVTYP_MSG || evbuf->type == EVTYP_VT220MSG))
		limit = SCLP_TRACE_ENTRY_SIZE;

	return min(len, limit);
}
static inline void sclp_trace_sccb(int prio, char *id, u32 a, u64 b,
				   sclp_cmdw_t cmd, struct sccb_header *sccb,
				   bool err)
{
	sclp_trace(prio, id, a, b, err);
	sclp_trace_bin(prio + 1, sccb, abbrev_len(cmd, sccb),
		       err ? sccb->length : 0);
}
static inline void sclp_trace_evbuf(int prio, char *id, u32 a, u64 b,
				    struct evbuf_header *evbuf, bool err)
{
	sclp_trace(prio, id, a, b, err);
	sclp_trace_bin(prio + 1, evbuf,
		       min((int)evbuf->length, (int)SCLP_TRACE_EVENT_MAX_SIZE),
		       err ? evbuf->length : 0);
}
static inline void sclp_trace_req(int prio, char *id, struct sclp_req *req,
				  bool err)
{
	struct sccb_header *sccb = req->sccb;
	union {
		struct {
			u16 status;
			u16 response;
			u16 timeout;
			u16 start_count;
		};
		u64 b;
	} summary;

	summary.status = req->status;
	summary.response = sccb ? sccb->response_code : 0;
	summary.timeout = (u16)req->queue_timeout;
	summary.start_count = (u16)req->start_count;

	sclp_trace(prio, id, (u32)(addr_t)sccb, summary.b, err);
}
static inline void sclp_trace_register(int prio, char *id, u32 a, u64 b,
				       struct sclp_register *reg)
{
	struct {
		u64 receive;
		u64 send;
	} d;

	d.receive = reg->receive_mask;
	d.send = reg->send_mask;

	sclp_trace(prio, id, a, b, false);
	sclp_trace_bin(prio, &d, sizeof(d), 0);
}
static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);
static int __init sclp_setup_console_drop(char *str)
{
	int drop, rc;

	rc = kstrtoint(str, 0, &drop);
	if (!rc)
		sclp_console_drop = drop;
	return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);
/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;
/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;
/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_request_timeout(bool force_restart);
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);
static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}
/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = cb;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}
static void sclp_request_timeout_restart(struct timer_list *unused)
{
	sclp_request_timeout(true);
}

static void sclp_request_timeout_normal(struct timer_list *unused)
{
	sclp_request_timeout(false);
}
/* Request timeout handler. Restart the request queue. If force_restart,
 * force restart of running request. */
static void sclp_request_timeout(bool force_restart)
{
	unsigned long flags;

	/* TMO: A timeout occurred (a=force_restart) */
	sclp_trace(2, "TMO", force_restart, 0, true);

	spin_lock_irqsave(&sclp_lock, flags);
	if (force_restart) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}
/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
	unsigned long expires_next = 0;
	struct sclp_req *req;

	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (!expires_next ||
		    (time_before(req->queue_expires, expires_next)))
			expires_next = req->queue_expires;
	}
	return expires_next;
}
/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
	unsigned long flags, now;
	struct sclp_req *req;

	spin_lock_irqsave(&sclp_lock, flags);
	now = jiffies;
	/* Don't need list_for_each_safe because we break out after list_del */
	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (time_before_eq(req->queue_expires, now)) {
			if (req->status == SCLP_REQ_QUEUED) {
				req->status = SCLP_REQ_QUEUED_TIMEOUT;
				list_del(&req->list);
				goto out;
			}
		}
	}
	req = NULL;
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return req;
}
/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(struct timer_list *unused)
{
	unsigned long flags, expires_next;
	struct sclp_req *req;

	do {
		req = __sclp_req_queue_remove_expired_req();

		if (req) {
			/* RQTM: Request timed out (a=sccb, b=summary) */
			sclp_trace_req(2, "RQTM", req, true);
		}

		if (req && req->callback)
			req->callback(req, req->callback_data);
	} while (req);

	spin_lock_irqsave(&sclp_lock, flags);
	expires_next = __sclp_req_queue_find_next_timeout();
	if (expires_next)
		mod_timer(&sclp_queue_timer, expires_next);
	spin_unlock_irqrestore(&sclp_lock, flags);
}
static int sclp_service_call_trace(sclp_cmdw_t command, void *sccb)
{
	static u64 srvc_count;
	int rc;

	/* SRV1: Service call about to be issued (a=command, b=sccb address) */
	sclp_trace_sccb(0, "SRV1", command, (u64)sccb, command, sccb, false);

	rc = sclp_service_call(command, sccb);

	/* SRV2: Service call was issued (a=rc, b=SRVC sequence number) */
	sclp_trace(0, "SRV2", -rc, ++srvc_count, rc != 0);

	if (rc == 0)
		active_cmd = command;

	return rc;
}
/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call_trace(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout_restart);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}
/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout_normal);
			break;
		}
		/* Post-processing for aborted request */
		list_del(&req->list);

		/* RQAB: Request aborted (a=sccb, b=summary) */
		sclp_trace_req(2, "RQAB", req, true);

		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}
static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_init_req)
		return 1;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}
/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}

	/* RQAD: Request was added (a=sccb, b=caller) */
	sclp_trace(2, "RQAD", (u32)(addr_t)req->sccb, _RET_IP_, false);

	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	if (req->queue_timeout) {
		req->queue_expires = jiffies + req->queue_timeout * HZ;
		if (!timer_pending(&sclp_queue_timer) ||
		    time_after(sclp_queue_timer.expires, req->queue_expires))
			mod_timer(&sclp_queue_timer, req->queue_expires);
	} else
		req->queue_expires = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
EXPORT_SYMBOL(sclp_add_request);
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type))
				break;
			else
				reg = NULL;
		}

		/* EVNT: Event callback (b=receiver) */
		sclp_trace_evbuf(2, "EVNT", 0, reg ? (u64)reg->receiver_fn : 0,
				 evbuf, !reg);

		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}
/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}
/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}
static bool ok_response(u32 sccb_int, sclp_cmdw_t cmd)
{
	struct sccb_header *sccb = (struct sccb_header *)(addr_t)sccb_int;
	struct evbuf_header *evbuf;
	u16 response;

	if (!sccb)
		return true;

	/* Check SCCB response. */
	response = sccb->response_code & 0xff;
	if (response != 0x10 && response != 0x20)
		return false;

	/* Check event-processed flag on outgoing events. */
	if (cmd == SCLP_CMDW_WRITE_EVENT_DATA) {
		evbuf = (struct evbuf_header *)(sccb + 1);
		if (!(evbuf->flags & 0x80))
			return false;
	}

	return true;
}
/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;

	/* INT: Interrupt received (a=intparm, b=cmd) */
	sclp_trace_sccb(0, "INT", param32, active_cmd, active_cmd,
			(struct sccb_header *)(addr_t)finished_sccb,
			!ok_response(finished_sccb, active_cmd));

	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;

			/* RQOK: Request success (a=sccb, b=summary) */
			sclp_trace_req(2, "RQOK", req, false);

			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		} else {
			/* UNEX: Unexpected SCCB completion (a=sccb address) */
			sclp_trace(0, "UNEX", finished_sccb, 0, true);
		}
		sclp_running_state = sclp_running_state_idle;
		active_cmd = 0;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}
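/*
 * Worked example (added annotation, not from the original source): for an
 * interruption parameter of param32 = 0x0057a003 the handler derives
 * finished_sccb = 0x0057a003 & 0xfffffff8 = 0x0057a000 (the 8-byte aligned
 * SCCB address) and evbuf_pending = 0x0057a003 & 0x3 = 3, i.e. unsolicited
 * event buffers are waiting and a read event data request gets queued.
 */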
/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}
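/*
 * Note (added annotation): on s390 the TOD clock increments bit 51 every
 * microsecond, so 1 << 32 TOD ticks equal 2^32 / 4096 us = 1.048576 s.
 * The conversion above is therefore a deliberately coarse "about one TOD
 * second per wall-clock second" approximation, which errs on the long side
 * and is sufficient for timeout purposes.
 */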
/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	static u64 sync_count;
	u64 timeout;
	int irq_context;

	/* SYN1: Synchronous wait start (a=runstate, b=sync count) */
	sclp_trace(4, "SYN1", sclp_running_state, ++sync_count, false);

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock_fast() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
	cr0_sync |= 1UL << (63 - 54);
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_tod_clock_fast() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(&sclp_request_timer);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);

	/* SYN2: Synchronous wait end (a=runstate, b=sync_count) */
	sclp_trace(4, "SYN2", sclp_running_state, sync_count, false);
}
EXPORT_SYMBOL(sclp_sync_wait);
/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn) {
			/* STCG: State-change callback (b=callback) */
			sclp_trace(2, "STCG", 0, (u64)reg->state_change_fn,
				   false);

			reg->state_change_fn(reg);
		}
	} while (reg);
}
{
803 struct evbuf_header header
;
804 u8 validity_sclp_active_facility_mask
: 1;
805 u8 validity_sclp_receive_mask
: 1;
806 u8 validity_sclp_send_mask
: 1;
807 u8 validity_read_data_function_mask
: 1;
810 u64 sclp_active_facility_mask
;
811 u8 masks
[2 * 1021 + 4]; /* variable length */
813 * u8 sclp_receive_mask[mask_length];
814 * u8 sclp_send_mask[mask_length];
815 * u32 read_data_function_mask;
817 } __attribute__((packed
));
/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);

	scbuf = (struct sclp_statechangebuf *) evbuf;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = sccb_get_recv_mask(scbuf);
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = sccb_get_send_mask(scbuf);
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp.facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}
static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};
/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}
/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	/* REG: Event listener registered (b=caller) */
	sclp_trace_register(2, "REG", 0, _RET_IP_, reg);

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}
EXPORT_SYMBOL(sclp_register);
/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	/* UREG: Event listener unregistered (b=caller) */
	sclp_trace_register(2, "UREG", 0, _RET_IP_, reg);

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}
EXPORT_SYMBOL(sclp_unregister);
/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}
EXPORT_SYMBOL(sclp_remove_processed);
/* Prepare init mask request. Called while sclp_lock is locked. */
static void
__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
{
	struct init_sccb *sccb = sclp_init_sccb;

	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(*sccb);
	if (sclp_mask_compat_mode)
		sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
	else
		sccb->mask_length = sizeof(sccb_mask_t);
	sccb_set_recv_mask(sccb, receive_mask);
	sccb_set_send_mask(sccb, send_mask);
	sccb_set_sclp_recv_mask(sccb, 0);
	sccb_set_sclp_send_mask(sccb, 0);
}
/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb_get_sclp_recv_mask(sccb);
				sclp_send_mask = sccb_get_sclp_send_mask(sccb);
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
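/*
 * Note (added annotation): the WRITE_EVENT_MASK exchange is two-way. The
 * driver proposes the union of all listeners' masks via
 * sccb_set_recv_mask()/sccb_set_send_mask(), and the SCLP answers with its
 * own masks, which are stored in sclp_receive_mask/sclp_send_mask above.
 * sclp_dispatch_state_change() then intersects both sides per listener.
 */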
/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
EXPORT_SYMBOL(sclp_deactivate);
/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
EXPORT_SYMBOL(sclp_reactivate);
/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	inc_irq_stat(IRQEXT_SCP);
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}
/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(struct timer_list *unused)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}
/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call_trace(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		rc = -EBUSY;
		if (sclp_init_req.status == SCLP_REQ_DONE) {
			if (sccb->header.response_code == 0x20) {
				rc = 0;
				break;
			} else if (sccb->header.response_code == 0x74f0) {
				if (!sclp_mask_compat_mode) {
					sclp_mask_compat_mode = true;
					retry = 0;
				}
			}
		}
	}
	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};
static ssize_t con_pages_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR_RO(con_pages);

static ssize_t con_drop_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR_RO(con_drop);

static ssize_t con_full_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR_RO(con_full);

static struct attribute *sclp_drv_attrs[] = {
	&driver_attr_con_pages.attr,
	&driver_attr_con_drop.attr,
	&driver_attr_con_full.attr,
	NULL,
};
static struct attribute_group sclp_drv_attr_group = {
	.attrs = sclp_drv_attrs,
};
static const struct attribute_group *sclp_drv_attr_groups[] = {
	&sclp_drv_attr_group,
	NULL,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name = "sclp",
		.groups = sclp_drv_attr_groups,
	},
};
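/*
 * Usage note (added annotation, assuming the usual platform-bus sysfs
 * layout): since the driver registers as platform driver "sclp", the three
 * read-only attributes defined above should surface as
 *	/sys/bus/platform/drivers/sclp/con_pages
 *	/sys/bus/platform/drivers/sclp/con_drop
 *	/sys/bus/platform/drivers/sclp/con_full
 */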
/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	sclp_read_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
	sclp_init_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
	BUG_ON(!sclp_read_sccb || !sclp_init_sccb);
	/* Set up variables */
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	timer_setup(&sclp_request_timer, NULL, 0);
	timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
	free_page((unsigned long) sclp_read_sccb);
	free_page((unsigned long) sclp_init_sccb);
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;

	return sclp_init();
}

arch_initcall(sclp_initcall);