]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * drivers/s390/s390mach.c | |
3 | * S/390 machine check handler | |
4 | * | |
5 | * S390 version | |
6 | * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | |
7 | * Author(s): Ingo Adlung (adlung@de.ibm.com) | |
8 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | |
9 | */ | |
10 | ||
1da177e4 LT |
11 | #include <linux/init.h> |
12 | #include <linux/sched.h> | |
13 | #include <linux/errno.h> | |
14 | #include <linux/workqueue.h> | |
022e4fc0 | 15 | #include <linux/time.h> |
84d11c5d | 16 | #include <linux/kthread.h> |
1da177e4 LT |
17 | |
18 | #include <asm/lowcore.h> | |
19 | ||
20 | #include "s390mach.h" | |
21 | ||
/*
 * Debug trace macro: currently aliased to printk so the CRW trace in
 * s390_collect_crw_info() is emitted; switch to the empty variant below
 * to silence it.
 */
#define DBG printk
// #define DBG(args,...) do {} while (0);

/*
 * Posted (up()) by s390_handle_mcck() when a channel report word is
 * pending; the kmcheck thread blocks on it in s390_collect_crw_info().
 */
static struct semaphore m_sem;

/* CRW consumers provided by the common I/O layer (cio). */
extern int css_process_crw(int, int);
extern int chsc_process_crw(void);
extern int chp_process_crw(int, int);
extern void css_reiterate_subchannels(void);

/* Slow-path evaluation work, queued when a CRW handler returns -EAGAIN. */
extern struct workqueue_struct *slow_path_wq;
extern struct work_struct slow_path_work;
/*
 * Terminal failure handler: stop all other CPUs (on SMP) and put this
 * CPU into a disabled wait.  Does not return.
 *
 * @msg: human-readable description of the unrecoverable condition.
 *       NOTE(review): msg is currently unused by this body — presumably
 *       kept for inspection via a dump tool; confirm before removing.
 */
static NORET_TYPE void
s390_handle_damage(char *msg)
{
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
	/*
	 * Enter a disabled wait tagged with our caller's address so the
	 * failing call site can be identified from a system dump.
	 */
	disabled_wait((unsigned long) __builtin_return_address(0));
	/* Not reached; loop satisfies the noreturn contract for the compiler. */
	for(;;);
}
44 | ||
/*
 * Retrieve CRWs and call function to handle event.
 *
 * Kernel thread body (see machine_check_crw_init): blocks on the
 * semaphore passed in @param until s390_handle_mcck() signals that a
 * channel report word is pending, then drains all CRWs with stcrw and
 * dispatches them by reporting-source code (rsc).
 *
 * Note : we currently process CRWs for io and chsc subchannels only.
 * Chains of more than two CRWs are drained and reported but not handled.
 *
 * Never returns in practice (infinite repeat loop); the trailing
 * return 0 only satisfies the kthread function signature.
 */
static int
s390_collect_crw_info(void *param)
{
	struct crw crw[2];	/* at most a two-CRW chain is supported */
	int ccode, ret, slow;
	struct semaphore *sem;
	unsigned int chain;	/* index of the CRW within the current chain */

	sem = (struct semaphore *)param;
repeat:
	/* Wait until the machine check handler posts the semaphore. */
	down_interruptible(sem);
	slow = 0;
	chain = 0;
	while (1) {
		if (unlikely(chain > 1)) {
			/*
			 * More than two chained CRWs: drain the remainder of
			 * the chain into a scratch CRW and log it; handling
			 * longer chains is not implemented.
			 */
			struct crw tmp_crw;

			printk(KERN_WARNING"%s: Code does not support more "
			       "than two chained crws; please report to "
			       "linux390@de.ibm.com!\n", __FUNCTION__);
			ccode = stcrw(&tmp_crw);
			printk(KERN_WARNING"%s: crw reports slct=%d, oflw=%d, "
			       "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			       __FUNCTION__, tmp_crw.slct, tmp_crw.oflw,
			       tmp_crw.chn, tmp_crw.rsc, tmp_crw.anc,
			       tmp_crw.erc, tmp_crw.rsid);
			printk(KERN_WARNING"%s: This was crw number %x in the "
			       "chain\n", __FUNCTION__, chain);
			if (ccode != 0)
				break;	/* no more CRWs pending */
			chain = tmp_crw.chn ? chain + 1 : 0;
			continue;
		}
		/* Store the next CRW; non-zero ccode means none pending. */
		ccode = stcrw(&crw[chain]);
		if (ccode != 0)
			break;
		DBG(KERN_DEBUG "crw_info : CRW reports slct=%d, oflw=%d, "
		    "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		    crw[chain].slct, crw[chain].oflw, crw[chain].chn,
		    crw[chain].rsc, crw[chain].anc, crw[chain].erc,
		    crw[chain].rsid);
		/* Check for overflows. */
		if (crw[chain].oflw) {
			/*
			 * CRWs were lost: rescan all subchannels and restart
			 * chain collection from scratch.
			 */
			pr_debug("%s: crw overflow detected!\n", __FUNCTION__);
			css_reiterate_subchannels();
			chain = 0;
			slow = 1;
			continue;
		}
		switch (crw[chain].rsc) {
		case CRW_RSC_SCH:
			/*
			 * For a chained subchannel CRW, wait for the second
			 * CRW of the chain before dispatching.
			 */
			if (crw[0].chn && !chain)
				break;
			pr_debug("source is subchannel %04X\n", crw[0].rsid);
			ret = css_process_crw (crw[0].rsid,
					       chain ? crw[1].rsid : 0);
			if (ret == -EAGAIN)
				slow = 1;
			break;
		case CRW_RSC_MONITOR:
			pr_debug("source is monitoring facility\n");
			break;
		case CRW_RSC_CPATH:
			pr_debug("source is channel path %02X\n", crw[0].rsid);
			switch (crw[0].erc) {
			case CRW_ERC_IPARM: /* Path has come. */
				ret = chp_process_crw(crw[0].rsid, 1);
				break;
			case CRW_ERC_PERRI: /* Path has gone. */
			case CRW_ERC_PERRN:
				ret = chp_process_crw(crw[0].rsid, 0);
				break;
			default:
				pr_debug("Don't know how to handle erc=%x\n",
					 crw[0].erc);
				ret = 0;
			}
			if (ret == -EAGAIN)
				slow = 1;
			break;
		case CRW_RSC_CONFIG:
			pr_debug("source is configuration-alert facility\n");
			break;
		case CRW_RSC_CSS:
			pr_debug("source is channel subsystem\n");
			ret = chsc_process_crw();
			if (ret == -EAGAIN)
				slow = 1;
			break;
		default:
			pr_debug("unknown source\n");
			break;
		}
		/* chain is always 0 or 1 here. */
		chain = crw[chain].chn ? chain + 1 : 0;
	}
	/* Any handler asked for re-evaluation: kick the slow path worker. */
	if (slow)
		queue_work(slow_path_wq, &slow_path_work);
	goto repeat;
	return 0;	/* not reached; kthread signature requires int return */
}
151 | ||
77fa2245 HC |
/*
 * Per-cpu record of machine check conditions accumulated by the machine
 * check handler (s390_do_machine_check) for later processing in a safer
 * context by s390_handle_mcck().
 */
struct mcck_struct {
	int kill_task;			/* terminate current task (lost user-mode registers) */
	int channel_report;		/* channel report word pending: wake kmcheck via m_sem */
	int warning;			/* warning machine check pending (e.g. power warning) */
	unsigned long long mcck_code;	/* raw machine check interruption code for logging */
};

static DEFINE_PER_CPU(struct mcck_struct, cpu_mcck);
160 | ||
/*
 * Main machine check handler function. Will be called with interrupts enabled
 * or disabled and machine checks enabled or disabled.
 *
 * Consumes the per-cpu mcck_struct accumulated by s390_do_machine_check():
 * wakes the CRW collector, handles warning machine checks, and terminates
 * the current task if its registers could not be revalidated.
 */
void
s390_handle_mcck(void)
{
	unsigned long flags;
	struct mcck_struct mcck;

	/*
	 * Disable machine checks and get the current state of accumulated
	 * machine checks. Afterwards delete the old state and enable machine
	 * checks again.  The snapshot-and-clear must be atomic with respect
	 * to both interrupts and further machine checks on this cpu.
	 */
	local_irq_save(flags);
	local_mcck_disable();
	mcck = __get_cpu_var(cpu_mcck);
	memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct));
	clear_thread_flag(TIF_MCCK_PENDING);
	local_mcck_enable();
	local_irq_restore(flags);

	/* Wake the kmcheck thread to collect pending channel report words. */
	if (mcck.channel_report)
		up(&m_sem);

#ifdef CONFIG_MACHCHK_WARNING
	/*
	 * The warning may remain for a prolonged period on the bare iron.
	 * (actually till the machine is powered off, or until the problem is gone)
	 * So we just stop listening for the WARNING MCH and prevent continuously
	 * being interrupted.  One caveat is however, that we must do this per
	 * processor and cannot use the smp version of ctl_clear_bit().
	 * On VM we only get one interrupt per virtually presented machinecheck.
	 * Though one suffices, we may get one interrupt per (virtual) processor.
	 */
	if (mcck.warning) {	/* WARNING pending ? */
		static int mchchk_wng_posted = 0;
		/*
		 * Use single machine clear, as we cannot handle smp right now
		 */
		__ctl_clear_bit(14, 24);	/* Disable WARNING MCH */
		/* xchg guarantees SIGPWR is posted to init exactly once. */
		if (xchg(&mchchk_wng_posted, 1) == 0)
			kill_proc(1, SIGPWR, 1);
	}
#endif

	if (mcck.kill_task) {
		/*
		 * Register contents for the current (user-mode) task were
		 * lost: log the machine check code and terminate it.
		 */
		local_irq_enable();
		printk(KERN_EMERG "mcck: Terminating task because of machine "
		       "malfunction (code 0x%016llx).\n", mcck.mcck_code);
		printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
		       current->comm, current->pid);
		do_exit(SIGSEGV);
	}
}
217 | ||
/*
 * Revalidate cpu register contents from the lowcore save areas after a
 * machine check, based on the validity bits in the machine check
 * interruption code (@mci).
 *
 * returns 0 if all registers could be validated
 * returns 1 otherwise (caller must terminate the task or stop the machine)
 *
 * Calls s390_handle_damage() (no return) for losses that cannot be
 * attributed to a single task: invalid control registers or invalid old PSW.
 */
static int
s390_revalidate_registers(struct mci *mci)
{
	int kill_task;
	u64 tmpclock;
	u64 zero;
	void *fpt_save_area, *fpt_creg_save_area;

	kill_task = 0;
	zero = 0;
	/* General purpose registers */
	if (!mci->gr)
		/*
		 * General purpose registers couldn't be restored and have
		 * unknown contents. Process needs to be terminated.
		 */
		kill_task = 1;

	/* Revalidate floating point registers */
	if (!mci->fp)
		/*
		 * Floating point registers can't be restored and
		 * therefore the process needs to be terminated.
		 */
		kill_task = 1;

#ifndef CONFIG_64BIT
	/* 31-bit: reload the four basic fp registers from lowcore. */
	asm volatile("ld 0,0(%0)\n"
		     "ld 2,8(%0)\n"
		     "ld 4,16(%0)\n"
		     "ld 6,24(%0)"
		     : : "a" (&S390_lowcore.floating_pt_save_area));
#endif

	if (MACHINE_HAS_IEEE) {
#ifdef CONFIG_64BIT
		fpt_save_area = &S390_lowcore.floating_pt_save_area;
		fpt_creg_save_area = &S390_lowcore.fpt_creg_save_area;
#else
		/* 31-bit IEEE machines keep these in the extended save area. */
		fpt_save_area = (void *) S390_lowcore.extended_save_area_addr;
		fpt_creg_save_area = fpt_save_area+128;
#endif
		/* Floating point control register */
		if (!mci->fc) {
			/*
			 * Floating point control register can't be restored.
			 * Load a sane (zero) value and terminate the task.
			 */
			asm volatile ("lfpc 0(%0)" : : "a" (&zero), "m" (zero));
			kill_task = 1;

		}
		else
			asm volatile (
				"lfpc 0(%0)"
				: : "a" (fpt_creg_save_area));

		/* Reload all 16 floating point registers from the save area. */
		asm volatile("ld 0,0(%0)\n"
			     "ld 1,8(%0)\n"
			     "ld 2,16(%0)\n"
			     "ld 3,24(%0)\n"
			     "ld 4,32(%0)\n"
			     "ld 5,40(%0)\n"
			     "ld 6,48(%0)\n"
			     "ld 7,56(%0)\n"
			     "ld 8,64(%0)\n"
			     "ld 9,72(%0)\n"
			     "ld 10,80(%0)\n"
			     "ld 11,88(%0)\n"
			     "ld 12,96(%0)\n"
			     "ld 13,104(%0)\n"
			     "ld 14,112(%0)\n"
			     "ld 15,120(%0)\n"
			     : : "a" (fpt_save_area));
	}

	/* Revalidate access registers */
	asm volatile("lam 0,15,0(%0)"
		     : : "a" (&S390_lowcore.access_regs_save_area));
	if (!mci->ar)
		/*
		 * Access registers have unknown contents.
		 * Terminating task.
		 */
		kill_task = 1;

	/* Revalidate control registers */
	if (!mci->cr)
		/*
		 * Control registers have unknown contents.
		 * Can't recover and therefore stopping machine.
		 */
		s390_handle_damage("invalid control registers.");
	else
#ifdef CONFIG_64BIT
		asm volatile("lctlg 0,15,0(%0)"
			     : : "a" (&S390_lowcore.cregs_save_area));
#else
		asm volatile("lctl 0,15,0(%0)"
			     : : "a" (&S390_lowcore.cregs_save_area));
#endif

	/*
	 * We don't even try to revalidate the TOD register, since we simply
	 * can't write something sensible into that register.
	 */

#ifdef CONFIG_64BIT
	/*
	 * See if we can revalidate the TOD programmable register with its
	 * old contents (should be zero) otherwise set it to zero.
	 */
	if (!mci->pr)
		asm volatile("sr 0,0\n"
			     "sckpf"
			     : : : "0", "cc");
	else
		asm volatile(
			"l 0,0(%0)\n"
			"sckpf"
			: : "a" (&S390_lowcore.tod_progreg_save_area) : "0", "cc");
#endif

	/* Revalidate clock comparator register: set it to the current TOD. */
	asm volatile ("stck 0(%1)\n"
		      "sckc 0(%1)"
		      : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory");

	/* Check if old PSW is valid */
	if (!mci->wp)
		/*
		 * Can't tell if we come from user or kernel mode
		 * -> stopping machine.
		 */
		s390_handle_damage("old psw invalid.");

	/* Storage-logical/PSW-mask/instruction-address validity lost. */
	if (!mci->ms || !mci->pm || !mci->ia)
		kill_task = 1;

	return kill_task;
}
363 | ||
/*
 * Throttle for retried instruction-processing-damage (IPD) machine checks:
 * give up (stop the machine) after MAX_IPD_COUNT occurrences within
 * MAX_IPD_TIME microseconds (see ipd_count in s390_do_machine_check).
 */
#define MAX_IPD_COUNT	29
#define MAX_IPD_TIME	(5 * 60 * USEC_PER_SEC) /* 5 minutes */
b73d40c6 | 366 | |
77fa2245 HC |
/*
 * machine check handler.
 *
 * First-level entry, invoked with the machine check interruption code in
 * lowcore.  Unrecoverable conditions stop the machine via
 * s390_handle_damage(); recoverable ones are recorded in the per-cpu
 * mcck_struct and flagged via TIF_MCCK_PENDING for s390_handle_mcck().
 */
void
s390_do_machine_check(struct pt_regs *regs)
{
	static DEFINE_SPINLOCK(ipd_lock);
	static unsigned long long last_ipd;	/* TOD clock of last IPD check */
	static int ipd_count;			/* IPDs within MAX_IPD_TIME */
	unsigned long long tmp;
	struct mci *mci;
	struct mcck_struct *mcck;
	int umode;

	/* NMI-like context: keep lockdep out of the way. */
	lockdep_off();

	mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
	mcck = &__get_cpu_var(cpu_mcck);
	umode = user_mode(regs);

	if (mci->sd)
		/* System damage -> stopping machine */
		s390_handle_damage("received system damage machine check.");

	if (mci->pd) {
		if (mci->b) {
			/* Processing backup -> verify if we can survive this */
			u64 z_mcic, o_mcic, t_mcic;
			/*
			 * z_mcic: MCIC bits that must be zero,
			 * o_mcic: MCIC bits that must be one
			 * for the check to be survivable.
			 */
#ifdef CONFIG_64BIT
			z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
			o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
				  1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
				  1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
				  1ULL<<16);
#else
			z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<57 | 1ULL<<50 |
				  1ULL<<29);
			o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
				  1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
				  1ULL<<30 | 1ULL<<20 | 1ULL<<17 | 1ULL<<16);
#endif
			t_mcic = *(u64 *)mci;

			if (((t_mcic & z_mcic) != 0) ||
			    ((t_mcic & o_mcic) != o_mcic)) {
				s390_handle_damage("processing backup machine "
						   "check with damage.");
			}

			/*
			 * Nullifying exigent condition, therefore we might
			 * retry this instruction.  Throttle: if too many IPDs
			 * occur within MAX_IPD_TIME, stop the machine.
			 */

			spin_lock(&ipd_lock);

			tmp = get_clock();

			/*
			 * >> 12 converts the TOD clock difference to
			 * microseconds for comparison against MAX_IPD_TIME
			 * (which is expressed in usec).
			 */
			if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
				ipd_count++;
			else
				ipd_count = 1;

			last_ipd = tmp;

			if (ipd_count == MAX_IPD_COUNT)
				s390_handle_damage("too many ipd retries.");

			spin_unlock(&ipd_lock);
		}
		else {
			/* Processing damage -> stopping machine */
			s390_handle_damage("received instruction processing "
					   "damage machine check.");
		}
	}
	if (s390_revalidate_registers(mci)) {
		if (umode) {
			/*
			 * Couldn't restore all register contents while in
			 * user mode -> mark task for termination.
			 */
			mcck->kill_task = 1;
			mcck->mcck_code = *(unsigned long long *) mci;
			set_thread_flag(TIF_MCCK_PENDING);
		}
		else
			/*
			 * Couldn't restore all register contents while in
			 * kernel mode -> stopping machine.
			 */
			s390_handle_damage("unable to revalidate registers.");
	}

	if (mci->se)
		/* Storage error uncorrected */
		s390_handle_damage("received storage error uncorrected "
				   "machine check.");

	if (mci->ke)
		/* Storage key-error uncorrected */
		s390_handle_damage("received storage key-error uncorrected "
				   "machine check.");

	if (mci->ds && mci->fa)
		/* Storage degradation */
		s390_handle_damage("received storage degradation machine "
				   "check.");

	if (mci->cp) {
		/* Channel report word pending: defer to s390_handle_mcck(). */
		mcck->channel_report = 1;
		set_thread_flag(TIF_MCCK_PENDING);
	}

	if (mci->w) {
		/* Warning pending: defer to s390_handle_mcck(). */
		mcck->warning = 1;
		set_thread_flag(TIF_MCCK_PENDING);
	}
	lockdep_on();
}
489 | ||
/*
 * s390_init_machine_check
 *
 * initialize machine check handling: set up the kmcheck semaphore and
 * configure which machine check classes are enabled via control
 * register 14.
 */
static int
machine_check_init(void)
{
	/* kmcheck blocks on m_sem until s390_handle_mcck() posts it. */
	init_MUTEX_LOCKED(&m_sem);
	ctl_clear_bit(14, 25);	/* disable external damage MCH */
	ctl_set_bit(14, 27);	/* enable system recovery MCH */
#ifdef CONFIG_MACHCHK_WARNING
	ctl_set_bit(14, 24);	/* enable warning MCH */
#endif
	return 0;
}

/*
 * Initialize the machine check handler really early to be able to
 * catch all machine checks that happen during boot
 */
arch_initcall(machine_check_init);
512 | ||
/*
 * Machine checks for the channel subsystem must be enabled
 * after the channel subsystem is initialized
 *
 * Starts the kmcheck collector thread first, so a consumer exists
 * before channel report machine checks are enabled.
 */
static int __init
machine_check_crw_init (void)
{
	kthread_run(s390_collect_crw_info, &m_sem, "kmcheck");
	ctl_set_bit(14, 28);	/* enable channel report MCH */
	return 0;
}

device_initcall (machine_check_crw_init);