/*
 * Machine check exception handling.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright 2013 IBM Corporation
 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
 */

#undef DEBUG
#define pr_fmt(fmt) "mce: " fmt

#include <linux/hardirq.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>

#include <asm/machdep.h>
#include <asm/mce.h>

static DEFINE_PER_CPU(int, mce_nest_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);

/* Queue for delayed MCE events. */
static DEFINE_PER_CPU(int, mce_queue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);

/* Queue for delayed MCE UE events. */
static DEFINE_PER_CPU(int, mce_ue_count);
static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
					mce_ue_event_queue);

static void machine_check_process_queued_event(struct irq_work *work);
static void machine_check_ue_irq_work(struct irq_work *work);
void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);

static struct irq_work mce_event_process_work = {
	.func = machine_check_process_queued_event,
};

static struct irq_work mce_ue_event_irq_work = {
	.func = machine_check_ue_irq_work,
};

DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);

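/*
 * In outline: the machine check interrupt itself only saves and queues
 * events in the per-cpu buffers above, since very little is safe to do
 * that early; irq_work then provides a safe hop out of interrupt
 * context, and UE events take one more hop onto a workqueue so that
 * memory_failure() can run in process context.
 */
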
static void mce_set_error_info(struct machine_check_event *mce,
			       struct mce_error_info *mce_err)
{
	mce->error_type = mce_err->error_type;
	switch (mce_err->error_type) {
	case MCE_ERROR_TYPE_UE:
		mce->u.ue_error.ue_error_type = mce_err->u.ue_error_type;
		break;
	case MCE_ERROR_TYPE_SLB:
		mce->u.slb_error.slb_error_type = mce_err->u.slb_error_type;
		break;
	case MCE_ERROR_TYPE_ERAT:
		mce->u.erat_error.erat_error_type = mce_err->u.erat_error_type;
		break;
	case MCE_ERROR_TYPE_TLB:
		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
		break;
	case MCE_ERROR_TYPE_USER:
		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
		break;
	case MCE_ERROR_TYPE_RA:
		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
		break;
	case MCE_ERROR_TYPE_LINK:
		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
		break;
	case MCE_ERROR_TYPE_UNKNOWN:
	default:
		break;
	}
}

/*
 * Decode and save high level MCE information into per cpu buffer which
 * is an array of machine_check_event structures.
 */
void save_mce_event(struct pt_regs *regs, long handled,
		    struct mce_error_info *mce_err,
		    uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
	int index = __this_cpu_inc_return(mce_nest_count) - 1;
	struct machine_check_event *mce;

	/*
	 * Return if we don't have enough space to log the MCE event.
	 * mce_nest_count may go beyond MAX_MC_EVT but that's ok,
	 * the check below will stop buffer overrun.
	 */
	if (index >= MAX_MC_EVT)
		return;

	/* Take the slot pointer only after the bounds check above. */
	mce = this_cpu_ptr(&mce_event[index]);

	/* Populate generic machine check info */
	mce->version = MCE_V1;
	mce->srr0 = nip;
	mce->srr1 = regs->msr;
	mce->gpr3 = regs->gpr[3];
	mce->in_use = 1;

	/* Mark it recovered if we have handled it and MSR(RI=1). */
	if (handled && (regs->msr & MSR_RI))
		mce->disposition = MCE_DISPOSITION_RECOVERED;
	else
		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
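	/*
	 * MSR[RI]=0 at interrupt time means SRR0/SRR1 may already have
	 * been clobbered by a nested exception, so the interrupted
	 * context cannot be safely returned to even when the error
	 * itself was handled; hence NOT_RECOVERED above.
	 */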

	mce->initiator = mce_err->initiator;
	mce->severity = mce_err->severity;

	/*
	 * Populate the mce error_type and type-specific error_type.
	 */
	mce_set_error_info(mce, mce_err);

	if (!addr)
		return;

	if (mce->error_type == MCE_ERROR_TYPE_TLB) {
		mce->u.tlb_error.effective_address_provided = true;
		mce->u.tlb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_SLB) {
		mce->u.slb_error.effective_address_provided = true;
		mce->u.slb_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
		mce->u.erat_error.effective_address_provided = true;
		mce->u.erat_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
		mce->u.user_error.effective_address_provided = true;
		mce->u.user_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
		mce->u.ra_error.effective_address_provided = true;
		mce->u.ra_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
		mce->u.link_error.effective_address_provided = true;
		mce->u.link_error.effective_address = addr;
	} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
		mce->u.ue_error.effective_address_provided = true;
		mce->u.ue_error.effective_address = addr;
		if (phys_addr != ULONG_MAX) {
			mce->u.ue_error.physical_address_provided = true;
			mce->u.ue_error.physical_address = phys_addr;
			machine_check_ue_event(mce);
		}
	}
}

/*
 * get_mce_event:
 *	mce	Pointer to machine_check_event structure to be filled.
 *	release	Flag to indicate whether to free the event slot or not.
 *		0 <= do not release the mce event. Caller will invoke
 *		     release_mce_event() once event has been consumed.
 *		1 <= release the slot.
 *
 *	return	1 = success
 *		0 = failure
 *
 * get_mce_event() will be called by platform specific machine check
 * handler routine and in KVM.
 * When we call get_mce_event(), we are still in interrupt context and
 * preemption will not be scheduled until ret_from_except() routine
 * is called.
 */
int get_mce_event(struct machine_check_event *mce, bool release)
{
	int index = __this_cpu_read(mce_nest_count) - 1;
	struct machine_check_event *mc_evt;
	int ret = 0;

	/* Sanity check */
	if (index < 0)
		return ret;

	/* Check if we have MCE info to process. */
	if (index < MAX_MC_EVT) {
		mc_evt = this_cpu_ptr(&mce_event[index]);
		/* Copy the event structure and release the original */
		if (mce)
			*mce = *mc_evt;
		if (release)
			mc_evt->in_use = 0;
		ret = 1;
	}
	/* Decrement the count to free the slot. */
	if (release)
		__this_cpu_dec(mce_nest_count);

	return ret;
}

void release_mce_event(void)
{
	get_mce_event(NULL, true);
}
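
/*
 * A typical call pattern, as a sketch (MCE_EVENT_RELEASE and
 * MCE_EVENT_DONTRELEASE are the flag values from asm/mce.h):
 *
 *	struct machine_check_event evt;
 *
 *	if (get_mce_event(&evt, MCE_EVENT_DONTRELEASE)) {
 *		... examine evt.disposition, evt.error_type, etc. ...
 *		release_mce_event();
 *	}
 */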

static void machine_check_ue_irq_work(struct irq_work *work)
{
	schedule_work(&mce_ue_event_work);
}

/*
 * Queue up the MCE UE event which can then be handled later.
 */
void machine_check_ue_event(struct machine_check_event *evt)
{
	int index;

	index = __this_cpu_inc_return(mce_ue_count) - 1;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_ue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));

	/* Queue work to process this event later. */
	irq_work_queue(&mce_ue_event_irq_work);
}

/*
 * Queue up the MCE event which can then be handled later.
 */
void machine_check_queue_event(void)
{
	int index;
	struct machine_check_event evt;

	if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
		return;

	index = __this_cpu_inc_return(mce_queue_count) - 1;
	/* If queue is full, just return for now. */
	if (index >= MAX_MC_EVT) {
		__this_cpu_dec(mce_queue_count);
		return;
	}
	memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));

	/* Queue irq work to process this event later. */
	irq_work_queue(&mce_event_process_work);
}

/*
 * Process pending MCE UE events from the UE event queue. This runs in
 * process context, off the mce_ue_event_work workqueue.
 */
static void machine_process_ue_event(struct work_struct *work)
{
	int index;
	struct machine_check_event *evt;

	while (__this_cpu_read(mce_ue_count) > 0) {
		index = __this_cpu_read(mce_ue_count) - 1;
		evt = this_cpu_ptr(&mce_ue_event_queue[index]);
#ifdef CONFIG_MEMORY_FAILURE
		/*
		 * This should probably be queued elsewhere, but oh well.
		 */
		if (evt->error_type == MCE_ERROR_TYPE_UE) {
			if (evt->u.ue_error.physical_address_provided) {
				unsigned long pfn;

				pfn = evt->u.ue_error.physical_address >>
					PAGE_SHIFT;
				memory_failure(pfn, SIGBUS, 0);
			} else
				pr_warn("Failed to identify the bad address from where the uncorrectable error (UE) was generated\n");
		}
#endif
		__this_cpu_dec(mce_ue_count);
	}
}

/*
 * Process pending MCE events from the MCE event queue. This runs from
 * interrupt context, off the mce_event_process_work irq_work.
 */
static void machine_check_process_queued_event(struct irq_work *work)
{
	int index;
	struct machine_check_event *evt;

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/*
	 * For now just print it to console.
	 * TODO: log this error event to FSP or nvram.
	 */
	while (__this_cpu_read(mce_queue_count) > 0) {
		index = __this_cpu_read(mce_queue_count) - 1;
		evt = this_cpu_ptr(&mce_event_queue[index]);
		machine_check_print_event_info(evt, false);
		__this_cpu_dec(mce_queue_count);
	}
}
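
/*
 * Printing happens here rather than in the machine check interrupt
 * itself: machine_check_early() below runs in real mode, where printk
 * is not safe, which is why events are queued and only logged once
 * irq_work has moved us back to a normal interrupt context.
 */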

void machine_check_print_event_info(struct machine_check_event *evt,
				    bool user_mode)
{
	const char *level, *sevstr, *subtype;
	static const char *mc_ue_types[] = {
		"Indeterminate",
		"Instruction fetch",
		"Page table walk ifetch",
		"Load/Store",
		"Page table walk Load/Store",
	};
	static const char *mc_slb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_erat_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_tlb_types[] = {
		"Indeterminate",
		"Parity",
		"Multihit",
	};
	static const char *mc_user_types[] = {
		"Indeterminate",
		"tlbie(l) invalid",
	};
	static const char *mc_ra_types[] = {
		"Indeterminate",
		"Instruction fetch (bad)",
		"Instruction fetch (foreign)",
		"Page table walk ifetch (bad)",
		"Page table walk ifetch (foreign)",
		"Load (bad)",
		"Store (bad)",
		"Page table walk Load/Store (bad)",
		"Page table walk Load/Store (foreign)",
		"Load/Store (foreign)",
	};
	static const char *mc_link_types[] = {
		"Indeterminate",
		"Instruction fetch (timeout)",
		"Page table walk ifetch (timeout)",
		"Load (timeout)",
		"Store (timeout)",
		"Page table walk Load/Store (timeout)",
	};

	/* Print things out */
	if (evt->version != MCE_V1) {
		pr_err("Machine Check Exception, Unknown event version %d !\n",
		       evt->version);
		return;
	}
	switch (evt->severity) {
	case MCE_SEV_NO_ERROR:
		level = KERN_INFO;
		sevstr = "Harmless";
		break;
	case MCE_SEV_WARNING:
		level = KERN_WARNING;
		sevstr = "";
		break;
	case MCE_SEV_ERROR_SYNC:
		level = KERN_ERR;
		sevstr = "Severe";
		break;
	case MCE_SEV_FATAL:
	default:
		level = KERN_ERR;
		sevstr = "Fatal";
		break;
	}

	printk("%s%s Machine check interrupt [%s]\n", level, sevstr,
	       evt->disposition == MCE_DISPOSITION_RECOVERED ?
	       "Recovered" : "Not recovered");

	if (user_mode) {
		printk("%s NIP: [%016llx] PID: %d Comm: %s\n", level,
		       evt->srr0, current->pid, current->comm);
	} else {
		printk("%s NIP [%016llx]: %pS\n", level, evt->srr0,
		       (void *)evt->srr0);
	}

	printk("%s Initiator: %s\n", level,
	       evt->initiator == MCE_INITIATOR_CPU ? "CPU" : "Unknown");
	switch (evt->error_type) {
	case MCE_ERROR_TYPE_UE:
		subtype = evt->u.ue_error.ue_error_type <
			ARRAY_SIZE(mc_ue_types) ?
			mc_ue_types[evt->u.ue_error.ue_error_type]
			: "Unknown";
		printk("%s Error type: UE [%s]\n", level, subtype);
		if (evt->u.ue_error.effective_address_provided)
			printk("%s Effective address: %016llx\n",
			       level, evt->u.ue_error.effective_address);
		if (evt->u.ue_error.physical_address_provided)
			printk("%s Physical address: %016llx\n",
			       level, evt->u.ue_error.physical_address);
		break;
	case MCE_ERROR_TYPE_SLB:
		subtype = evt->u.slb_error.slb_error_type <
			ARRAY_SIZE(mc_slb_types) ?
			mc_slb_types[evt->u.slb_error.slb_error_type]
			: "Unknown";
		printk("%s Error type: SLB [%s]\n", level, subtype);
		if (evt->u.slb_error.effective_address_provided)
			printk("%s Effective address: %016llx\n",
			       level, evt->u.slb_error.effective_address);
		break;
	case MCE_ERROR_TYPE_ERAT:
		subtype = evt->u.erat_error.erat_error_type <
			ARRAY_SIZE(mc_erat_types) ?
			mc_erat_types[evt->u.erat_error.erat_error_type]
			: "Unknown";
		printk("%s Error type: ERAT [%s]\n", level, subtype);
		if (evt->u.erat_error.effective_address_provided)
			printk("%s Effective address: %016llx\n",
			       level, evt->u.erat_error.effective_address);
		break;
	case MCE_ERROR_TYPE_TLB:
		subtype = evt->u.tlb_error.tlb_error_type <
			ARRAY_SIZE(mc_tlb_types) ?
			mc_tlb_types[evt->u.tlb_error.tlb_error_type]
			: "Unknown";
		printk("%s Error type: TLB [%s]\n", level, subtype);
		if (evt->u.tlb_error.effective_address_provided)
			printk("%s Effective address: %016llx\n",
			       level, evt->u.tlb_error.effective_address);
		break;
	case MCE_ERROR_TYPE_USER:
		subtype = evt->u.user_error.user_error_type <
			ARRAY_SIZE(mc_user_types) ?
			mc_user_types[evt->u.user_error.user_error_type]
			: "Unknown";
		printk("%s Error type: User [%s]\n", level, subtype);
		if (evt->u.user_error.effective_address_provided)
			printk("%s Effective address: %016llx\n",
			       level, evt->u.user_error.effective_address);
		break;
	case MCE_ERROR_TYPE_RA:
		subtype = evt->u.ra_error.ra_error_type <
			ARRAY_SIZE(mc_ra_types) ?
			mc_ra_types[evt->u.ra_error.ra_error_type]
			: "Unknown";
		printk("%s Error type: Real address [%s]\n", level, subtype);
		if (evt->u.ra_error.effective_address_provided)
			printk("%s Effective address: %016llx\n",
			       level, evt->u.ra_error.effective_address);
		break;
	case MCE_ERROR_TYPE_LINK:
		subtype = evt->u.link_error.link_error_type <
			ARRAY_SIZE(mc_link_types) ?
			mc_link_types[evt->u.link_error.link_error_type]
			: "Unknown";
		printk("%s Error type: Link [%s]\n", level, subtype);
		if (evt->u.link_error.effective_address_provided)
			printk("%s Effective address: %016llx\n",
			       level, evt->u.link_error.effective_address);
		break;
	default:
	case MCE_ERROR_TYPE_UNKNOWN:
		printk("%s Error type: Unknown\n", level);
		break;
	}
}
EXPORT_SYMBOL_GPL(machine_check_print_event_info);
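
/*
 * Illustrative console output, composed from the format strings above
 * (the addresses and symbol are made up):
 *
 *	Severe Machine check interrupt [Recovered]
 *	 NIP [c000000000123456]: some_function+0x38/0x100
 *	 Initiator: CPU
 *	 Error type: SLB [Multihit]
 *	 Effective address: 0000000010002000
 */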

/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain srr0 and srr1.
 */
long machine_check_early(struct pt_regs *regs)
{
	long handled = 0;

	__this_cpu_inc(irq_stat.mce_exceptions);

	if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
		handled = cur_cpu_spec->machine_check_early(regs);
	return handled;
}

/* Possible meanings for HMER_DEBUG_TRIG bit being set on POWER9 */
static enum {
	DTRIG_UNKNOWN,
	DTRIG_VECTOR_CI,	/* need to emulate vector CI load instr */
	DTRIG_SUSPEND_ESCAPE,	/* need to escape from TM suspend mode */
} hmer_debug_trig_function;

static int init_debug_trig_function(void)
{
	int pvr;
	struct device_node *cpun;
	struct property *prop = NULL;
	const char *str;

	/* First look in the device tree */
	preempt_disable();
	cpun = of_get_cpu_node(smp_processor_id(), NULL);
	if (cpun) {
		of_property_for_each_string(cpun, "ibm,hmi-special-triggers",
					    prop, str) {
			if (strcmp(str, "bit17-vector-ci-load") == 0)
				hmer_debug_trig_function = DTRIG_VECTOR_CI;
			else if (strcmp(str, "bit17-tm-suspend-escape") == 0)
				hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		}
		of_node_put(cpun);
	}
	preempt_enable();

	/* If we found the property, don't look at PVR */
	if (prop)
		goto out;

	pvr = mfspr(SPRN_PVR);
	/* Check for POWER9 Nimbus (scale-out) */
	if ((PVR_VER(pvr) == PVR_POWER9) && (pvr & 0xe000) == 0) {
		/* DD2.2 and later */
		if ((pvr & 0xfff) >= 0x202)
			hmer_debug_trig_function = DTRIG_SUSPEND_ESCAPE;
		/* DD2.0 and DD2.1 - used for vector CI load emulation */
		else if ((pvr & 0xfff) >= 0x200)
			hmer_debug_trig_function = DTRIG_VECTOR_CI;
	}

 out:
	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		pr_debug("HMI debug trigger used for vector CI load\n");
		break;
	case DTRIG_SUSPEND_ESCAPE:
		pr_debug("HMI debug trigger used for TM suspend escape\n");
		break;
	default:
		break;
	}
	return 0;
}
__initcall(init_debug_trig_function);

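/*
 * The PVR checks above assume the POWER9 revision encoding: the 0xe000
 * bits distinguish the module type (0 for Nimbus/scale-out) and the
 * low 12 bits carry the DD level, e.g. 0x200 for DD2.0 and 0x202 for
 * DD2.2.
 */
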
/*
 * Handle HMIs that occur as a result of a debug trigger.
 * Return values:
 * -1 means this is not a HMI cause that we know about
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_handle_debugtrig(struct pt_regs *regs)
{
	unsigned long hmer = mfspr(SPRN_HMER);
	long ret = 0;

	/* HMER_DEBUG_TRIG bit is used for various workarounds on P9 */
	if (!((hmer & HMER_DEBUG_TRIG)
	      && hmer_debug_trig_function != DTRIG_UNKNOWN))
		return -1;

	hmer &= ~HMER_DEBUG_TRIG;
	/* HMER is a write-AND register */
	mtspr(SPRN_HMER, ~HMER_DEBUG_TRIG);
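	/*
	 * Because stores to HMER are ANDed into the register, writing
	 * ~HMER_DEBUG_TRIG clears only the DEBUG_TRIG bit and leaves
	 * any other latched HMI causes intact for the check below.
	 */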

	switch (hmer_debug_trig_function) {
	case DTRIG_VECTOR_CI:
		/*
		 * Now to avoid problems with soft-disable we
		 * only do the emulation if we are coming from
		 * host user space
		 */
		if (regs && user_mode(regs))
			ret = local_paca->hmi_p9_special_emu = 1;

		break;

	default:
		break;
	}

	/*
	 * See if any other HMI causes remain to be handled
	 */
	if (hmer & mfspr(SPRN_HMEER))
		return -1;

	return ret;
}

/*
 * Return values:
 *  0 means no further handling is required
 *  1 means further handling is required
 */
long hmi_exception_realmode(struct pt_regs *regs)
{
	int ret;

	__this_cpu_inc(irq_stat.hmi_exceptions);

	ret = hmi_handle_debugtrig(regs);
	if (ret >= 0)
		return ret;

	wait_for_subcore_guest_exit();

	if (ppc_md.hmi_exception_early)
		ppc_md.hmi_exception_early(regs);

	wait_for_tb_resync();

	return 1;
}