/*
 *  linux/arch/sparc64/kernel/setup.c
 *
 *  Copyright (C) 1995,1996  David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1997       Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <asm/smp.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/inet.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <linux/start_kernel.h>
#include <linux/bootmem.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/idprom.h>
#include <asm/head.h>
#include <asm/starfire.h>
#include <asm/mmu_context.h>
#include <asm/timer.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mmu.h>
#include <asm/ns87303.h>
#include <asm/btext.h>
#include <asm/elf.h>
#include <asm/mdesc.h>
#include <asm/cacheflush.h>
#include <asm/dma.h>
#include <asm/irq.h>

#ifdef CONFIG_IP_PNP
#include <net/ipconfig.h>
#endif

#include "entry.h"
#include "kernel.h"

/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
 * operations in asm/ns87303.h
 */
DEFINE_SPINLOCK(ns87303_lock);
EXPORT_SYMBOL(ns87303_lock);

struct screen_info screen_info = {
	0, 0,			/* orig-x, orig-y */
	0,			/* unused */
	0,			/* orig-video-page */
	0,			/* orig-video-mode */
	128,			/* orig-video-cols */
	0, 0, 0,		/* unused, ega_bx, unused */
	54,			/* orig-video-lines */
	0,			/* orig-video-isVGA */
	16			/* orig-video-points */
};

static void
prom_console_write(struct console *con, const char *s, unsigned int n)
{
	prom_write(s, n);
}

/* Exported for mm/init.c:paging_init. */
unsigned long cmdline_memory_size = 0;

static struct console prom_early_console = {
	.name =		"earlyprom",
	.write =	prom_console_write,
	.flags =	CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
	.index =	-1,
};

/*
 * Process kernel command line switches that are specific to the
 * SPARC or that require special low-level processing.
 */
static void __init process_switch(char c)
{
	switch (c) {
	case 'd':
	case 's':
		break;
	case 'h':
		prom_printf("boot_flags_init: Halt!\n");
		prom_halt();
		break;
	case 'p':
		prom_early_console.flags &= ~CON_BOOT;
		break;
	case 'P':
		/* Force UltraSPARC-III P-Cache on. */
		if (tlb_type != cheetah) {
			printk("BOOT: Ignoring P-Cache force option.\n");
			break;
		}
		cheetah_pcache_forced_on = 1;
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
		cheetah_enable_pcache();
		break;

	default:
		printk("Unknown boot switch (-%c)\n", c);
		break;
	}
}

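/* Scan the boot command line: single-character switches introduced by
 * '-' are handed to process_switch() above, "mem=" is parsed into
 * cmdline_memory_size, and everything else is skipped.
 */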
static void __init boot_flags_init(char *commands)
{
	while (*commands) {
		/* Move to the start of the next "argument". */
		while (*commands == ' ')
			commands++;

		/* Process any command switches, otherwise skip it. */
		if (*commands == '\0')
			break;
		if (*commands == '-') {
			commands++;
			while (*commands && *commands != ' ')
				process_switch(*commands++);
			continue;
		}
		if (!strncmp(commands, "mem=", 4))
			cmdline_memory_size = memparse(commands + 4, &commands);

		while (*commands && *commands != ' ')
			commands++;
	}
}

extern unsigned short root_flags;
extern unsigned short root_dev;
extern unsigned short ram_flags;
#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

extern int root_mountflags;

char reboot_command[COMMAND_LINE_SIZE];

static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };

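/* Rewrite the code sequences recorded in the __cpuid_patch table with
 * the 4-instruction variant matching the detected CPU type (starfire,
 * cheetah over Safari or JBUS, or sun4v), flushing the I-cache after
 * each word is stored.  Plain spitfire systems that are not starfire
 * keep the default code and return early.
 */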
static void __init per_cpu_patch(void)
{
	struct cpuid_patch_entry *p;
	unsigned long ver;
	int is_jbus;

	if (tlb_type == spitfire && !this_is_starfire)
		return;

	is_jbus = 0;
	if (tlb_type != hypervisor) {
		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		is_jbus = ((ver >> 32UL) == __JALAPENO_ID ||
			   (ver >> 32UL) == __SERRANO_ID);
	}

	p = &__cpuid_patch;
	while (p < &__cpuid_patch_end) {
		unsigned long addr = p->addr;
		unsigned int *insns;

		switch (tlb_type) {
		case spitfire:
			insns = &p->starfire[0];
			break;
		case cheetah:
		case cheetah_plus:
			if (is_jbus)
				insns = &p->cheetah_jbus[0];
			else
				insns = &p->cheetah_safari[0];
			break;
		case hypervisor:
			insns = &p->sun4v[0];
			break;
		default:
			prom_printf("Unknown cpu type, halting.\n");
			prom_halt();
		}

		*(unsigned int *) (addr + 0) = insns[0];
		wmb();
		__asm__ __volatile__("flush %0" : : "r" (addr + 0));

		*(unsigned int *) (addr + 4) = insns[1];
		wmb();
		__asm__ __volatile__("flush %0" : : "r" (addr + 4));

		*(unsigned int *) (addr + 8) = insns[2];
		wmb();
		__asm__ __volatile__("flush %0" : : "r" (addr + 8));

		*(unsigned int *) (addr + 12) = insns[3];
		wmb();
		__asm__ __volatile__("flush %0" : : "r" (addr + 12));

		p++;
	}
}

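/* The helpers below walk sun4v patch tables: each entry records an
 * address whose one- or two-instruction sequence must be replaced
 * when running under the hypervisor (or on M7/SN parts).  The new
 * instruction words are stored and flushed from the I-cache.
 */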
void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
			     struct sun4v_1insn_patch_entry *end)
{
	while (start < end) {
		unsigned long addr = start->addr;

		*(unsigned int *) (addr + 0) = start->insn;
		wmb();
		__asm__ __volatile__("flush %0" : : "r" (addr + 0));

		start++;
	}
}

void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
			     struct sun4v_2insn_patch_entry *end)
{
	while (start < end) {
		unsigned long addr = start->addr;

		*(unsigned int *) (addr + 0) = start->insns[0];
		wmb();
		__asm__ __volatile__("flush %0" : : "r" (addr + 0));

		*(unsigned int *) (addr + 4) = start->insns[1];
		wmb();
		__asm__ __volatile__("flush %0" : : "r" (addr + 4));

		start++;
	}
}

void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
			      struct sun4v_2insn_patch_entry *end)
{
	while (start < end) {
		unsigned long addr = start->addr;

		*(unsigned int *) (addr + 0) = start->insns[0];
		wmb();
		__asm__ __volatile__("flush %0" : : "r" (addr + 0));

		*(unsigned int *) (addr + 4) = start->insns[1];
		wmb();
		__asm__ __volatile__("flush %0" : : "r" (addr + 4));

		start++;
	}
}

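/* Apply every sun4v patch range when running on the hypervisor, plus
 * the M7/SN-specific range on those chips, then call
 * sun4v_hvapi_init().  On non-hypervisor systems this is a no-op.
 */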
static void __init sun4v_patch(void)
{
	extern void sun4v_hvapi_init(void);

	if (tlb_type != hypervisor)
		return;

	sun4v_patch_1insn_range(&__sun4v_1insn_patch,
				&__sun4v_1insn_patch_end);

	sun4v_patch_2insn_range(&__sun4v_2insn_patch,
				&__sun4v_2insn_patch_end);
	if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
	    sun4v_chip_type == SUN4V_CHIP_SPARC_SN)
		sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
					 &__sun_m7_2insn_patch_end);

	sun4v_hvapi_init();
}

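/* Patch in the 3- and 6-instruction popc sequences recorded in the
 * __popc_*insn_patch tables; only called once AV_SPARC_POPC has been
 * established (see init_sparc64_elf_hwcap below).
 */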
static void __init popc_patch(void)
{
	struct popc_3insn_patch_entry *p3;
	struct popc_6insn_patch_entry *p6;

	p3 = &__popc_3insn_patch;
	while (p3 < &__popc_3insn_patch_end) {
		unsigned long i, addr = p3->addr;

		for (i = 0; i < 3; i++) {
			*(unsigned int *) (addr + (i * 4)) = p3->insns[i];
			wmb();
			__asm__ __volatile__("flush %0"
					     : : "r" (addr + (i * 4)));
		}

		p3++;
	}

	p6 = &__popc_6insn_patch;
	while (p6 < &__popc_6insn_patch_end) {
		unsigned long i, addr = p6->addr;

		for (i = 0; i < 6; i++) {
			*(unsigned int *) (addr + (i * 4)) = p6->insns[i];
			wmb();
			__asm__ __volatile__("flush %0"
					     : : "r" (addr + (i * 4)));
		}

		p6++;
	}
}

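/* Likewise, replace each 3-instruction sequence recorded in the
 * __pause_3insn_patch table; only called once AV_SPARC_PAUSE has been
 * established (see init_sparc64_elf_hwcap below).
 */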
static void __init pause_patch(void)
{
	struct pause_patch_entry *p;

	p = &__pause_3insn_patch;
	while (p < &__pause_3insn_patch_end) {
		unsigned long i, addr = p->addr;

		for (i = 0; i < 3; i++) {
			*(unsigned int *) (addr + (i * 4)) = p->insns[i];
			wmb();
			__asm__ __volatile__("flush %0"
					     : : "r" (addr + (i * 4)));
		}

		p++;
	}
}

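/* Early boot entry point: apply the code patches above, sanity-check
 * the boot cpu id against NR_CPUS, record it in the boot thread_info,
 * do early time/PROM setup and then continue into start_kernel().
 */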
void __init start_early_boot(void)
{
	int cpu;

	check_if_starfire();
	per_cpu_patch();
	sun4v_patch();

	cpu = hard_smp_processor_id();
	if (cpu >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
			    cpu, NR_CPUS);
		prom_halt();
	}
	current_thread_info()->cpu = cpu;

	time_init_early();
	prom_init_report();
	start_kernel();
}

/* On Ultra, we support all of the v8 capabilities. */
unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
				   HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
				   HWCAP_SPARC_V9);
EXPORT_SYMBOL(sparc64_elf_hwcap);

static const char *hwcaps[] = {
	"flush", "stbar", "swap", "muldiv", "v9",
	"ultra3", "blkinit", "n2",

	/* These strings are as they appear in the machine description
	 * 'hwcap-list' property for cpu nodes.
	 */
	"mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
	"ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
	"ima", "cspare", "pause", "cbcond", NULL /*reserved for crypto */,
	"adp",
};

static const char *crypto_hwcaps[] = {
	"aes", "des", "kasumi", "camellia", "md5", "sha1", "sha256",
	"sha512", "mpmul", "montmul", "montsqr", "crc32c",
};

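/* seq_file helper: print the hwcap names whose bits are set in
 * sparc64_elf_hwcap, plus the crypto capabilities read from %asr26
 * (cfr) when HWCAP_SPARC_CRYPTO is present.
 */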
void cpucap_info(struct seq_file *m)
{
	unsigned long caps = sparc64_elf_hwcap;
	int i, printed = 0;

	seq_puts(m, "cpucaps\t\t: ");
	for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
		unsigned long bit = 1UL << i;
		if (hwcaps[i] && (caps & bit)) {
			seq_printf(m, "%s%s",
				   printed ? "," : "", hwcaps[i]);
			printed++;
		}
	}
	if (caps & HWCAP_SPARC_CRYPTO) {
		unsigned long cfr;

		__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));
		for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
			unsigned long bit = 1UL << i;
			if (cfr & bit) {
				seq_printf(m, "%s%s",
					   printed ? "," : "", crypto_hwcaps[i]);
				printed++;
			}
		}
	}
	seq_putc(m, '\n');
}

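/* Boot-time reporting: print the detected capabilities to the kernel
 * log in groups of eight, as "CPU CAPS: [name,name,...]".
 */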
static void __init report_one_hwcap(int *printed, const char *name)
{
	if ((*printed) == 0)
		printk(KERN_INFO "CPU CAPS: [");
	printk(KERN_CONT "%s%s",
	       (*printed) ? "," : "", name);
	if (++(*printed) == 8) {
		printk(KERN_CONT "]\n");
		*printed = 0;
	}
}

static void __init report_crypto_hwcaps(int *printed)
{
	unsigned long cfr;
	int i;

	__asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr));

	for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
		unsigned long bit = 1UL << i;
		if (cfr & bit)
			report_one_hwcap(printed, crypto_hwcaps[i]);
	}
}

static void __init report_hwcaps(unsigned long caps)
{
	int i, printed = 0;

	for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
		unsigned long bit = 1UL << i;
		if (hwcaps[i] && (caps & bit))
			report_one_hwcap(&printed, hwcaps[i]);
	}
	if (caps & HWCAP_SPARC_CRYPTO)
		report_crypto_hwcaps(&printed);
	if (printed != 0)
		printk(KERN_CONT "]\n");
}

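/* The machine description "cpu" node carries a 'hwcap-list' property
 * holding a sequence of NUL-terminated capability names.  Translate
 * each name into its hwcaps[] bit (or HWCAP_SPARC_CRYPTO for the
 * crypto names) and return the accumulated mask; returns 0 when no
 * machine description is available.
 */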
static unsigned long __init mdesc_cpu_hwcap_list(void)
{
	struct mdesc_handle *hp;
	unsigned long caps = 0;
	const char *prop;
	int len;
	u64 pn;

	hp = mdesc_grab();
	if (!hp)
		return 0;

	pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
	if (pn == MDESC_NODE_NULL)
		goto out;

	prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
	if (!prop)
		goto out;

	while (len) {
		int i, plen;

		for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
			unsigned long bit = 1UL << i;

			if (hwcaps[i] && !strcmp(prop, hwcaps[i])) {
				caps |= bit;
				break;
			}
		}
		for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) {
			if (!strcmp(prop, crypto_hwcaps[i]))
				caps |= HWCAP_SPARC_CRYPTO;
		}

		plen = strlen(prop) + 1;
		prop += plen;
		len -= plen;
	}

out:
	mdesc_release(hp);
	return caps;
}

/* This yields a mask that user programs can use to figure out what
 * instruction set this cpu supports.
 */
static void __init init_sparc64_elf_hwcap(void)
{
	unsigned long cap = sparc64_elf_hwcap;
	unsigned long mdesc_caps;

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cap |= HWCAP_SPARC_ULTRA3;
	else if (tlb_type == hypervisor) {
		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
			cap |= HWCAP_SPARC_BLKINIT;
		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
		    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
			cap |= HWCAP_SPARC_N2;
	}

	cap |= (AV_SPARC_MUL32 | AV_SPARC_DIV32 | AV_SPARC_V8PLUS);

	mdesc_caps = mdesc_cpu_hwcap_list();
	if (!mdesc_caps) {
		if (tlb_type == spitfire)
			cap |= AV_SPARC_VIS;
		if (tlb_type == cheetah || tlb_type == cheetah_plus)
			cap |= AV_SPARC_VIS | AV_SPARC_VIS2;
		if (tlb_type == cheetah_plus) {
			unsigned long impl, ver;

			__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
			impl = ((ver >> 32) & 0xffff);
			if (impl == PANTHER_IMPL)
				cap |= AV_SPARC_POPC;
		}
		if (tlb_type == hypervisor) {
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
				cap |= AV_SPARC_ASI_BLK_INIT;
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
				cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
					AV_SPARC_ASI_BLK_INIT |
					AV_SPARC_POPC);
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
			    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
				cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
					AV_SPARC_FMAF);
		}
	}
	sparc64_elf_hwcap = cap | mdesc_caps;

	report_hwcaps(sparc64_elf_hwcap);

	if (sparc64_elf_hwcap & AV_SPARC_POPC)
		popc_patch();
	if (sparc64_elf_hwcap & AV_SPARC_PAUSE)
		pause_patch();
}

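/* Allocate the per-cpu hard/soft IRQ stacks from bootmem, using the
 * memory node of each possible cpu.
 */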
void __init alloc_irqstack_bootmem(void)
{
	unsigned int i, node;

	for_each_possible_cpu(i) {
		node = cpu_to_node(i);

		softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
							THREAD_SIZE,
							THREAD_SIZE, 0);
		hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
							THREAD_SIZE,
							THREAD_SIZE, 0);
	}
}

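/* Main architecture setup: fetch and parse the PROM boot arguments,
 * register the early PROM console, initialise the IDPROM, root and
 * ramdisk settings, and (optionally) IP autoconfig addresses from the
 * PROM, then set up paging, the ELF hwcap mask and the IRQ stacks.
 */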
void __init setup_arch(char **cmdline_p)
{
	/* Initialize PROM console and command line. */
	*cmdline_p = prom_getbootargs();
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
	parse_early_param();

	boot_flags_init(*cmdline_p);
#ifdef CONFIG_EARLYFB
	if (btext_find_display())
#endif
		register_console(&prom_early_console);

	if (tlb_type == hypervisor)
		printk("ARCH: SUN4V\n");
	else
		printk("ARCH: SUN4U\n");

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	idprom_init();

	if (!root_flags)
		root_mountflags &= ~MS_RDONLY;
	ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif

	task_thread_info(&init_task)->kregs = &fake_swapper_regs;

#ifdef CONFIG_IP_PNP
	if (!ic_set_manually) {
		phandle chosen = prom_finddevice("/chosen");
		u32 cl, sv, gw;

		cl = prom_getintdefault (chosen, "client-ip", 0);
		sv = prom_getintdefault (chosen, "server-ip", 0);
		gw = prom_getintdefault (chosen, "gateway-ip", 0);
		if (cl && sv) {
			ic_myaddr = cl;
			ic_servaddr = sv;
			if (gw)
				ic_gateway = gw;
#if defined(CONFIG_IP_PNP_BOOTP) || defined(CONFIG_IP_PNP_RARP)
			ic_proto_enabled = 0;
#endif
		}
	}
#endif

	/* Get boot processor trap_block[] setup. */
	init_cur_cpu_trap(current_thread_info());

	paging_init();
	init_sparc64_elf_hwcap();
	smp_fill_in_cpu_possible_map();
	/*
	 * Once the OF device tree and MDESC have been setup and nr_cpus has
	 * been parsed, we know the list of possible cpus.  Therefore we can
	 * allocate the IRQ stacks.
	 */
	alloc_irqstack_bootmem();
}

extern int stop_a_enabled;

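/* Drop into the PROM command line when Stop-A handling is enabled
 * (stop_a_enabled), flushing user register windows first.  Exported
 * so modules can trigger it.
 */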
void sun_do_break(void)
{
	if (!stop_a_enabled)
		return;

	prom_printf("\n");
	flush_user_windows();

	prom_cmdline();
}
EXPORT_SYMBOL(sun_do_break);

int stop_a_enabled = 1;
EXPORT_SYMBOL(stop_a_enabled);